Repository 'data_manager_salmon_index_builder'
hg clone https://toolshed.g2.bx.psu.edu/repos/ieguinoa/data_manager_salmon_index_builder

Changeset 0:6cd60ba8a842 (2018-08-14)
Next changeset 1:c5dea2080109 (2018-08-14)
Commit message:
Uploaded
added:
README.md
data_manager/data_manager_fetch_gff.py
data_manager/data_manager_fetch_gff.xml
data_manager_conf.xml
tool-data/all_gff.loc.sample
tool-data/representative_gff.loc.sample
tool_data_table_conf.xml.sample
diff -r 000000000000 -r 6cd60ba8a842 README.md
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/README.md Tue Aug 14 11:14:52 2018 -0400
@@ -0,0 +1,2 @@
+# data_manager_fetch_gff
+Galaxy Data Manager to fetch gene annotation files
diff -r 000000000000 -r 6cd60ba8a842 data_manager/data_manager_fetch_gff.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/data_manager_fetch_gff.py Tue Aug 14 11:14:52 2018 -0400
@@ -0,0 +1,445 @@
+#!/usr/bin/env python
+#Dan Blankenberg
+
+import sys
+import os
+import tempfile
+import shutil
+import optparse
+from ftplib import FTP
+import tarfile
+import zipfile
+import gzip
+import bz2
+try:
+    # For Python 3.0 and later
+    from urllib.request import urlopen
+    from io import BytesIO as StringIO
+    from io import UnsupportedOperation
+except ImportError:
+    # Fall back to Python 2's urllib2
+    from urllib2 import urlopen
+    from StringIO import StringIO
+    UnsupportedOperation = AttributeError
+from json import loads, dumps
+
+
+CHUNK_SIZE = 2**20  # 1mb
+
+DATA_TABLE_NAME = 'all_gff'
+
+def cleanup_before_exit( tmp_dir ):
+    if tmp_dir and os.path.exists( tmp_dir ):
+        shutil.rmtree( tmp_dir )
+
+
+def stop_err(msg):
+    sys.stderr.write(msg)
+    sys.exit(1)
+
+
+def get_dbkey_dbname_id_name( params, dbkey_description=None ):
+#    dbkey = params['param_dict']['dbkey_source']['dbkey']
+    #TODO: ensure sequence_id is unique and does not already appear in location file
+    sequence_id = params['param_dict']['sequence_id']
+    if not sequence_id:
+        sequence_id = dbkey #uuid.uuid4() generate and use an uuid instead?
+
+#    if params['param_dict']['dbkey_source']['dbkey_source_selector'] == 'new':
+#        dbkey_name = params['param_dict']['dbkey_source']['dbkey_name']
+#        if not dbkey_name:
+#            dbkey_name = dbkey
+#    else:
+#        dbkey_name = None
+    dbkey = params['param_dict']['dbkey']
+    dbkey_name = dbkey_description
+    sequence_name = params['param_dict']['sequence_name']
+    if not sequence_name:
+        sequence_name = dbkey_description
+        if not sequence_name:
+            sequence_name = dbkey
+    return dbkey, dbkey_name, sequence_id, sequence_name
+
+
+def _get_files_in_ftp_path( ftp, path ):
+    path_contents = []
+    ftp.retrlines( 'MLSD %s' % ( path ), path_contents.append )
+    return [ line.split( ';' )[ -1 ].lstrip() for line in path_contents ]
+
+
+def _get_stream_readers_for_tar( fh, tmp_dir ):
+    fasta_tar = tarfile.open( fileobj=fh, mode='r:*' )
+    return [x for x in [fasta_tar.extractfile(member) for member in fasta_tar.getmembers()] if x]
+
+
+def _get_stream_readers_for_zip( fh, tmp_dir ):
+    """
+    Unpacks all archived files in a zip file.
+    Individual files will be concatenated (in _stream_fasta_to_file)
+    """
+    fasta_zip = zipfile.ZipFile( fh, 'r' )
+    rval = []
+    for member in fasta_zip.namelist():
+        fasta_zip.extract( member, tmp_dir )
+        rval.append( open( os.path.join( tmp_dir, member ), 'rb' ) )
+    return rval
+
+
+def _get_stream_readers_for_gzip( fh, tmp_dir ):
+    return [ gzip.GzipFile( fileobj=fh, mode='rb') ]
+
+
+def _get_stream_readers_for_bz2( fh, tmp_dir ):
+    return [ bz2.BZ2File( fh.name, 'rb') ]
+
+
+def sort_fasta( fasta_filename, sort_method, params ):
+    if sort_method is None:
+        return
+    assert sort_method in SORTING_METHODS, ValueError( "%s is not a valid sorting option." % sort_method )
+    return SORTING_METHODS[ sort_method ]( fasta_filename, params )
+
+
+def _move_and_index_fasta_for_sorting( fasta_filename ):
+    unsorted_filename = tempfile.NamedTemporaryFile().name
+    shutil.move( fasta_filename, unsorted_filename )
+    fasta_offsets = {}
+    unsorted_fh = open( unsorted_filename )
+    while True:
+        offset = unsorted_fh.tell()
+        line = unsorted_fh.readline()
+        if not line:
+            break
+        if line.startswith( ">" ):
+            line = line.split( None, 1 )[0][1:]
+            fasta_offsets[ line ] = offset
+    unsorted_fh.close()
+    current_order = map( lambda x: x[1], sorted( map( lambda x: ( x[1], x[0] ), fasta_offsets.items() ) ) )
+    return ( unsorted_filename, fasta_offsets, current_order )
+
+
+def _write_sorted_fasta( sorted_names, fasta_offsets, sorted_fasta_filename, unsorted_fasta_filename ):
+    unsorted_fh = open( unsorted_fasta_filename )
+    sorted_fh
[... diff truncated in the source rendering; resumes mid-line below ...]
fh.read( CHUNK_SIZE )
+                    if data:
+                        fasta_writer.write( data )
+                        last_char = data[-1]
+                    else:
+                        break
+                if close_stream:
+                    fh.close()
+        else:
+            while True:
+                data = fasta_stream.read( CHUNK_SIZE )
+                if data:
+                    fasta_writer.write( data )
+                else:
+                    break
+            if close_stream:
+                fasta_stream.close()
+
+    #sort_fasta( fasta_filename, params['param_dict']['sorting']['sort_selector'], params )
+
+
+    return [ ( DATA_TABLE_NAME, dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_filename ) ) ]
+
+
+def compute_fasta_length( fasta_file, out_file, keep_first_word=False ):
+
+    infile = fasta_file
+    out = open( out_file, 'w')
+
+    fasta_title = ''
+    seq_len = 0
+
+    first_entry = True
+
+    for line in open( infile ):
+        line = line.strip()
+        if not line or line.startswith( '#' ):
+            continue
+        if line[0] == '>':
+            if first_entry == False:
+                if keep_first_word:
+                    fasta_title = fasta_title.split()[0]
+                out.write( "%s\t%d\n" % ( fasta_title[ 1: ], seq_len ) )
+            else:
+                first_entry = False
+            fasta_title = line
+            seq_len = 0
+        else:
+            seq_len += len(line)
+
+    # last fasta-entry
+    if keep_first_word:
+        fasta_title = fasta_title.split()[0]
+    out.write( "%s\t%d\n" % ( fasta_title[ 1: ], seq_len ) )
+    out.close()
+
+
+def _create_symlink( input_filename, target_directory, dbkey, dbkey_name, sequence_id, sequence_name ):
+    fasta_base_filename = "%s.fa" % sequence_id
+    fasta_filename = os.path.join( target_directory, fasta_base_filename )
+    os.symlink( input_filename, fasta_filename )
+    return [  ( DATA_TABLE_NAME, dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_filename ) ) ]
+
+
+REFERENCE_SOURCE_TO_DOWNLOAD = dict( ucsc=download_from_ucsc, ncbi=download_from_ncbi, url=download_from_url, history=download_from_history, directory=copy_from_directory )
+
+SORTING_METHODS = dict( as_is=_sort_fasta_as_is, lexicographical=_sort_fasta_lexicographical, gatk=_sort_fasta_gatk, custom=_sort_fasta_custom )
+
+
+def main():
+    #Parse Command Line
+    parser = optparse.OptionParser()
+    parser.add_option( '-d', '--dbkey_description', dest='dbkey_description', action='store', type="string", default=None, help='dbkey_description' )
+    parser.add_option( '-t', '--type', dest='file_type', action='store', type='string', default=None, help='file_type')
+    (options, args) = parser.parse_args()
+
+    filename = args[0]
+    global DATA_TABLE_NAME
+    if options.file_type == 'representative':
+       DATA_TABLE_NAME= 'representative_gff'
+    params = loads( open( filename ).read() )
+    target_directory = params[ 'output_data' ][0]['extra_files_path']
+    os.mkdir( target_directory )
+    data_manager_dict = {}
+
+    dbkey, dbkey_name, sequence_id, sequence_name = get_dbkey_dbname_id_name( params, dbkey_description=options.dbkey_description )
+
+    if dbkey in [ None, '', '?' ]:
+        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )
+
+    # Create a tmp_dir, in case a zip file needs to be uncompressed
+    tmp_dir = tempfile.mkdtemp()
+    #Fetch the FASTA
+    try:
+        REFERENCE_SOURCE_TO_DOWNLOAD[ params['param_dict']['reference_source']['reference_source_selector'] ]( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir )
+    finally:
+        cleanup_before_exit(tmp_dir)
+    #save info to json file
+    open( filename, 'wb' ).write( dumps( data_manager_dict ).encode() )
+
+if __name__ == "__main__":
+    main()
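A quick way to see how main() and get_dbkey_dbname_id_name() above are driven is to look at the JSON document that Galaxy's data manager framework passes to the script as its single positional argument. The sketch below is illustrative only and not part of the commit; all concrete values (dbkey, URL, paths) are hypothetical, while the key names mirror the lookups made in the script and the parameter names defined in the tool XML below.

    #!/usr/bin/env python
    # Illustrative sketch only: builds a params JSON of the shape the data manager
    # script expects. All concrete values here are hypothetical.
    import json
    import tempfile

    params = {
        "param_dict": {
            "dbkey": "anoGam1",              # read as params['param_dict']['dbkey']
            "sequence_name": "",             # blank -> falls back to --dbkey_description, then to dbkey
            "sequence_id": "anoGam1_genes",  # becomes the 'value' column of the data table entry
            "reference_source": {
                "reference_source_selector": "url",  # selects download_from_url in REFERENCE_SOURCE_TO_DOWNLOAD
                "user_url": "http://example.org/anoGam1.gff3.gz",  # assumed key name, matching the tool XML param
            },
        },
        # main() reads the job's extra_files_path here and creates it with os.mkdir()
        "output_data": [{"extra_files_path": "/tmp/job_working_dir/dataset_1_files"}],
    }

    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
        json.dump(params, fh)
        print("params file for the data manager: %s" % fh.name)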
diff -r 000000000000 -r 6cd60ba8a842 data_manager/data_manager_fetch_gff.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/data_manager_fetch_gff.xml Tue Aug 14 11:14:52 2018 -0400
@@ -0,0 +1,66 @@
+<tool id="data_manager_fetch_gff" name="Create entries in gff data table" version="0.0.1" tool_type="manage_data">
+    <description>fetching</description>
+    <command><![CDATA[
+       python "$__tool_directory__"/data_manager_fetch_gff.py "${out_file}"
+       --type $file_type
+       --dbkey_description ${ dbkey.get_display_text() }
+        
+    ]]></command>
+    <inputs>
+         <param name="file_type" type="select" label="GFF file with only one representative transcript per gene (for htseq-count use) or full features file">
+                <option value="representative">Representative GFF</option>
+                <option value="full">GFF with complete features</option>
+            </param>
+
+        <param name="dbkey" type="genomebuild" label="DBKEY to assign to data" />
+        <param type="text" name="sequence_name" value="" label="Name of sequence" />
+        <param type="text" name="sequence_id" value="" label="ID for sequence" />
+        <conditional name="reference_source">
+            <param name="reference_source_selector" type="select" label="Choose the source for the reference genome">
+                <option value="url">URL</option>
+                <option value="history">History</option>
+                <option value="directory">Directory on Server</option>
+            </param>
+            <when value="url">
+                <param type="text" area="True" name="user_url" value="http://" label="URLs" optional="False" />
+            </when>
+            <when value="history">
+                <param name="input_fasta" type="data" format="fasta" label="FASTA File" multiple="False" optional="False" />
+            </when>
+            <when value="directory">
+                <param type="text" name="fasta_filename" value="" label="Full path to FASTA File on disk" optional="False" />
+                <param type="boolean" name="create_symlink" truevalue="create_symlink" falsevalue="copy_file" label="Create symlink to original data instead of copying" checked="False" />
+            </when>
+        </conditional>
+    </inputs>
+    <outputs>
+        <data name="out_file" format="data_manager_json"/>
+    </outputs>
+    <tests>
+        <!-- TODO: need some way to test that new entry was added to data table -->
+        <test>
+            <param name="dbkey" value="anoGam1"/>
+            <param name="sequence_name" value=""/>
+            <param name="sequence_desc" value=""/>
+            <param name="sequence_id" value=""/>
+            <param name="reference_source_selector" value="history"/>
+            <param name="input_fasta" value="phiX174.fasta"/>
+            <param name="sort_selector" value="as_is"/>
+            <output name="out_file" file="phiX174.data_manager_json"/>
+        </test>
+    </tests>
+    <help>
+**What it does**
+
+Fetches a gff file from various sources (URL, Galaxy History, or a server directory) and populates the "all_gff" data table.
+
+------
+
+
+
+.. class:: infomark
+
+**Notice:** If you leave name, description, or id blank, it will be generated automatically.
+
+    </help>
+</tool>
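For orientation, the &lt;command&gt; template above expands to an invocation of roughly the form python .../data_manager/data_manager_fetch_gff.py '/path/to/out_file.json' --type representative --dbkey_description <dbkey display text> (paths hypothetical). Since ${ dbkey.get_display_text() } is not quoted in the template, a display text containing spaces would be split into separate command-line arguments.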
diff -r 000000000000 -r 6cd60ba8a842 data_manager_conf.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager_conf.xml Tue Aug 14 11:14:52 2018 -0400
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<data_managers>
+<data_manager tool_file="data_manager/data_manager_fetch_gff.xml" id="data_manager_fetch_gff">
+      <data_table name="all_gff">
+            <output>
+                <column name="value" />
+                <column name="dbkey" />
+                <column name="name" />
+                <column name="path" output_ref="out_file">
+                    <move type="file">
+                        <source>${path}</source>
+                        <target base="${GALAXY_DATA_MANAGER_DATA_PATH}">${dbkey}/gff/${path}</target>
+                    </move>
+                    <value_translation>${GALAXY_DATA_MANAGER_DATA_PATH}/${dbkey}/gff/${path}</value_translation>
+                    <value_translation type="function">abspath</value_translation>
+                </column>
+            </output>
+        </data_table>
+     <data_table name="representative_gff">
+            <output>
+                <column name="value" />
+                <column name="dbkey" />
+                <column name="name" />
+                <column name="path" output_ref="out_file">
+                    <move type="file">
+                        <source>${path}</source>
+                        <target base="${GALAXY_DATA_MANAGER_DATA_PATH}">${dbkey}/representative_gff/${path}</target>
+                    </move>
+                    <value_translation>${GALAXY_DATA_MANAGER_DATA_PATH}/${dbkey}/representative_gff/${path}</value_translation>
+                    <value_translation type="function">abspath</value_translation>
+                </column>
+            </output>
+        </data_table>
+    </data_manager>
+</data_managers>
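In both tables, the &lt;move&gt; element relocates the file the script leaves in the job's extra_files_path, and the &lt;value_translation&gt; entries rewrite the recorded path to match. As a worked example (hypothetical dbkey and file name): an entry with dbkey anoGam1 and path anoGam1_genes.fa would be moved to ${GALAXY_DATA_MANAGER_DATA_PATH}/anoGam1/gff/anoGam1_genes.fa, and the path column stored in the all_gff table becomes the absolute form of that location.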
diff -r 000000000000 -r 6cd60ba8a842 tool-data/all_gff.loc.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/all_gff.loc.sample Tue Aug 14 11:14:52 2018 -0400
@@ -0,0 +1,3 @@
+#The all_gff.loc file has this format:
+#
+#<unique_build_id> <dbkey> <display_name> <path_to_gff_file>
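The four columns are TAB-separated, following the usual Galaxy .loc convention. An illustrative (entirely hypothetical) entry:

anoGam1_genes	anoGam1	A. gambiae genes (anoGam1)	/data/galaxy/tool-data/anoGam1/gff/anoGam1_genes.gff3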
diff -r 000000000000 -r 6cd60ba8a842 tool-data/representative_gff.loc.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/representative_gff.loc.sample Tue Aug 14 11:14:52 2018 -0400
@@ -0,0 +1,3 @@
+#The representative_gff.loc file has this format:
+#
+#<unique_build_id> <dbkey> <display_name> <path_to_gff_file>
diff -r 000000000000 -r 6cd60ba8a842 tool_data_table_conf.xml.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool_data_table_conf.xml.sample Tue Aug 14 11:14:52 2018 -0400
@@ -0,0 +1,5 @@
+<?xml version="1.0"?>
+<tables>
+ <table name="all_gff" comment_char="#"> <columns>value, dbkey, name, path</columns> <file path="tool-data/all_gff.loc" /> </table>
+ <table name="representative_gff" comment_char="#"> <columns>value, dbkey, name, path</columns> <file path="tool-data/representative_gff.loc" /> </table>
+</tables>
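Each table points at a .loc file under tool-data/ with the same four columns as the samples above. Once an instance's copy of that .loc file is populated (by this data manager or by hand), downstream tools can offer the entries through a select parameter that draws from the table, for example <options from_data_table="all_gff"/> inside a select param.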