Repository 'data_manager_humann2_database_downloader'
hg clone https://toolshed.g2.bx.psu.edu/repos/iuc/data_manager_humann2_database_downloader

Changeset 0:048593e41359 (2017-03-12)
Next changeset 1:1316375a8cbb (2017-03-13)
Commit message:
planemo upload for repository https://github.com/ASaiM/galaxytools/tree/master/data_managers/data_manager_humann2_database_downloader commit 3e179ac4ab2051414320b3811540dfc9b0966061
added:
data_manager/data_manager_humann2_download.py
data_manager/data_manager_humann2_download.xml
data_manager_conf.xml
tool-data/humann2_nucleotide_database.loc.sample
tool-data/humann2_protein_database.loc.sample
tool_data_table_conf.xml.sample
diff -r 000000000000 -r 048593e41359 data_manager/data_manager_humann2_download.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/data_manager_humann2_download.py Sun Mar 12 14:33:34 2017 -0400
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+#
+# Data manager for reference data for the 'humann2' Galaxy tools
+import datetime
+import json
+import optparse
+import os
+import subprocess
+import sys
+
+
+HUMANN2_REFERENCE_DATA = {
+    "full": "Full",
+    "DEMO": "Demo",
+    "uniref50_diamond": "Full UniRef50",
+    "uniref50_ec_filtered_diamond": "EC-filtered UniRef50",
+    "uniref50_GO_filtered_rapsearch2": "GO filtered UniRef50 for rapsearch2",
+    "uniref90_diamond": "Full UniRef50",
+    "uniref90_ec_filtered_diamond": "EC-filtered UniRef90",
+    "DEMO_diamond": "Demo"
+}
+
+
+# Utility functions for interacting with Galaxy JSON
+def read_input_json(jsonfile):
+    """Read the JSON supplied from the data manager tool
+
+    Returns a tuple (param_dict,extra_files_path)
+
+    'param_dict' is an arbitrary dictionary of parameters
+    input into the tool; 'extra_files_path' is the path
+    to a directory where output files must be put for the
+    receiving data manager to pick them up.
+
+    NB the directory pointed to by 'extra_files_path'
+    doesn't exist initially, it is the job of the script
+    to create it if necessary.
+
+    """
+    params = json.loads(open(jsonfile).read())
+    return (params['param_dict'],
+            params['output_data'][0]['extra_files_path'])
+
+
+# Utility functions for creating data table dictionaries
+#
+# Example usage:
+# >>> d = create_data_tables_dict()
+# >>> add_data_table(d,'my_data')
+# >>> add_data_table_entry(d,'my_data',dict(dbkey='hg19',value='human'))
+# >>> add_data_table_entry(d,'my_data',dict(dbkey='mm9',value='mouse'))
+# >>> print(json.dumps(d))
+def create_data_tables_dict():
+    """Return a dictionary for storing data table information
+
+    Returns a dictionary that can be used with 'add_data_table'
+    and 'add_data_table_entry' to store information about a
+    data table. It can be converted to JSON to be sent back to
+    the data manager.
+
+    """
+    d = {}
+    d['data_tables'] = {}
+    return d
+
+
+def add_data_table(d, table):
+    """Add a data table to the data tables dictionary
+
+    Creates a placeholder for a data table called 'table'.
+
+    """
+    d['data_tables'][table] = []
+
+
+def add_data_table_entry(d, table, entry):
+    """Add an entry to a data table
+
+    Appends an entry to the data table 'table'. 'entry'
+    should be a dictionary where the keys are the names of
+    columns in the data table.
+
+    Raises an exception if the named data table doesn't
+    exist.
+
+    """
+    try:
+        d['data_tables'][table].append(entry)
+    except KeyError:
+        raise Exception("add_data_table_entry: no table '%s'" % table)
+
+
+def download_humann2_db(data_tables, table_name, database, build, target_dir):
+    """Download HUMAnN2 database
+
+    Creates references to the specified file(s) on the Galaxy
+    server in the appropriate data table (determined from the
+    file extension).
+
+    The 'data_tables' dictionary should have been created using
+    the 'create_data_tables_dict' and 'add_data_table' functions.
+
+    Arguments:
+      data_tables: a dictionary containing the data table info
+      table_name: name of the table
+      database: database to download (chocophlan or uniref)
+      build: build of the database to download
+      target_dir: directory to put copy or link to the data file
+
+    """
+    value = "%s-%s-%s" % (database, build, datetime.date.today().isoformat())
+    db_target_dir = os.path.join(target_dir, database)
+    build_target_dir = os.path.join(db_target_dir, build)
+    os.makedirs(build_target_dir)
+    cmd = "humann2_databases --download %s %s %s" % (database,
+                                                     build,
+                                                     db_target_dir)
+    subprocess.check_call(cmd, shell=True)
+    print(os.listdir(db_target_dir))
+    os.rename(os.path.join(db_target_dir, database), build_target_dir)
+    print(os.listdir(db_target_dir))
+    add_data_table_entry(
+        data_tables,
+        table_name,
+        dict(
+            dbkey=build,
+            value=value,
+            name=HUMANN2_REFERENCE_DATA[build],
+            path=build_target_dir))
+
+
+if __name__ == "__main__":
+    print("Starting...")
+
+    # Read command line
+    parser = optparse.OptionParser(description='Download HUMAnN2 database')
+    parser.add_option('--database', help="Database name")
+    parser.add_option('--build', help="Build of the database")
+    options, args = parser.parse_args()
+    print("args   : %s" % args)
+
+    # Check for JSON file
+    if len(args) != 1:
+        sys.stderr.write("Need to supply JSON file name\n")
+        sys.exit(1)
+
+    jsonfile = args[0]
+
+    # Read the input JSON
+    params, target_dir = read_input_json(jsonfile)
+
+    # Make the target directory
+    print("Making %s" % target_dir)
+    os.mkdir(target_dir)
+
+    # Set up data tables dictionary
+    data_tables = create_data_tables_dict()
+
+    if options.database == "chocophlan":
+        table_name = 'humann2_nucleotide_database'
+    else:
+        table_name = 'humann2_protein_database'
+    add_data_table(data_tables, table_name)
+
+    # Fetch data from specified data sources
+    download_humann2_db(
+        data_tables,
+        table_name,
+        options.database,
+        options.build,
+        target_dir)
+
+    # Write output JSON
+    print("Outputting JSON")
+    print(str(json.dumps(data_tables)))
+    open(jsonfile, 'w').write(json.dumps(data_tables))
+    print("Done.")
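For orientation, here is a minimal sketch (not part of the repository) of exercising the script above outside Galaxy. Galaxy normally supplies the JSON file; the structure the script expects (a 'param_dict' plus 'output_data'[0]['extra_files_path']) can be mocked by hand. The temporary directory layout, the DEMO build choice and the relative script path are illustrative assumptions, and humann2_databases from the humann2 package must be on the PATH.

    # Sketch only: drive data_manager_humann2_download.py by hand.
    # Assumes the humann2 package (humann2_databases) is installed and that
    # this is run from the repository root.
    import json
    import subprocess
    import tempfile

    work = tempfile.mkdtemp()
    extra_files_path = work + "/extra_files"  # must not exist yet; the script creates it

    # Minimal stand-in for the JSON Galaxy passes to a data manager tool
    galaxy_json = {
        "param_dict": {"db": {"database": "chocophlan", "build": "DEMO"}},
        "output_data": [{"extra_files_path": extra_files_path}],
    }
    json_path = work + "/input.json"
    with open(json_path, "w") as fh:
        json.dump(galaxy_json, fh)

    subprocess.check_call([
        "python", "data_manager/data_manager_humann2_download.py",
        "--database", "chocophlan",
        "--build", "DEMO",
        json_path,
    ])

    # The script overwrites the JSON file with the data table entries, roughly:
    # {"data_tables": {"humann2_nucleotide_database":
    #     [{"value": "chocophlan-DEMO-<date>", "dbkey": "DEMO", "name": "Demo",
    #       "path": ".../extra_files/chocophlan/DEMO"}]}}
    print(open(json_path).read())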
diff -r 000000000000 -r 048593e41359 data_manager/data_manager_humann2_download.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/data_manager_humann2_download.xml Sun Mar 12 14:33:34 2017 -0400
@@ -0,0 +1,56 @@
+<tool id="data_manager_humann2_download" name="HUMAnN2 download" version="0.9.9" tool_type="manage_data">
+    <description>Download HUMAnN2 database</description>
+    <requirements>
+        <requirement type="package" version="0.9.9">humann2</requirement>
+    </requirements>
+    <stdio>
+        <exit_code range=":-1"  level="fatal" description="Error: Cannot open file" />
+        <exit_code range="1:"  level="fatal" description="Error" />
+    </stdio>
+    <command interpreter="python">
+        data_manager_humann2_download.py
+            --database '$db.database'
+            --build '$db.build'
+            '${out_file}'
+    </command>
+    <inputs>
+        <conditional name="db">
+            <param name="database" type="select" label="Type of database to download">
+                <option value="chocophlan" selected="true">Nucleotide database</option>
+                <option value="uniref">Protein database</option>
+            </param>
+            <when value="chocophlan">
+                <param name="build" type="select" label="Build for nucleotide database">
+                    <option value="full" selected="true">Full</option>
+                    <option value="DEMO">Demo</option>
+                </param>
+            </when>
+            <when value="uniref">
+                <param name="build" type="select" label="Build for protein database">
+                    <option value="uniref50_diamond">Full UniRef50</option>
+                    <option value="uniref50_ec_filtered_diamond">EC-filtered UniRef50</option>
+                    <option value="uniref50_GO_filtered_rapsearch2">GO filtered UniRef50 for rapsearch2</option>
+                    <option value="uniref90_diamond" selected="true">Full UniRef50</option>
+                    <option value="uniref90_ec_filtered_diamond">EC-filtered UniRef90</option>
+                    <option value="DEMO_diamond">Demo</option>
+                </param>
+            </when>
+        </conditional>
+    </inputs>
+    <outputs>
+           <data name="out_file" format="data_manager_json" label="${tool.name}"/>
+    </outputs>
+    <tests>
+    </tests>
+    <help>
+
+This tool downloads the HUMAnN2 databases.
+
+`Read more about the tool <http://huttenhower.sph.harvard.edu/humann2/manual>`_.
+
+    </help>
+    <citations>
+        <citation type="doi">10.1371/journal.pcbi.1003153</citation>
+        <yield />
+    </citations>
+</tool>
diff -r 000000000000 -r 048593e41359 data_manager_conf.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager_conf.xml Sun Mar 12 14:33:34 2017 -0400
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<data_managers>
+    <data_manager tool_file="data_manager/data_manager_humann2_download.xml" id="data_manager_humann2_download" >
+        <data_table name="humann2_nucleotide_database">  <!-- Defines a Data Table to be modified. -->
+            <output> <!-- Handle the output of the Data Manager Tool -->
+                <column name="value" />  <!-- columns that are going to be specified by the Data Manager Tool -->
+                <column name="name" />  <!-- columns that are going to be specified by the Data Manager Tool -->
+                <column name="dbkey" /> <!-- columns that are going to be specified by the Data Manager Tool -->
+                <column name="path" output_ref="out_file" >
+                    <move type="directory">
+                        <source>${path}</source>
+                        <target base="${GALAXY_DATA_MANAGER_DATA_PATH}">humann2/data/nucleotide_database/${dbkey}</target>
+                    </move>
+                    <value_translation>${GALAXY_DATA_MANAGER_DATA_PATH}/humann2/data/nucleotide_database/${dbkey}</value_translation>
+                </column>
+            </output>
+        </data_table>
+        <data_table name="humann2_protein_database">  <!-- Defines a Data Table to be modified. -->
+            <output> <!-- Handle the output of the Data Manager Tool -->
+                <column name="value" />  <!-- columns that are going to be specified by the Data Manager Tool -->
+                <column name="name" />  <!-- columns that are going to be specified by the Data Manager Tool -->
+                <column name="dbkey" /> <!-- columns that are going to be specified by the Data Manager Tool -->
+                <column name="path" output_ref="out_file" >
+                    <move type="directory">
+                        <source>${path}</source>
+                        <target base="${GALAXY_DATA_MANAGER_DATA_PATH}">humann2/data/protein_database/${dbkey}</target>
+                    </move>
+                    <value_translation>${GALAXY_DATA_MANAGER_DATA_PATH}/humann2/data/protein_database/${dbkey}</value_translation>
+                </column>
+            </output>
+        </data_table>
+    </data_manager>
+</data_managers>
+
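To make the move/value_translation steps above concrete, the row Galaxy ends up appending to humann2_nucleotide_database.loc would look roughly like the one below (columns are tab-separated in the order value, name, dbkey, path; the date in the value and the final location under GALAXY_DATA_MANAGER_DATA_PATH are illustrative):

    chocophlan-full-2017-03-12	Full	full	${GALAXY_DATA_MANAGER_DATA_PATH}/humann2/data/nucleotide_database/full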
diff -r 000000000000 -r 048593e41359 tool-data/humann2_nucleotide_database.loc.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/humann2_nucleotide_database.loc.sample Sun Mar 12 14:33:34 2017 -0400
@@ -0,0 +1,4 @@
+#This is a sample file distributed with Galaxy that enables tools
+#to use a directory of HUMAnN2 nucleotide (ChocoPhlAn) database files.
+#The file has this format (white space characters are TAB characters):
+#02_16_2014 ChocoPhlAn chocophlan /path/to/data
\ No newline at end of file
diff -r 000000000000 -r 048593e41359 tool-data/humann2_protein_database.loc.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/humann2_protein_database.loc.sample Sun Mar 12 14:33:34 2017 -0400
@@ -0,0 +1,8 @@
+#This is a sample file distributed with Galaxy that enables tools
+#to use a directory of HUMAnN2 protein (UniRef) database files.
+#The file has this format (white space characters are TAB characters):
+#02_16_2014 Full UniRef50 uniref50_diamond /path/to/data
+#02_16_2014 EC-filtered UniRef50 uniref50_ec_filtered_diamond /path/to/data
+#02_16_2014 GO filtered UniRef50 for rapsearch2 uniref50_GO_filtered_rapsearch2 /path/to/data
+#02_16_2014 Full UniRef90 uniref90_diamond /path/to/data
+#02_16_2014 EC-filtered UniRef90 uniref90_ec_filtered_diamond /path/to/data
\ No newline at end of file
diff -r 000000000000 -r 048593e41359 tool_data_table_conf.xml.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool_data_table_conf.xml.sample Sun Mar 12 14:33:34 2017 -0400
@@ -0,0 +1,10 @@
+<tables>
+    <table name="humann2_nucleotide_database" comment_char="#">
+        <columns>value, name, dbkey, path</columns>
+        <file path="tool-data/humann2_nucleotide_database.loc" />
+    </table>
+    <table name="humann2_protein_database" comment_char="#">
+        <columns>value, name, dbkey, path</columns>
+        <file path="tool-data/humann2_protein_database.loc" />
+    </table>
+</tables>