# HG changeset patch
# User iuc
# Date 1508442980 14400
# Node ID 53eec20e8fb66a1a8d33a3354763eac6d8721a5e
planemo upload for repository https://github.com/galaxyproject/tools-iuc/tree/master/data_managers/data_manager_fetch_busco/ commit 2896dcfd180800d00ea413a59264ef8b11788b8e
diff -r 000000000000 -r 53eec20e8fb6 data_manager/busco_fetcher.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/busco_fetcher.xml Thu Oct 19 15:56:20 2017 -0400
@@ -0,0 +1,62 @@
+
+
+ dataset downloader
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff -r 000000000000 -r 53eec20e8fb6 data_manager/data_manager.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/data_manager.py Thu Oct 19 15:56:20 2017 -0400
@@ -0,0 +1,69 @@
+import argparse
+import datetime
+import json
+import os
+import shutil
+import tarfile
+import zipfile
+try:
+ # For Python 3.0 and later
+ from urllib.request import Request, urlopen
+except ImportError:
+ # Fall back to Python 2 imports
+ from urllib2 import Request, urlopen
+
+
def url_download(url, workdir):
    """Download ``url`` into ``workdir`` and unpack it in place.

    The payload is first saved as ``workdir/download.dat``.  If it is a tar
    or zip archive it is extracted into ``workdir`` and the archive file is
    removed; any other payload is left on disk as ``download.dat``.

    :param url: source URL (any scheme urllib supports, e.g. http/ftp/file)
    :param workdir: destination directory, created if missing
    """
    file_path = os.path.join(workdir, 'download.dat')
    if not os.path.exists(workdir):
        os.makedirs(workdir)
    src = None
    try:
        src = urlopen(Request(url))
        with open(file_path, 'wb') as dst:
            # Stream in chunks so arbitrarily large datasets fit in memory.
            shutil.copyfileobj(src, dst)
    finally:
        if src:
            src.close()
    if tarfile.is_tarfile(file_path):
        # Context managers close the archive handle deterministically
        # (the original code leaked the open handle).
        with tarfile.open(file_path, 'r:*') as archive:
            archive.extractall(workdir)
    elif zipfile.is_zipfile(file_path):
        with zipfile.ZipFile(file_path, 'r') as archive:
            archive.extractall(workdir)
    else:
        # Not an archive: keep the raw download in place.
        return
    os.remove(file_path)
+
+
def main(args):
    """Fetch a BUSCO dataset and emit the Galaxy data manager JSON.

    ``args.output`` initially holds the parameter JSON Galaxy passes in
    (including the target ``extra_files_path``); it is read first and then
    overwritten with the resulting data table entry.

    :param args: parsed CLI namespace with ``output``, ``name`` and ``url``
    """
    workdir = os.path.join(os.getcwd(), 'busco')
    url_download(args.url, workdir)
    data_manager_entry = {
        'value': args.name.lower(),
        'name': args.name,
        'path': '.',
    }
    data_manager_json = dict(data_tables=dict(busco=data_manager_entry))
    with open(args.output) as params_fh:
        params = json.load(params_fh)
    target_directory = params['output_data'][0]['extra_files_path']
    # NOTE(review): looks like Galaxy does not pre-create extra_files_path;
    # guard the mkdir so a pre-existing directory is not a hard failure.
    if not os.path.isdir(target_directory):
        os.mkdir(target_directory)
    # Move the downloaded dataset files into the Galaxy-managed directory.
    for filename in os.listdir(workdir):
        shutil.move(os.path.join(workdir, filename), target_directory)
    # BUG FIX: the original used the Python 2-only ``file()`` builtin, which
    # raises NameError on Python 3 (contradicting the py2/py3 import shim at
    # the top of this script), and never closed the handle.
    with open(args.output, 'w') as output_fh:
        json.dump(data_manager_json, output_fh)
+
+
if __name__ == '__main__':
    # Command line interface: build the parser and hand off to main().
    parser = argparse.ArgumentParser(description='Create data manager json.')
    parser.add_argument('--out', dest='output', help='JSON filename')
    parser.add_argument(
        '--name',
        dest='name',
        default=str(datetime.date.today()),
        help='Data table entry unique ID',
    )
    parser.add_argument('--url', dest='url', help='Download URL')
    main(parser.parse_args())
diff -r 000000000000 -r 53eec20e8fb6 data_manager_conf.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager_conf.xml Thu Oct 19 15:56:20 2017 -0400
@@ -0,0 +1,18 @@
+
+
+
+
+
+
+
+
diff -r 000000000000 -r 53eec20e8fb6 tool-data/busco.loc.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/busco.loc.sample Thu Oct 19 15:56:20 2017 -0400
@@ -0,0 +1,13 @@
+# This is a sample file distributed with Galaxy that is used to define a
+# list of busco datasets, using three tab-separated columns:
+#
+#
+#
+# Datasets can be retrieved from http://busco.ezlab.org/frame_wget.html
+#
+# "/some/path/arthropoda/" would be the last column in the line
+# For example, an entry for the arthropoda dataset would look like:
+#
+#arthropoda_2.0 arthropoda_2.0 /some/path/arthropoda/
+#
+#
diff -r 000000000000 -r 53eec20e8fb6 tool_data_table_conf.xml.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool_data_table_conf.xml.sample Thu Oct 19 15:56:20 2017 -0400
@@ -0,0 +1,7 @@
+
+
+
+