# HG changeset patch
# User fabio
# Date 1512421545 18000
# Node ID 854be3d51221d924bc8fafef1f2f2e624016b273
Uploaded 20171204

diff -r 000000000000 -r 854be3d51221 ._.DS_Store
Binary file ._.DS_Store has changed
diff -r 000000000000 -r 854be3d51221 ._retrieve.py
Binary file ._retrieve.py has changed
diff -r 000000000000 -r 854be3d51221 ._retrieve.xml
Binary file ._retrieve.xml has changed
diff -r 000000000000 -r 854be3d51221 ._search.py
Binary file ._search.py has changed
diff -r 000000000000 -r 854be3d51221 ._search.xml
Binary file ._search.xml has changed
diff -r 000000000000 -r 854be3d51221 .shed.yml
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/.shed.yml	Mon Dec 04 16:05:45 2017 -0500
@@ -0,0 +1,21 @@
+name: srase
+owner: iuc
+categories:
+  - Web Services
+  - Data Source
+description: Sequence Read Archive Search Engine
+long_description: |
+  A fast querying tool to search the Sequence Read Archive repository
+  using Bloom filters.
+remote_repository_url: https://github.com/fabio-cumbo/sequence-read-archive-search-engine
+homepage_url: https://github.com/fabio-cumbo/sequence-read-archive-search-engine
+type: unrestricted
+auto_tool_repositories:
+  name_template: "{{ tool_id }}"
+  descriptor_template: "Wrapper for the SRA Search Engine application: {{ tool_name }}."
+suite:
+  name: "srase_suite"
+  description: "A suite of Galaxy tools designed to query and extract data from the Sequence Read Archive repository."
+  long_description: |
+    A fast querying tool to search the Sequence Read Archive repository
+    using Bloom filters.
\ No newline at end of file
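The auto_tool_repositories block above lets the Tool Shed create one repository per wrapper by expanding the two templates against each tool's id and name. Below is a minimal sketch of that expansion, assuming hypothetical tool ids/names retrieve/Retrieve and search/Search for the two wrappers, and using plain Python string formatting in place of the Tool Shed's own templating:

    # Illustrative expansion of the .shed.yml templates (tool ids/names are assumed).
    tools = [
        {"tool_id": "retrieve", "tool_name": "Retrieve"},  # assumed id/name
        {"tool_id": "search", "tool_name": "Search"},      # assumed id/name
    ]
    name_template = "{tool_id}"
    descriptor_template = "Wrapper for the SRA Search Engine application: {tool_name}."
    for tool in tools:
        print(name_template.format(**tool))        # repository name
        print(descriptor_template.format(**tool))  # repository description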
diff -r 000000000000 -r 854be3d51221 retrieve.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/retrieve.py	Mon Dec 04 16:05:45 2017 -0500
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+
+# NCBI SRA Tools
+# https://galaxyproject.org/tutorials/upload/
+
+import os
+import optparse
+from subprocess import Popen, PIPE
+
+__version__ = "1.0.0";  # placeholder version string
+
+db_key = "?";
+sra_instant_url = "ftp://ftp-trace.ncbi.nlm.nih.gov/sra/sra-instant/reads/ByRun/sra/SRR/";
+
+def convertSRA(tmp_dir, accession_number, data_format):
+    absolute_tmp_dir = os.path.abspath(tmp_dir);
+    sra_file_path = os.path.join(absolute_tmp_dir, accession_number+".sra");
+    if os.path.isdir(absolute_tmp_dir) and os.path.exists(sra_file_path):
+        process = None;
+        if data_format == ".fasta.gz":
+            process = Popen(["fastq-dump", "--fasta", "--gzip", sra_file_path, "--outdir", absolute_tmp_dir], stdout=PIPE);
+        elif data_format == ".fastq.gz":
+            process = Popen(["fastq-dump", "--gzip", sra_file_path, "--outdir", absolute_tmp_dir], stdout=PIPE);
+        elif data_format == ".fasta":
+            process = Popen(["fastq-dump", "--fasta", sra_file_path, "--outdir", absolute_tmp_dir], stdout=PIPE);
+        elif data_format == ".fastq":
+            process = Popen(["fastq-dump", sra_file_path, "--outdir", absolute_tmp_dir], stdout=PIPE);
+        else:
+            process = None;
+        if process is not None:
+            (output, err) = process.communicate();
+            if process.returncode != 0:
+                # the conversion failed: remove any trace of the output file
+                an_file_path = os.path.join(tmp_dir, accession_number+data_format);
+                if os.path.exists(an_file_path):
+                    os.unlink(an_file_path);
+                return "";
+            return os.path.join(tmp_dir, accession_number+data_format);
+    return "";
+
+def downloadAccessionData(accession_number, accession_path, appdata_path, data_format, limit=10):
+    split = accession_number[:6];
+    srr_path = sra_instant_url+split+"/"+accession_number+"/"+accession_number+".sra";
+    sra_file_path = os.path.join(appdata_path, accession_number+".sra");
+    process = Popen(['wget', srr_path, "--output-document="+sra_file_path], stdout=PIPE);
+    (output, err) = process.communicate();
+    if process.returncode != 0:
+        # remove any trace of the output file
+        if os.path.exists(sra_file_path):
+            os.unlink(sra_file_path);
+        # retry the download until the attempt limit is reached
+        if limit > 0:
+            return downloadAccessionData(accession_number, accession_path, appdata_path, data_format, limit-1);
+        return -1;
+    if os.path.exists(sra_file_path):
+        converted_file_path = convertSRA(appdata_path, accession_number, data_format);
+        if os.path.exists(converted_file_path):
+            os.rename(converted_file_path, accession_path);
+        os.unlink(sra_file_path);
+    return 0;
+
+def process_accessions( options, args ):
+    # create the appdata dir if it does not exist
+    appdata_path = options.appdata;
+    if not os.path.exists(appdata_path):
+        os.makedirs(appdata_path);
+    data_format = options.dataformat;
+    '''
+    # Collection test
+    test_file_name = "Test Collection" + "_" + "SRRtest" + "_" + data_format[1:] + "_" + db_key;
+    test_file_path = os.path.join(appdata_path, test_file_name);
+    file = open(test_file_path, "w");
+    file.write("Hello World");
+    file.close();
+    '''
+    # read inputs
+    comma_sep_file_paths = options.files;
+    #print("files: "+str(comma_sep_file_paths)+" - "+str(type(comma_sep_file_paths)));
+    # check if options.files contains at least one file path
+    if comma_sep_file_paths is not None:
+        # split file paths
+        file_paths = comma_sep_file_paths.split(",");
+        # split file names
+        comma_sep_file_names = str(options.names);
+        #print("names: "+str(comma_sep_file_names));
+        file_names = comma_sep_file_names.split(",");
+        # walk the files containing the accession numbers to retrieve
+        for idx, file_path in enumerate(file_paths):
+            file_name = file_names[idx];
+            #print(file_name + ": " + file_path);
+            with open(file_path) as accessions:
+                for line in accessions:
+                    if line.strip() != "" and not line.startswith(">"):
+                        accession_number = line.strip();
+                        filename_with_collection_prefix = file_name + "_" + accession_number + "_" + data_format[1:] + "_" + db_key;
+                        accession_path = os.path.join(appdata_path, filename_with_collection_prefix);
+                        # download the data file related to accession_number
+                        downloadAccessionData( accession_number, accession_path, appdata_path, data_format );
+    return 0;
+
+def __main__():
+    # Parse the command line options
+    usage = "Usage: retrieve.py --files comma_sep_file_paths --names comma_sep_file_names --format data_format --appdata folder_name";
+    parser = optparse.OptionParser(usage = usage);
+    parser.add_option("-f", "--files", type="string",
+                      action="store", dest="files", help="comma-separated file paths");
+    parser.add_option("-n", "--names", type="string",
+                      action="store", dest="names", help="comma-separated names associated to the files specified in --files");
+    parser.add_option("-e", "--format", type="string",
+                      action="store", dest="dataformat", help="data format");
+    parser.add_option("-a", "--appdata", type="string",
+                      action="store", dest="appdata", help="appdata folder name");
+    parser.add_option("-v", "--version", action="store_true", dest="version",
+                      default=False, help="display version and exit");
+    (options, args) = parser.parse_args();
+    if options.version:
+        print(__version__);
+    else:
+        return process_accessions( options, args );
+
+if __name__ == "__main__": __main__()
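retrieve.py resolves each run accession against the sra-instant FTP mirror, where runs are grouped under the first six characters of the accession, and then converts the downloaded .sra file with fastq-dump. A condensed sketch of that flow, assuming wget and fastq-dump (sra-tools) are available on the PATH:

    # Minimal sketch of the URL layout and conversion performed by retrieve.py
    # (assumes wget and fastq-dump from sra-tools are on the PATH).
    import os
    import subprocess

    SRA_INSTANT_URL = "ftp://ftp-trace.ncbi.nlm.nih.gov/sra/sra-instant/reads/ByRun/sra/SRR/"

    def fetch_and_convert(accession, out_dir, gzip_output=True):
        # e.g. SRR1234567 -> .../SRR123/SRR1234567/SRR1234567.sra
        url = SRA_INSTANT_URL + accession[:6] + "/" + accession + "/" + accession + ".sra"
        sra_path = os.path.join(out_dir, accession + ".sra")
        subprocess.check_call(["wget", url, "--output-document=" + sra_path])
        cmd = ["fastq-dump", sra_path, "--outdir", out_dir]
        if gzip_output:
            cmd.append("--gzip")
        subprocess.check_call(cmd)
        suffix = ".fastq.gz" if gzip_output else ".fastq"
        return os.path.join(out_dir, accession + suffix)

The converted file is then renamed to the pattern collection-name_accession_format_dbkey, which is what lets Galaxy gather the downloads into a dataset collection.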
diff -r 000000000000 -r 854be3d51221 retrieve.xml
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/retrieve.xml	Mon Dec 04 16:05:45 2017 -0500
@@ -0,0 +1,43 @@
+
+    data from SRA
+
+    python
+    sra-tools
+
+    ${stdouterr}
+]]>
+
diff -r 000000000000 -r 854be3d51221 search.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/search.py	Mon Dec 04 16:05:45 2017 -0500
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+
+# https://github.com/ross/requests-futures
+# http://docs.python-requests.org/en/master/user/quickstart/#more-complicated-post-requests
+
+import os, uuid
+import optparse
+import requests
+from requests_futures.sessions import FuturesSession
+
+__version__ = "1.0.0";  # placeholder version string
+
+# proxy to uv0
+service_url = "http://deputy.bx.psu.edu/";
+# url of the query page
+query_url = service_url+"query.php";
+# url of the echo page: just returns 'it works!'
+#echo_url = service_url+"echo.php";
+
+'''
+# synchronous
+def echo( options, args ):
+    # create a session
+    session = requests.Session()
+    # make a sync get request
+    resp = session.get(echo_url)
+    # check the response status code
+    resp_code = resp.status_code;
+    if resp_code == requests.codes.ok:
+        # get the output file path
+        output_file_path = options.output;
+        # write the response to the output file
+        with open(output_file_path, 'w') as out:
+            #out.write(resp.data);
+            out.write(resp.content);
+        return 0;
+    else:
+        return resp_code;
+'''
+
+# asynchronous
+def async_request( options, args, payload ):
+    # add additional parameters to the payload
+    payload["search_mode"] = str(options.search);
+    payload["exact_algorithm"] = str(options.exact);
+    payload["search_threshold"] = str(options.sthreshold);
+    # create a session
+    session = FuturesSession();
+    # make an async post request with requests-futures
+    future_req = session.post(query_url, data=payload);
+    # wait for the request to complete, if it has not already
+    resp = future_req.result();
+    # check the response status code
+    resp_code = resp.status_code;
+    # get the output file path
+    output_file_path = options.output;
+    # write the response to the output file
+    with open(output_file_path, 'w') as out:
+        #out.write(resp.data);
+        out.write(str(resp_code)+"\n"+str(resp.content));
+    if resp_code == requests.codes.ok:
+        return 0;
+    else:
+        return resp_code;
+
+def srase_query( options, args ):
+    multiple_files = {};
+    comma_sep_file_paths = options.files;
+    #print("files: "+str(comma_sep_file_paths)+" - "+str(type(comma_sep_file_paths)));
+    # check if options.files contains at least one file path
+    if comma_sep_file_paths is not None:
+        # split file paths
+        file_paths = comma_sep_file_paths.split(",");
+        # split file names
+        comma_sep_file_names = str(options.names);
+        #print("names: "+str(comma_sep_file_names));
+        file_names = comma_sep_file_names.split(",");
+        # populate a dictionary with the files containing the sequences to query
+        for idx, file_path in enumerate(file_paths):
+            file_name = file_names[idx];
+            with open(file_path, 'r') as content_file:
+                content = content_file.read();
+            multiple_files[file_name] = content;
+        if len(multiple_files) > 0:
+            return async_request( options, args, multiple_files );
+            #return echo( options, args );
+    else:
+        # try with the sequences in --sequences
+        sequences_text = options.sequences;
+        #print("sequences: "+sequences_text);
+        # check if options.sequences contains a list of sequences (one for each row)
+        if sequences_text is not None:
+            sequences_text = str(sequences_text);
+            if sequences_text.strip():
+                # populate a dictionary with the sequences to query
+                seq_counter = 0;
+                sequences_arr = sequences_text.split("__cn__");
+                for seq in sequences_arr:
+                    seq_index = 'sequence'+str(seq_counter);
+                    multiple_files[seq_index] = seq;
+                    #print(str(seq_counter)+": "+seq);
+                    seq_counter += 1;
+                return async_request( options, args, multiple_files );
+                #return echo( options, args );
+            else:
+                return -1;
+    return -1;
+
+def __main__():
+    # Parse the command line options
+    usage = "Usage: search.py --files comma_sep_file_paths --names comma_sep_file_names --sequences sequences_text --search search_mode --exact exact_alg --sthreshold threshold --output output_file_path";
+    parser = optparse.OptionParser(usage = usage);
+    parser.add_option("-f", "--files", type="string",
+                      action="store", dest="files", help="comma-separated file paths");
+    parser.add_option("-n", "--names", type="string",
+                      action="store", dest="names", help="comma-separated names associated to the files specified in --files");
+    parser.add_option("-s", "--sequences", type="string",
+                      action="store", dest="sequences", help="optional field, contains a list of sequences (one for each row)");
+    parser.add_option("-x", "--search", type="int", default=0,
+                      action="store", dest="search", help="search mode");
+    parser.add_option("-e", "--exact", type="int", default=0,
+                      action="store", dest="exact", help="exact algorithm (required only if search is 1)");
+    parser.add_option("-t", "--sthreshold", type="string",
+                      action="store", dest="sthreshold", help="threshold applied to the search algorithm");
+    parser.add_option("-o", "--output", type="string",
+                      action="store", dest="output", help="output file path");
+    parser.add_option("-v", "--version", action="store_true", dest="version",
+                      default=False, help="display version and exit");
+    (options, args) = parser.parse_args();
+    if options.version:
+        print(__version__);
+    else:
+        srase_query( options, args );
+
+if __name__ == "__main__": __main__()
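search.py submits all queries in a single POST to the query.php endpoint and then blocks on the future until the service answers. The payload is a flat dictionary with one entry per input FASTA file (or per sequence) plus the three search parameters. A minimal sketch of that request, with hypothetical payload values:

    # Minimal sketch of the POST issued by search.py (payload values are made up).
    import requests
    from requests_futures.sessions import FuturesSession

    QUERY_URL = "http://deputy.bx.psu.edu/query.php"

    payload = {
        "sequence0": "ACGTACGTACGT",   # one entry per query sequence or per input file
        "search_mode": "1",
        "exact_algorithm": "0",
        "search_threshold": "0.5",
    }

    session = FuturesSession()
    future = session.post(QUERY_URL, data=payload)   # returns immediately
    response = future.result()                       # block until the service replies
    print(response.status_code)
    print(response.content)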
diff -r 000000000000 -r 854be3d51221 search.xml
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/search.xml	Mon Dec 04 16:05:45 2017 -0500
@@ -0,0 +1,69 @@
+
+    your sequences in the big SRA data lake
+
+    python
+    requests
+    requests-futures
+
+    10.1101/090464
+
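Galaxy encodes the newlines of a multi-line text parameter as the token __cn__, which is why srase_query splits the --sequences value on that token before building the payload. A short sketch of that step, with a made-up input string:

    # Sketch of how search.py turns a Galaxy text-area value into payload entries
    # ("__cn__" stands in for the newlines typed by the user; the input is made up).
    sequences_text = "ACGTACGT__cn__TTGGCCAA__cn__GGGAAACC"

    payload = {}
    for counter, sequence in enumerate(sequences_text.split("__cn__")):
        payload["sequence" + str(counter)] = sequence

    print(payload)
    # {'sequence0': 'ACGTACGT', 'sequence1': 'TTGGCCAA', 'sequence2': 'GGGAAACC'}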