data_manager_salmon_index_builder: changeset 1:c5dea2080109 (draft)
Uploaded
| field | value |
| --- | --- |
| author | ieguinoa |
| date | Tue, 14 Aug 2018 11:19:49 -0400 |
| parents | 6cd60ba8a842 |
| children | f7d9182bdcab |
| files | .shed.yml README.md data_manager/data_manager_fetch_gff.py data_manager/data_manager_fetch_gff.xml data_manager/salmon_index_builder.py data_manager/salmon_index_builder.xml data_manager_conf.xml tool-data/all_fasta.loc.sample tool-data/all_gff.loc.sample tool-data/representative_gff.loc.sample tool-data/salmon_indexes.loc.sample tool_data_table_conf.xml.sample |
| diffstat | 12 files changed, 202 insertions(+), 544 deletions(-) |
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/.shed.yml	Tue Aug 14 11:19:49 2018 -0400
@@ -0,0 +1,19 @@
+categories:
+- Data Managers
+description: Pre-generate indexes for Salmon
+homepage_url: https://github.com/COMBINE-lab/salmon
+long_description: |
+  Salmon is a wicked-fast program to produce highly-accurate,
+  transcript-level quantification estimates from RNA-seq data.
+  Salmon achieves its accuracy and speed via a number of different innovations,
+  including the use of quasi-mapping (accurate but fast-to-compute proxies for traditional read alignments),
+  and massively-parallel stochastic collapsed variational inference.
+  The result is a versatile tool that fits nicely into many different pipelines.
+  For example, you can choose to make use of our quasi-mapping algorithm by providing
+  Salmon with raw sequencing reads, or, if it is more convenient, you can provide
+  Salmon with regular alignments (e.g. an unsorted BAM file produced with your favorite aligner),
+  and it will use the same wicked-fast, state-of-the-art inference algorithm to estimate transcript-level abundances for your experiment.
+name: data_manager_salmon_index_builder
+owner: iuc
+remote_repository_url: https://github.com/ieguinoa/data_manager_salmon_index_builder
+type: unrestricted
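For orientation, the two Salmon operations this repository feeds are the index build (which this data manager automates per genome build) and the subsequent quantification. A minimal sketch of both as subprocess calls; the file and directory names are hypothetical placeholders, and the flags are the standard Salmon 0.9-era options:

```python
# Sketch of the Salmon workflow this data manager prepares for.
# transcripts.fa, reads_1.fq, reads_2.fq and the output dirs are hypothetical.
import subprocess

# 1. Build a quasi-mapping index from a transcriptome FASTA.
subprocess.check_call([
    "salmon", "index",
    "-t", "transcripts.fa",  # transcript sequences
    "-i", "salmon_index",    # output index directory
    "-k", "21",              # odd k-mer size, as in the tool XML below
])

# 2. Quantify a paired-end sample against that index
#    (what downstream Salmon tools in Galaxy then do).
subprocess.check_call([
    "salmon", "quant",
    "-i", "salmon_index",
    "-l", "A",               # auto-detect library type
    "-1", "reads_1.fq",
    "-2", "reads_2.fq",
    "-o", "quant_out",
])
```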
--- a/README.md	Tue Aug 14 11:14:52 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-# data_manager_fetch_gff
-Galaxy Data Manager to fetch gene annotation files
--- a/data_manager/data_manager_fetch_gff.py	Tue Aug 14 11:14:52 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,445 +0,0 @@
-#!/usr/bin/env python
-#Dan Blankenberg
-
-import sys
-import os
-import tempfile
-import shutil
-import optparse
-from ftplib import FTP
-import tarfile
-import zipfile
-import gzip
-import bz2
-try:
-    # For Python 3.0 and later
-    from urllib.request import urlopen
-    from io import BytesIO as StringIO
-    from io import UnsupportedOperation
-except ImportError:
-    # Fall back to Python 2's urllib2
-    from urllib2 import urlopen
-    from StringIO import StringIO
-    UnsupportedOperation = AttributeError
-from json import loads, dumps
-
-
-CHUNK_SIZE = 2**20  # 1mb
-
-DATA_TABLE_NAME = 'all_gff'
-
-def cleanup_before_exit( tmp_dir ):
-    if tmp_dir and os.path.exists( tmp_dir ):
-        shutil.rmtree( tmp_dir )
-
-
-def stop_err(msg):
-    sys.stderr.write(msg)
-    sys.exit(1)
-
-
-def get_dbkey_dbname_id_name( params, dbkey_description=None ):
-#    dbkey = params['param_dict']['dbkey_source']['dbkey']
-    #TODO: ensure sequence_id is unique and does not already appear in location file
-    sequence_id = params['param_dict']['sequence_id']
-    if not sequence_id:
-        sequence_id = dbkey  #uuid.uuid4() generate and use an uuid instead?
-
-#    if params['param_dict']['dbkey_source']['dbkey_source_selector'] == 'new':
-#        dbkey_name = params['param_dict']['dbkey_source']['dbkey_name']
-#        if not dbkey_name:
-#            dbkey_name = dbkey
-#    else:
-#        dbkey_name = None
-    dbkey = params['param_dict']['dbkey']
-    dbkey_name = dbkey_description
-    sequence_name = params['param_dict']['sequence_name']
-    if not sequence_name:
-        sequence_name = dbkey_description
-        if not sequence_name:
-            sequence_name = dbkey
-    return dbkey, dbkey_name, sequence_id, sequence_name
-
-
-def _get_files_in_ftp_path( ftp, path ):
-    path_contents = []
-    ftp.retrlines( 'MLSD %s' % ( path ), path_contents.append )
-    return [ line.split( ';' )[ -1 ].lstrip() for line in path_contents ]
-
-
-def _get_stream_readers_for_tar( fh, tmp_dir ):
-    fasta_tar = tarfile.open( fileobj=fh, mode='r:*' )
-    return [x for x in [fasta_tar.extractfile(member) for member in fasta_tar.getmembers()] if x]
-
-
-def _get_stream_readers_for_zip( fh, tmp_dir ):
-    """
-    Unpacks all archived files in a zip file.
-    Individual files will be concatenated (in _stream_fasta_to_file)
-    """
-    fasta_zip = zipfile.ZipFile( fh, 'r' )
-    rval = []
-    for member in fasta_zip.namelist():
-        fasta_zip.extract( member, tmp_dir )
-        rval.append( open( os.path.join( tmp_dir, member ), 'rb' ) )
-    return rval
-
-
-def _get_stream_readers_for_gzip( fh, tmp_dir ):
-    return [ gzip.GzipFile( fileobj=fh, mode='rb') ]
-
-
-def _get_stream_readers_for_bz2( fh, tmp_dir ):
-    return [ bz2.BZ2File( fh.name, 'rb') ]
-
-
-def sort_fasta( fasta_filename, sort_method, params ):
-    if sort_method is None:
-        return
-    assert sort_method in SORTING_METHODS, ValueError( "%s is not a valid sorting option." % sort_method )
-    return SORTING_METHODS[ sort_method ]( fasta_filename, params )
-
-
-def _move_and_index_fasta_for_sorting( fasta_filename ):
-    unsorted_filename = tempfile.NamedTemporaryFile().name
-    shutil.move( fasta_filename, unsorted_filename )
-    fasta_offsets = {}
-    unsorted_fh = open( unsorted_filename )
-    while True:
-        offset = unsorted_fh.tell()
-        line = unsorted_fh.readline()
-        if not line:
-            break
-        if line.startswith( ">" ):
-            line = line.split( None, 1 )[0][1:]
-            fasta_offsets[ line ] = offset
-    unsorted_fh.close()
-    current_order = map( lambda x: x[1], sorted( map( lambda x: ( x[1], x[0] ), fasta_offsets.items() ) ) )
-    return ( unsorted_filename, fasta_offsets, current_order )
-
-
-def _write_sorted_fasta( sorted_names, fasta_offsets, sorted_fasta_filename, unsorted_fasta_filename ):
-    unsorted_fh = open( unsorted_fasta_filename )
-    sorted_fh = open( sorted_fasta_filename, 'wb+' )
-
-    for name in sorted_names:
-        offset = fasta_offsets[ name ]
-        unsorted_fh.seek( offset )
-        sorted_fh.write( unsorted_fh.readline() )
-        while True:
-            line = unsorted_fh.readline()
-            if not line or line.startswith( ">" ):
-                break
-            sorted_fh.write( line )
-    unsorted_fh.close()
-    sorted_fh.close()
-
-
-def _sort_fasta_as_is( fasta_filename, params ):
-    return
-
-def _sort_fasta_lexicographical( fasta_filename, params ):
-    ( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
-    sorted_names = sorted( fasta_offsets.keys() )
-    if sorted_names == current_order:
-        shutil.move( unsorted_filename, fasta_filename )
-    else:
-        _write_sorted_fasta( sorted_names, fasta_offsets, fasta_filename, unsorted_filename )
-
-
-def _sort_fasta_gatk( fasta_filename, params ):
-    #This method was added by reviewer request.
-    ( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
-    sorted_names = map( str, range( 1, 23 ) ) + [ 'X', 'Y' ]
-    #detect if we have chrN, or just N
-    has_chr = False
-    for chrom in sorted_names:
-        if "chr%s" % chrom in current_order:
-            has_chr = True
-            break
-
-    if has_chr:
-        sorted_names = map( lambda x: "chr%s" % x, sorted_names)
-        sorted_names.insert( 0, "chrM" )
-    else:
-        sorted_names.insert( 0, "MT" )
-    sorted_names.extend( map( lambda x: "%s_random" % x, sorted_names ) )
-
-    existing_sorted_names = []
-    for name in sorted_names:
-        if name in current_order:
-            existing_sorted_names.append( name )
-    for name in current_order:
-        #TODO: confirm that non-canonical names do not need to be sorted specially
-        if name not in existing_sorted_names:
-            existing_sorted_names.append( name )
-
-    if existing_sorted_names == current_order:
-        shutil.move( unsorted_filename, fasta_filename )
-    else:
-        _write_sorted_fasta( existing_sorted_names, fasta_offsets, fasta_filename, unsorted_filename )
-
-
-def _sort_fasta_custom( fasta_filename, params ):
-    ( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
-    sorted_names = []
-    for id_repeat in params['param_dict']['sorting']['sequence_identifiers']:
-        sorted_names.append( id_repeat[ 'identifier' ] )
-    handle_not_listed = params['param_dict']['sorting']['handle_not_listed_selector']
-    if handle_not_listed.startswith( 'keep' ):
-        add_list = []
-        for name in current_order:
-            if name not in sorted_names:
-                add_list.append( name )
-        if add_list:
-            if handle_not_listed == 'keep_append':
-                sorted_names.extend( add_list )
-            else:
-                add_list.extend( sorted_names )
-                sorted_names = add_list
-    if sorted_names == current_order:
-        shutil.move( unsorted_filename, fasta_filename )
-    else:
-        _write_sorted_fasta( sorted_names, fasta_offsets, fasta_filename, unsorted_filename )
-
-
-def _download_file(start, fh):
-    tmp = tempfile.NamedTemporaryFile()
-    tmp.write(start)
-    tmp.write(fh.read())
-    tmp.flush()
-    tmp.seek(0)
-    return tmp
-
-
-def get_stream_reader(fh, tmp_dir):
-    """
-    Check if file is compressed and return correct stream reader.
-    If file has to be downloaded, do it now.
-    """
-    magic_dict = {
-        b"\x1f\x8b\x08": _get_stream_readers_for_gzip,
-        b"\x42\x5a\x68": _get_stream_readers_for_bz2,
-        b"\x50\x4b\x03\x04": _get_stream_readers_for_zip,
-    }
-    start_of_file = fh.read(CHUNK_SIZE)
-    try:
-        fh.seek(0)
-    except UnsupportedOperation:  # This is if fh has been created by urlopen
-        fh = _download_file(start_of_file, fh)
-    for k, v in magic_dict.items():
-        if start_of_file.startswith(k):
-            return v(fh, tmp_dir)
-    try:  # Check if file is tar file
-        if tarfile.open(fileobj=StringIO(start_of_file)):
-            return _get_stream_readers_for_tar(fh, tmp_dir)
-    except tarfile.ReadError:
-        pass
-    return fh
-
-
-def _get_ucsc_download_address(params, dbkey):
-    """
-    Check if we can find the correct file for the supplied dbkey on UCSC's FTP server
-    """
-    UCSC_FTP_SERVER = 'hgdownload.cse.ucsc.edu'
-    UCSC_DOWNLOAD_PATH = '/goldenPath/%s/bigZips/'
-    COMPRESSED_EXTENSIONS = ['.tar.gz', '.tgz', '.tar.bz2', '.zip', '.fa.gz', '.fa.bz2']
-
-    email = params['param_dict']['__user_email__']
-    if not email:
-        email = 'anonymous@example.com'
-
-    ucsc_dbkey = params['param_dict']['reference_source']['requested_dbkey'] or dbkey
-    UCSC_CHROM_FA_FILENAMES = ['%s.chromFa' % ucsc_dbkey, 'chromFa', ucsc_dbkey]
-
-    ftp = FTP(UCSC_FTP_SERVER)
-    ftp.login('anonymous', email)
-
-    ucsc_path = UCSC_DOWNLOAD_PATH % ucsc_dbkey
-    path_contents = _get_files_in_ftp_path(ftp, ucsc_path)
-    ftp.quit()
-
-    for ucsc_chrom_fa_filename in UCSC_CHROM_FA_FILENAMES:
-        for ext in COMPRESSED_EXTENSIONS:
-            if "%s%s" % (ucsc_chrom_fa_filename, ext) in path_contents:
-                ucsc_file_name = "%s%s%s" % (ucsc_path, ucsc_chrom_fa_filename, ext)
-                return "ftp://%s%s" % (UCSC_FTP_SERVER, ucsc_file_name)
-
-    raise Exception('Unable to determine filename for UCSC Genome for %s: %s' % (ucsc_dbkey, path_contents))
-
-def add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params):
-    for data_table_name, data_table_entry in _stream_fasta_to_file( fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params ):
-        if data_table_entry:
-            _add_data_table_entry( data_manager_dict, data_table_entry, data_table_name )
-
-
-def download_from_ucsc( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
-    url = _get_ucsc_download_address(params, dbkey)
-    fasta_readers = get_stream_reader(urlopen(url), tmp_dir)
-    add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)
-
-
-def download_from_ncbi( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
-    NCBI_DOWNLOAD_URL = 'http://togows.dbcls.jp/entry/ncbi-nucleotide/%s.fasta'  #FIXME: taken from dave's genome manager...why some japan site?
-    requested_identifier = params['param_dict']['reference_source']['requested_identifier']
-    url = NCBI_DOWNLOAD_URL % requested_identifier
-    fasta_readers = get_stream_reader(urlopen(url), tmp_dir)
-    add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)
-
-
-def download_from_url( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
-    urls = filter( bool, map( lambda x: x.strip(), params['param_dict']['reference_source']['user_url'].split( '\n' ) ) )
-    fasta_readers = [ get_stream_reader(urlopen( url ), tmp_dir) for url in urls ]
-    add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)
-
-
-def download_from_history( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
-    #TODO: allow multiple FASTA input files
-    input_filename = params['param_dict']['reference_source']['input_fasta']
-    if isinstance( input_filename, list ):
-        fasta_readers = [ get_stream_reader(open(filename, 'rb'), tmp_dir) for filename in input_filename ]
-    else:
-        fasta_readers = get_stream_reader(open(input_filename), tmp_dir)
-    add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)
-
-
-def copy_from_directory( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
-    input_filename = params['param_dict']['reference_source']['fasta_filename']
-    create_symlink = params['param_dict']['reference_source']['create_symlink'] == 'create_symlink'
-    if create_symlink:
-        data_table_entries = _create_symlink( input_filename, target_directory, dbkey, dbkey_name, sequence_id, sequence_name )
-    else:
-        if isinstance( input_filename, list ):
-            fasta_readers = [ get_stream_reader(open(filename, 'rb'), tmp_dir) for filename in input_filename ]
-        else:
-            fasta_readers = get_stream_reader(open(input_filename), tmp_dir)
-        data_table_entries = _stream_fasta_to_file( fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params )
-    for data_table_name, data_table_entry in data_table_entries:
-        if data_table_entry:
-            _add_data_table_entry( data_manager_dict, data_table_entry, data_table_name )
-
-
-def _add_data_table_entry( data_manager_dict, data_table_entry, data_table_name ):
-    data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
-    data_manager_dict['data_tables'][data_table_name] = data_manager_dict['data_tables'].get( DATA_TABLE_NAME, [] )
-    data_manager_dict['data_tables'][data_table_name].append( data_table_entry )
-    return data_manager_dict
-
-
-def _stream_fasta_to_file( fasta_stream, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params, close_stream=True ):
-    fasta_base_filename = "%s.gff" % sequence_id
-    fasta_filename = os.path.join( target_directory, fasta_base_filename )
-    with open( fasta_filename, 'wb+' ) as fasta_writer:
-
-        if isinstance( fasta_stream, list ) and len( fasta_stream ) == 1:
-            fasta_stream = fasta_stream[0]
-
-        if isinstance( fasta_stream, list ):
-            last_char = None
-            for fh in fasta_stream:
-                if last_char not in [ None, '\n', '\r', b'\n', b'\r' ]:
-                    fasta_writer.write( b'\n' )
-                while True:
-                    data = fh.read( CHUNK_SIZE )
-                    if data:
-                        fasta_writer.write( data )
-                        last_char = data[-1]
-                    else:
-                        break
-                if close_stream:
-                    fh.close()
-        else:
-            while True:
-                data = fasta_stream.read( CHUNK_SIZE )
-                if data:
-                    fasta_writer.write( data )
-                else:
-                    break
-            if close_stream:
-                fasta_stream.close()
-
-    #sort_fasta( fasta_filename, params['param_dict']['sorting']['sort_selector'], params )
-
-
-    return [ ( DATA_TABLE_NAME, dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_filename ) ) ]
-
-
-def compute_fasta_length( fasta_file, out_file, keep_first_word=False ):
-
-    infile = fasta_file
-    out = open( out_file, 'w')
-
-    fasta_title = ''
-    seq_len = 0
-
-    first_entry = True
-
-    for line in open( infile ):
-        line = line.strip()
-        if not line or line.startswith( '#' ):
-            continue
-        if line[0] == '>':
-            if first_entry == False:
-                if keep_first_word:
-                    fasta_title = fasta_title.split()[0]
-                out.write( "%s\t%d\n" % ( fasta_title[ 1: ], seq_len ) )
-            else:
-                first_entry = False
-            fasta_title = line
-            seq_len = 0
-        else:
-            seq_len += len(line)
-
-    # last fasta-entry
-    if keep_first_word:
-        fasta_title = fasta_title.split()[0]
-    out.write( "%s\t%d\n" % ( fasta_title[ 1: ], seq_len ) )
-    out.close()
-
-
-def _create_symlink( input_filename, target_directory, dbkey, dbkey_name, sequence_id, sequence_name ):
-    fasta_base_filename = "%s.fa" % sequence_id
-    fasta_filename = os.path.join( target_directory, fasta_base_filename )
-    os.symlink( input_filename, fasta_filename )
-    return [ ( DATA_TABLE_NAME, dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_filename ) ) ]
-
-
-REFERENCE_SOURCE_TO_DOWNLOAD = dict( ucsc=download_from_ucsc, ncbi=download_from_ncbi, url=download_from_url, history=download_from_history, directory=copy_from_directory )
-
-SORTING_METHODS = dict( as_is=_sort_fasta_as_is, lexicographical=_sort_fasta_lexicographical, gatk=_sort_fasta_gatk, custom=_sort_fasta_custom )
-
-
-def main():
-    #Parse Command Line
-    parser = optparse.OptionParser()
-    parser.add_option( '-d', '--dbkey_description', dest='dbkey_description', action='store', type="string", default=None, help='dbkey_description' )
-    parser.add_option( '-t', '--type', dest='file_type', action='store', type='string', default=None, help='file_type')
-    (options, args) = parser.parse_args()
-
-    filename = args[0]
-    global DATA_TABLE_NAME
-    if options.file_type == 'representative':
-        DATA_TABLE_NAME = 'representative_gff'
-    params = loads( open( filename ).read() )
-    target_directory = params[ 'output_data' ][0]['extra_files_path']
-    os.mkdir( target_directory )
-    data_manager_dict = {}
-
-    dbkey, dbkey_name, sequence_id, sequence_name = get_dbkey_dbname_id_name( params, dbkey_description=options.dbkey_description )
-
-    if dbkey in [ None, '', '?' ]:
-        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )
-
-    # Create a tmp_dir, in case a zip file needs to be uncompressed
-    tmp_dir = tempfile.mkdtemp()
-    #Fetch the FASTA
-    try:
-        REFERENCE_SOURCE_TO_DOWNLOAD[ params['param_dict']['reference_source']['reference_source_selector'] ]( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir )
-    finally:
-        cleanup_before_exit(tmp_dir)
-    #save info to json file
-    open( filename, 'wb' ).write( dumps( data_manager_dict ).encode() )
-
-if __name__ == "__main__":
-    main()
--- a/data_manager/data_manager_fetch_gff.xml	Tue Aug 14 11:14:52 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,66 +0,0 @@
-<tool id="data_manager_fetch_gff" name="Create entries in gff data table" version="0.0.1" tool_type="manage_data">
-    <description>fetching</description>
-    <command><![CDATA[
-        python "$__tool_directory__"/data_manager_fetch_gff.py "${out_file}"
-        --type $file_type
-        --dbkey_description ${ dbkey.get_display_text() }
-    ]]></command>
-    <inputs>
-        <param name="file_type" type="select" label="GFF file with only one representative transcript per gene (for htseq-count use) or full features file">
-            <option value="representative">Representative GFF</option>
-            <option value="full">GFF with complete features</option>
-        </param>
-        <param name="dbkey" type="genomebuild" label="DBKEY to assign to data" />
-        <param type="text" name="sequence_name" value="" label="Name of sequence" />
-        <param type="text" name="sequence_id" value="" label="ID for sequence" />
-        <conditional name="reference_source">
-            <param name="reference_source_selector" type="select" label="Choose the source for the reference genome">
-                <option value="url">URL</option>
-                <option value="history">History</option>
-                <option value="directory">Directory on Server</option>
-            </param>
-            <when value="url">
-                <param type="text" area="True" name="user_url" value="http://" label="URLs" optional="False" />
-            </when>
-            <when value="history">
-                <param name="input_fasta" type="data" format="fasta" label="FASTA File" multiple="False" optional="False" />
-            </when>
-            <when value="directory">
-                <param type="text" name="fasta_filename" value="" label="Full path to FASTA File on disk" optional="False" />
-                <param type="boolean" name="create_symlink" truevalue="create_symlink" falsevalue="copy_file" label="Create symlink to original data instead of copying" checked="False" />
-            </when>
-        </conditional>
-    </inputs>
-    <outputs>
-        <data name="out_file" format="data_manager_json"/>
-    </outputs>
-    <tests>
-        <!-- TODO: need some way to test that new entry was added to data table -->
-        <test>
-            <param name="dbkey" value="anoGam1"/>
-            <param name="sequence_name" value=""/>
-            <param name="sequence_desc" value=""/>
-            <param name="sequence_id" value=""/>
-            <param name="reference_source_selector" value="history"/>
-            <param name="input_fasta" value="phiX174.fasta"/>
-            <param name="sort_selector" value="as_is"/>
-            <output name="out_file" file="phiX174.data_manager_json"/>
-        </test>
-    </tests>
-    <help>
-**What it does**
-
-Fetches a gff file from various sources (URL, Galaxy History, or a server directory) and populates the "all_gff" data table.
-
-------
-
-.. class:: infomark
-
-**Notice:** If you leave name, description, or id blank, it will be generated automatically.
-
-    </help>
-</tool>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/salmon_index_builder.py	Tue Aug 14 11:19:49 2018 -0400
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+# Based heavily on the kallisto data manager wrapper script by iuc
+from __future__ import print_function
+
+import argparse
+import os
+import subprocess
+import sys
+from json import dumps, loads
+
+DEFAULT_DATA_TABLE_NAME = "salmon_indexes"
+
+
+def get_id_name( params, dbkey, fasta_description=None ):
+    # TODO: ensure sequence_id is unique and does not already appear in location file
+    sequence_id = params['param_dict']['sequence_id']
+    if not sequence_id:
+        sequence_id = dbkey
+
+    sequence_name = params['param_dict']['sequence_name']
+    if not sequence_name:
+        sequence_name = fasta_description
+        if not sequence_name:
+            sequence_name = dbkey
+    return sequence_id, sequence_name
+
+
+def build_salmon_index( data_manager_dict, options, params, sequence_id, sequence_name ):
+    data_table_name = options.data_table_name or DEFAULT_DATA_TABLE_NAME
+    target_directory = params[ 'output_data' ][0]['extra_files_path']
+    if not os.path.exists( target_directory ):
+        os.mkdir( target_directory )
+    args = [ 'salmon', 'index' ]
+    if options.kmer_size != '':
+        args.append('-k')
+        args.append(options.kmer_size)
+    args.extend( [ '-t', options.fasta_filename, '-i', sequence_id ] )
+    proc = subprocess.Popen( args=args, shell=False, cwd=target_directory )
+    return_code = proc.wait()
+    if return_code:
+        print("Error building index.", file=sys.stderr)
+        sys.exit( return_code )
+    data_table_entry = dict( value=sequence_id, dbkey=options.fasta_dbkey, name=sequence_name, path=sequence_id )
+    _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
+
+
+def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
+    data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
+    data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
+    data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
+    return data_manager_dict
+
+
+def main():
+    # Parse Command Line
+    parser = argparse.ArgumentParser()
+    parser.add_argument( '--output', dest='output', action='store', type=str, default=None )
+    parser.add_argument( '--fasta_filename', dest='fasta_filename', action='store', type=str, default=None )
+    parser.add_argument( '--fasta_dbkey', dest='fasta_dbkey', action='store', type=str, default=None )
+    parser.add_argument( '--fasta_description', dest='fasta_description', action='store', type=str, default=None )
+    parser.add_argument( '--data_table_name', dest='data_table_name', action='store', type=str, default='salmon_indexes' )
+    parser.add_argument( '-k', '--kmer_size', dest='kmer_size', action='store', type=str, help='kmer_size' )
+    options = parser.parse_args()
+
+    filename = options.output
+
+    params = loads( open( filename ).read() )
+    data_manager_dict = {}
+
+    if options.fasta_dbkey in [ None, '', '?' ]:
+        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( options.fasta_dbkey ) )
+
+    sequence_id, sequence_name = get_id_name( params, dbkey=options.fasta_dbkey, fasta_description=options.fasta_description )
+
+    # build the index
+    build_salmon_index( data_manager_dict, options, params, sequence_id, sequence_name )
+
+    # save info to json file
+    open( filename, 'w' ).write( dumps( data_manager_dict ) )
+
+
+if __name__ == "__main__":
+    main()
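The wrapper above communicates with Galaxy through the JSON file passed via --output: Galaxy pre-populates it with run parameters, and the script overwrites it with the rows to add to the data table. A simplified sketch of that round trip; the values are illustrative, only the keys the script actually reads are shown, and real params files carry more:

```python
# Simplified sketch of the data manager JSON round trip (illustrative values).
from json import dumps

# What Galaxy writes to the --output file before the script runs:
params = {
    "param_dict": {"sequence_id": "hg38_default", "sequence_name": "Human hg38"},
    "output_data": [{"extra_files_path": "/galaxy/datasets/dataset_42_files"}],
}

# What the script writes back after `salmon index` succeeds:
data_manager_dict = {
    "data_tables": {
        "salmon_indexes": [{
            "value": "hg38_default",  # sequence_id
            "dbkey": "hg38",          # --fasta_dbkey
            "name": "Human hg38",     # sequence_name
            "path": "hg38_default",   # index dir inside extra_files_path
        }]
    }
}
print(dumps(data_manager_dict, indent=2))
```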
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/salmon_index_builder.xml	Tue Aug 14 11:19:49 2018 -0400
@@ -0,0 +1,38 @@
+<tool id="salmon_index_builder_data_manager" name="Salmon" tool_type="manage_data" version="0.9.1">
+    <description>index builder</description>
+    <requirements>
+        <requirement type="package" version="0.9.1">salmon</requirement>
+    </requirements>
+    <command detect_errors="exit_code"><![CDATA[
+        python '$__tool_directory__/salmon_index_builder.py' --output '${out_file}'
+        --fasta_filename '${all_fasta_source.fields.path}'
+        --fasta_dbkey '${all_fasta_source.fields.dbkey}'
+        --fasta_description '${all_fasta_source.fields.name}'
+        --kmer_size "${kmer_size}"
+        --data_table_name salmon_indexes
+    ]]></command>
+    <inputs>
+        <param label="Source FASTA Sequence" name="all_fasta_source" type="select">
+            <options from_data_table="all_fasta" />
+        </param>
+        <param name="sequence_name" type="text" value="" label="Name of sequence" />
+        <param name="sequence_id" type="text" value="" label="ID for sequence" />
+        <param name="kmer_size" type="integer" optional="true" value="21" max="32" label="The size of the k-mer on which the index is built"
+               help="There is a tradeoff here between the distinctiveness of the k-mers and their robustness to errors. The shorter the k-mers, the more robust they will be to errors in the reads, but the longer the k-mers, the more distinct they will be. We generally recommend using a k-mer size of at least 20. Must be an odd value." />
+    </inputs>
+    <outputs>
+        <data name="out_file" format="data_manager_json" />
+    </outputs>
+    <help><![CDATA[
+.. class:: infomark
+
+**Notice:** If you leave name, description, or id blank, it will be generated automatically.
+    ]]></help>
+</tool>
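The kmer_size constraints stated in the help text (odd, and at most 32 per the param's max attribute) are easy to express as a check. A hypothetical validator, not part of the tool itself:

```python
# Hypothetical validator mirroring the kmer_size rules stated above:
# an odd integer no larger than the XML's max="32".
def validate_kmer_size(k: int) -> int:
    if not 0 < k <= 32:
        raise ValueError("k-mer size must be between 1 and 32")
    if k % 2 == 0:
        raise ValueError("k-mer size must be odd")
    return k

validate_kmer_size(21)  # the tool's default passes
```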
--- a/data_manager_conf.xml	Tue Aug 14 11:14:52 2018 -0400
+++ b/data_manager_conf.xml	Tue Aug 14 11:19:49 2018 -0400
@@ -1,32 +1,17 @@
 <?xml version="1.0"?>
 <data_managers>
-    <data_manager tool_file="data_manager/data_manager_fetch_gff.xml" id="data_manager_fetch_gff">
-        <data_table name="all_gff">
+    <data_manager tool_file="data_manager/salmon_index_builder.xml" id="salmon_index_builder" version="0.1">
+        <data_table name="salmon_indexes">
             <output>
                 <column name="value" />
                 <column name="dbkey" />
                 <column name="name" />
-                <column name="path" output_ref="out_file">
-                    <move type="file">
-                        <source>${path}</source>
-                        <target base="${GALAXY_DATA_MANAGER_DATA_PATH}">${dbkey}/gff/${path}</target>
+                <column name="path" output_ref="out_file">
+                    <move type="directory" relativize_symlinks="True">
+                        <!-- <source>${path}</source> --> <!-- out_file.extra_files_path is used as base by default --> <!-- if no source, eg for type=directory, then refers to base -->
+                        <target base="${GALAXY_DATA_MANAGER_DATA_PATH}">${dbkey}/salmon_index/${value}</target>
                     </move>
-                    <value_translation>${GALAXY_DATA_MANAGER_DATA_PATH}/${dbkey}/gff/${path}</value_translation>
-                    <value_translation type="function">abspath</value_translation>
-                </column>
-            </output>
-        </data_table>
-        <data_table name="representative_gff">
-            <output>
-                <column name="value" />
-                <column name="dbkey" />
-                <column name="name" />
-                <column name="path" output_ref="out_file">
-                    <move type="file">
-                        <source>${path}</source>
-                        <target base="${GALAXY_DATA_MANAGER_DATA_PATH}">${dbkey}/representative_gff/${path}</target>
-                    </move>
-                    <value_translation>${GALAXY_DATA_MANAGER_DATA_PATH}/${dbkey}/representative_gff/${path}</value_translation>
+                    <value_translation>${GALAXY_DATA_MANAGER_DATA_PATH}/${dbkey}/salmon_index/${value}/${path}</value_translation>
                     <value_translation type="function">abspath</value_translation>
                 </column>
             </output>
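The <move>/<value_translation> pair above determines where the built index lands and what path gets recorded: the index directory is moved under ${GALAXY_DATA_MANAGER_DATA_PATH}/${dbkey}/salmon_index/${value}, the stored path is rewritten to match, and then made absolute. A rough Python equivalent of the two translations, with a hypothetical base path:

```python
import os

# Hypothetical Galaxy data manager base path.
GALAXY_DATA_MANAGER_DATA_PATH = "/galaxy/tool-data"

def translate_index_path(dbkey: str, value: str, path: str) -> str:
    # First <value_translation>: point at the moved index directory.
    p = "%s/%s/salmon_index/%s/%s" % (GALAXY_DATA_MANAGER_DATA_PATH, dbkey, value, path)
    # Second <value_translation type="function">: abspath.
    return os.path.abspath(p)

print(translate_index_path("hg38", "hg38_default", "hg38_default"))
# -> /galaxy/tool-data/hg38/salmon_index/hg38_default/hg38_default
```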
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/all_fasta.loc.sample	Tue Aug 14 11:19:49 2018 -0400
@@ -0,0 +1,18 @@
+#This file lists the locations and dbkeys of all the fasta files
+#under the "genome" directory (a directory that contains a directory
+#for each build). The script extract_fasta.py will generate the file
+#all_fasta.loc. This file has the format (white space characters are
+#TAB characters):
+#
+#<unique_build_id>	<dbkey>	<display_name>	<file_path>
+#
+#So, all_fasta.loc could look something like this:
+#
+#apiMel3	apiMel3	Honeybee (Apis mellifera): apiMel3	/path/to/genome/apiMel3/apiMel3.fa
+#hg19canon	hg19	Human (Homo sapiens): hg19 Canonical	/path/to/genome/hg19/hg19canon.fa
+#hg19full	hg19	Human (Homo sapiens): hg19 Full	/path/to/genome/hg19/hg19full.fa
+#
+#Your all_fasta.loc file should contain an entry for each individual
+#fasta file. So there will be multiple fasta files for each build,
+#such as with hg19 above.
+#
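All the .loc files in this repository share this four-column, tab-separated layout, so a single reader covers them. A minimal sketch (illustrative only; Galaxy has its own loader):

```python
import csv
import os

def read_loc(path):
    """Yield (value, dbkey, name, path) tuples from a Galaxy .loc file."""
    with open(path) as fh:
        for row in csv.reader(fh, delimiter="\t"):
            # Skip comments and blank lines.
            if not row or row[0].startswith("#"):
                continue
            yield tuple(row[:4])

# Example: list the dbkeys registered in a hypothetical local all_fasta.loc.
if os.path.exists("all_fasta.loc"):
    for value, dbkey, name, fasta_path in read_loc("all_fasta.loc"):
        print(dbkey, fasta_path)
```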
--- a/tool-data/all_gff.loc.sample	Tue Aug 14 11:14:52 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-#The all_gff.loc file has this format:
-#
-#<unique_build_id>	<dbkey>	<display_name>	<path_to_gff_file>
--- a/tool-data/representative_gff.loc.sample	Tue Aug 14 11:14:52 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-#The representative_gff.loc file has this format:
-#
-#<unique_build_id>	<dbkey>	<display_name>	<path_to_gff_file>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/salmon_indexes.loc.sample	Tue Aug 14 11:19:49 2018 -0400
@@ -0,0 +1,28 @@
+# salmon_indexes.loc.sample
+# This is a *.loc.sample file distributed with Galaxy that enables tools
+# to use a directory of indexed data files. This one is for Salmon.
+# See the wiki: http://wiki.galaxyproject.org/Admin/NGS%20Local%20Setup
+# First create these data files and save them in your own data directory structure.
+# Then, create a salmon_indexes.loc file to use those indexes with tools.
+# Copy this file, save it with the same name (minus the .sample),
+# follow the format examples, and store the result in this directory.
+# The file should include a one-line entry for each index set.
+# The path points to the "basename" for the set, not a specific file.
+# It has four text columns separated by TABS.
+#
+# <unique_build_id>	<dbkey>	<display_name>	<file_base_path>
+#
+# So, for example, if you had sacCer3 indexes stored in:
+#
+#    /depot/data2/galaxy/sacCer3/salmon_indexes/
+#
+# then the salmon_indexes.loc entry could look like this:
+#
+#sacCer3	sacCer3	S. cerevisiae Apr. 2011 (SacCer_Apr2011/sacCer3) (sacCer3)	/depot/data2/galaxy/sacCer3/salmon_indexes
+#
+# More examples:
+#
+#mm10	mm10	Mouse (mm10)	/depot/data2/galaxy/salmon_indexes/mm10
+#dm3	dm3	D. melanogaster (dm3)	/depot/data2/galaxy/salmon_indexes/dm3
+#
+#
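The data manager appends these rows itself through the data_manager_conf.xml mapping above, but a hand-added entry is just a tab-joined line. For illustration, with hypothetical values:

```python
# Appending a salmon_indexes.loc row by hand (hypothetical values);
# the data manager normally does this for you.
entry = (
    "sacCer3_default",                                    # unique_build_id
    "sacCer3",                                            # dbkey
    "S. cerevisiae sacCer3",                              # display_name
    "/depot/data2/galaxy/sacCer3/salmon_indexes/default"  # index base path
)
with open("salmon_indexes.loc", "a") as fh:
    fh.write("\t".join(entry) + "\n")
```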
--- a/tool_data_table_conf.xml.sample	Tue Aug 14 11:14:52 2018 -0400
+++ b/tool_data_table_conf.xml.sample	Tue Aug 14 11:19:49 2018 -0400
@@ -1,5 +1,12 @@
-<?xml version="1.0"?>
 <tables>
-    <table name="all_gff" comment_char="#"> <columns>value, dbkey, name, path</columns> <file path="tool-data/all_gff.loc" /> </table>
-    <table name="representative_gff" comment_char="#"> <columns>value, dbkey, name, path</columns> <file path="tool-data/representative_gff.loc" /> </table>
+    <!-- Locations of all fasta files under genome directory -->
+    <table name="all_fasta" comment_char="#" allow_duplicate_entries="False">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/all_fasta.loc" />
+    </table>
+    <!-- Locations of indexes in the Salmon mapper format -->
+    <table name="salmon_indexes" comment_char="#" allow_duplicate_entries="False">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/salmon_indexes.loc" />
+    </table>
 </tables>