Commit message:
planemo upload for repository https://github.com/galaxyproject/tools-devteam/tree/master/tool_collections/gops/intersect commit a1517c9d22029095120643bbe2c8fa53754dd2b7

modified:
  gops_intersect.py
  intersect.xml
  operation_filter.py
  tool_dependencies.xml

removed:
  utils/__init__.py
  utils/gff_util.py
  utils/odict.py
diff -r 77641d5731c8 -r 5f72be09cfd3 gops_intersect.py
--- a/gops_intersect.py	Mon Apr 14 09:20:28 2014 -0400
+++ b/gops_intersect.py	Wed Nov 11 12:48:44 2015 -0500
@@ -11,34 +11,34 @@
 -G, --gff1: input 1 is GFF format, meaning start and end coordinates are 1-based, closed interval
 -H, --gff2: input 2 is GFF format, meaning start and end coordinates are 1-based, closed interval
 """
-import sys, traceback, fileinput
-from warnings import warn
-from bx.intervals import *
-from bx.intervals.io import *
-from bx.intervals.operations.intersect import *
+import fileinput
+import sys
+from bx.intervals.io import GenomicInterval, NiceReaderWrapper
+from bx.intervals.operations.intersect import intersect
 from bx.cookbook import doc_optparse
-from galaxy.tools.util.galaxyops import *
+from bx.tabular.io import ParseError
+from galaxy.tools.util.galaxyops import fail, parse_cols_arg, skipped
 from utils.gff_util import GFFFeature, GFFReaderWrapper, convert_bed_coords_to_gff
 
 assert sys.version_info[:2] >= ( 2, 4 )
 
+
 def main():
     mincols = 1
-    upstream_pad = 0
-    downstream_pad = 0
 
     options, args = doc_optparse.parse( __doc__ )
     try:
         chr_col_1, start_col_1, end_col_1, strand_col_1 = parse_cols_arg( options.cols1 )
-        chr_col_2, start_col_2, end_col_2, strand_col_2 = parse_cols_arg( options.cols2 )
-        if options.mincols: mincols = int( options.mincols )
+        chr_col_2, start_col_2, end_col_2, strand_col_2 = parse_cols_arg( options.cols2 )
+        if options.mincols:
+            mincols = int( options.mincols )
         pieces = bool( options.pieces )
         in1_gff_format = bool( options.gff1 )
         in2_gff_format = bool( options.gff2 )
         in_fname, in2_fname, out_fname = args
     except:
         doc_optparse.exception()
-    
+
     # Set readers to handle either GFF or default format.
     if in1_gff_format:
         in1_reader_wrapper = GFFReaderWrapper
@@ -48,29 +48,29 @@
         in2_reader_wrapper = GFFReaderWrapper
     else:
         in2_reader_wrapper = NiceReaderWrapper
-    
+
     g1 = in1_reader_wrapper( fileinput.FileInput( in_fname ),
-                            chrom_col=chr_col_1,
-                            start_col=start_col_1,
-                            end_col=end_col_1,
-                            strand_col=strand_col_1,
-                            fix_strand=True )
+                             chrom_col=chr_col_1,
+                             start_col=start_col_1,
+                             end_col=end_col_1,
+                             strand_col=strand_col_1,
+                             fix_strand=True )
     if in1_gff_format:
         # Intersect requires coordinates in BED format.
-        g1.convert_to_bed_coord=True
+        g1.convert_to_bed_coord = True
     g2 = in2_reader_wrapper( fileinput.FileInput( in2_fname ),
-                            chrom_col=chr_col_2,
-                            start_col=start_col_2,
-                            end_col=end_col_2,
-                            strand_col=strand_col_2,
-                            fix_strand=True )
+                             chrom_col=chr_col_2,
+                             start_col=start_col_2,
+                             end_col=end_col_2,
+                             strand_col=strand_col_2,
+                             fix_strand=True )
     if in2_gff_format:
         # Intersect requires coordinates in BED format.
-        g2.convert_to_bed_coord=True
-    
+        g2.convert_to_bed_coord = True
+
     out_file = open( out_fname, "w" )
     try:
-        for feature in intersect( [g1,g2], pieces=pieces, mincols=mincols ):
+        for feature in intersect( [g1, g2], pieces=pieces, mincols=mincols ):
             if isinstance( feature, GFFFeature ):
                 # Convert back to GFF coordinates since reader converted automatically.
                 convert_bed_coords_to_gff( feature )
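The docstring context lines above spell out the coordinate conventions at play: GFF intervals are 1-based and closed, while the intersect operation works on BED-style 0-based, half-open coordinates. That is why the readers set convert_to_bed_coord and results are passed back through convert_bed_coords_to_gff. A minimal sketch of that conversion follows; the function names are illustrative only, not the module's API (the real logic lives in the removed utils/gff_util.py wrappers):

def gff_to_bed_coords(start, end):
    # GFF: 1-based, closed interval [start, end]
    # BED: 0-based, half-open interval [start, end)
    return start - 1, end


def bed_to_gff_coords(start, end):
    # Inverse conversion, applied to results before writing GFF output.
    return start + 1, end


# A feature covering bases 1..100 in GFF terms is [0, 100) in BED terms:
assert gff_to_bed_coords(1, 100) == (0, 100)
assert bed_to_gff_coords(0, 100) == (1, 100)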
diff -r 77641d5731c8 -r 5f72be09cfd3 intersect.xml
--- a/intersect.xml	Mon Apr 14 09:20:28 2014 -0400
+++ b/intersect.xml	Wed Nov 11 12:48:44 2015 -0500
@@ -32,7 +32,7 @@
         <param format="interval,gff" name="input2" type="data" help="Second dataset">
             <label>that intersect</label>
         </param>
-        <param name="min" size="4" type="integer" value="1" min="1" help="(bp)">
+        <param name="min" type="integer" value="1" min="1" help="(bp)">
             <label>for at least</label>
         </param>
     </inputs>
diff -r 77641d5731c8 -r 5f72be09cfd3 operation_filter.py
--- a/operation_filter.py	Mon Apr 14 09:20:28 2014 -0400
+++ b/operation_filter.py	Wed Nov 11 12:48:44 2015 -0500
@@ -1,7 +1,4 @@
 # runs after the job (and after the default post-filter)
-import os
-from galaxy import eggs
-from galaxy import jobs
 from galaxy.tools.parameters import DataToolParameter
 
 from galaxy.jobs.handler import JOB_ERROR
@@ -12,11 +9,6 @@
 except:
     from sets import Set as set
 
-#def exec_before_process(app, inp_data, out_data, param_dict, tool=None):
-#    """Sets the name of the data"""
-#    dbkeys = sets.Set( [data.dbkey for data in inp_data.values() ] )
-#    if len(dbkeys) != 1:
-#        raise Exception, '<p><font color="yellow">Both Queries must be from the same genome build</font></p>'
 
 def validate_input( trans, error_map, param_values, page_param_map ):
@@ -25,7 +17,7 @@
     for name, param in page_param_map.iteritems():
         if isinstance( param, DataToolParameter ):
             # for each dataset parameter
-            if param_values.get(name, None) != None:
+            if param_values.get(name, None) is not None:
                 dbkeys.add( param_values[name].dbkey )
                 data_params += 1
             # check meta data
@@ -34,17 +26,15 @@
                 if isinstance( param.datatype, trans.app.datatypes_registry.get_datatype_by_extension( 'gff' ).__class__ ):
                     # TODO: currently cannot validate GFF inputs b/c they are not derived from interval.
                     pass
-                else: # Validate interval datatype.
-                    startCol = int( param.metadata.startCol )
-                    endCol = int( param.metadata.endCol )
-                    chromCol = int( param.metadata.chromCol )
+                else:  # Validate interval datatype.
+                    int( param.metadata.startCol )
+                    int( param.metadata.endCol )
+                    int( param.metadata.chromCol )
                     if param.metadata.strandCol is not None:
-                        strandCol = int ( param.metadata.strandCol )
-                    else:
-                        strandCol = 0
+                        int( param.metadata.strandCol )
             except:
                 error_msg = "The attributes of this dataset are not properly set. " + \
-                    "Click the pencil icon in the history item to set the chrom, start, end and strand columns."
+                            "Click the pencil icon in the history item to set the chrom, start, end and strand columns."
                 error_map[name] = error_msg
         data_param_names.add( name )
     if len( dbkeys ) > 1:
@@ -55,38 +45,33 @@
     for name in data_param_names:
         error_map[name] = "A dataset of the appropriate type is required"
 
+
 # Commented out by INS, 5/30/2007.  What is the PURPOSE of this?
 def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
     """Verify the output data after each run"""
-    items = out_data.items()
-
-    for name, data in items:
+    for data in out_data.values():
         try:
             if stderr and len( stderr ) > 0:
                 raise Exception( stderr )
-        except Exception, exc:
+        except Exception:
             data.blurb = JOB_ERROR
             data.state = JOB_ERROR
 
-## def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
-##     pass
-
 
 def exec_after_merge(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
     exec_after_process(
         app, inp_data, out_data, param_dict, tool=tool, stdout=stdout, stderr=stderr)
 
     # strip strand column if clusters were merged
-    items = out_data.items()
-    for name, data in items:
-        if param_dict['returntype'] == True:
+    for data in out_data.values():
+        if param_dict['returntype'] is True:
             data.metadata.chromCol = 1
             data.metadata.startCol = 2
             data.metadata.endCol = 3
         # merge always clobbers strand
         data.metadata.strandCol = None
-    
+
 
 def exec_after_cluster(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
     exec_after_process(
@@ -94,6 +79,5 @@
 
     # strip strand column if clusters were merged
     if param_dict["returntype"] == '1':
-        items = out_data.items()
-        for name, data in items:
+        for data in out_data.values():
             data.metadata.strandCol = None
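One change above is easy to misread: startCol = int( param.metadata.startCol ) becomes a bare int( param.metadata.startCol ). The assignment is dropped because the variable was never used; the int() call is kept purely for its side effect of raising inside the try block when a column index is unset or non-numeric. A self-contained sketch of that idiom, using a stand-in metadata object rather than Galaxy's real metadata API:

class FakeMetadata(object):
    # Stand-in for Galaxy's dataset metadata; illustrative only.
    chromCol = 1
    startCol = 2
    endCol = 3
    strandCol = None


def validate_interval_metadata(metadata):
    """Return an error message if column metadata is unset, else None."""
    try:
        # int() raises TypeError/ValueError on None or garbage; the results
        # are deliberately discarded, exactly as in the hunk above.
        int(metadata.chromCol)
        int(metadata.startCol)
        int(metadata.endCol)
        if metadata.strandCol is not None:
            int(metadata.strandCol)
    except (TypeError, ValueError):
        return "The attributes of this dataset are not properly set."
    return None


assert validate_interval_metadata(FakeMetadata()) is None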
diff -r 77641d5731c8 -r 5f72be09cfd3 tool_dependencies.xml
--- a/tool_dependencies.xml	Mon Apr 14 09:20:28 2014 -0400
+++ b/tool_dependencies.xml	Wed Nov 11 12:48:44 2015 -0500
@@ -1,9 +1,9 @@
 <?xml version="1.0"?>
 <tool_dependency>
     <package name="bx-python" version="0.7.1">
-        <repository changeset_revision="41eb9d9f667d" name="package_bx_python_0_7" owner="devteam" prior_installation_required="False" toolshed="http://toolshed.g2.bx.psu.edu" />
+        <repository changeset_revision="2d0c08728bca" name="package_bx_python_0_7" owner="devteam" toolshed="https://toolshed.g2.bx.psu.edu" />
     </package>
     <package name="galaxy-ops" version="1.0.0">
-        <repository changeset_revision="4e39032e4ec6" name="package_galaxy_ops_1_0_0" owner="devteam" prior_installation_required="False" toolshed="http://toolshed.g2.bx.psu.edu" />
+        <repository changeset_revision="9cbb20b85c01" name="package_galaxy_ops_1_0_0" owner="devteam" toolshed="https://toolshed.g2.bx.psu.edu" />
     </package>
 </tool_dependency>
diff -r 77641d5731c8 -r 5f72be09cfd3 utils/gff_util.py
--- a/utils/gff_util.py	Mon Apr 14 09:20:28 2014 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,430 +0,0 @@
-"""
-Provides utilities for working with GFF files.
-"""
-
-import copy
-from bx.intervals.io import *
-from bx.tabular.io import Header, Comment
-from utils.odict import odict
-
-class GFFInterval( GenomicInterval ):
-    """
-    A GFF interval, including attributes. If file is strictly a GFF file,
-    only attribute is 'group.'
-    """
-    def __init__( self, reader, fields, chrom_col=0, feature_col=2, start_col=3, end_col=4, \
-                  strand_col=6, score_col=5, default_strand='.', fix_strand=False ):
-        # HACK: GFF format allows '.' for strand but GenomicInterval does not. To get around this,
-        # temporarily set strand and then unset after initing GenomicInterval.
-        unknown_strand = False
-        if not fix_strand and fields[ strand_col ] == '.':
-            unknown_strand = True
-            fields[ strand_col ] = '+'
-        GenomicInterval.__init__( self, reader, fields, chrom_col, start_col, end_col, strand_col, \
-                                  default_strand, fix_strand=fix_strand )
-        if unknown_strand:
-            self.strand = '.'
-            self.fields[ strand_col ] = '.'
-
-        # Handle feature, score column.
-        self.feature_col = feature_col
-        if self.feature_col >= self.nfields:
-            raise MissingFieldError( "No field for feature_col (%d)" % feature_col )
-        self.feature = self.fields[ self.feature_col ]
-        self.score_col = score_col
-        if self.score_col >= self.nfields:
-            raise MissingFieldError( "No field for score_col (%d)" % score_col )
-        self.score = self.fields[ self.score_col ]
-
-        # GFF attributes.
-        self.attributes = parse_gff_attributes( fields[8] )
-
-    def copy( self ):
-        return GFFInterval(self.reader, list( self.fields ), self.chrom_col, self.feature_col, self.start_col,
-                           self.end_col, self.strand_col, self.score_col, self.strand)
-
-class GFFFeature( GFFInterval ):
-    """
-    A GFF feature, which can include multiple intervals.
-    """
-    def __init__( self, reader, chrom_col=0, feature_col=2, start_col=3, end_col=4, \
-                  strand_col=6, score_col=5, default_strand='.', fix_strand=False, intervals=[], \
-                  raw_size=0 ):
-        # Use copy so that first interval and feature do not share fields.
-        GFFInterval.__init__( self, reader, copy.deepcopy( intervals[0].fields ), chrom_col, feature_col, \
-                              start_col, end_col, strand_col, score_col, default_strand, \
-                              fix_strand=fix_strand )
-        self.intervals = intervals
-        self.raw_size = raw_size
-        # Use intervals to set feature attributes.
-        for interval in self.intervals:
-            # Error checking. NOTE: intervals need not share the same strand.
-            if interval.chrom != self.chrom:
-                raise ValueError( "interval chrom does not match self chrom: %s != %s" % \
-                                  ( interval.chrom, self.chrom ) )
-            # Set start, end of interval.
-            if interval.start < self.start:
-                self.start = interval.start
-            if interval.end > self.end:
-                self.end = interval.end
-
-    def name( self ):
-        """ Returns feature's name. """
-        name = None
-        # Preference for name: GTF, GFF3, GFF.
-        for attr_name in [
-                # GTF:
-                'gene_id', 'transcript_id',
-                # GFF3:
-                'ID', 'id',
-                # GFF (TODO):
-                'group' ]:
-            name = self.attributes.get( attr_name, None )
-            if name is not None:
-                break
-        return name
-
-    def copy( self ):
-        intervals_copy = []
-        for interval in self.intervals:
-            intervals_copy.append( interval.copy() )
[...]
-        if pair == '':
-            continue
-        name = pair[0].strip()
-        if name == '':
-            continue
-        # Need to strip double quote from values
-        value = pair[1].strip(" \"")
-        attributes[ name ] = value
-
-    if len( attributes ) == 0:
-        # Could not split attributes string, so entire string must be
-        # 'group' attribute. This is the case for strictly GFF files.
-        attributes['group'] = attr_str
-    return attributes
-
-def gff_attributes_to_str( attrs, gff_format ):
-    """
-    Convert GFF attributes to string. Supported formats are GFF3, GTF.
-    """
-    if gff_format == 'GTF':
-        format_string = '%s "%s"'
-        # Convert group (GFF) and ID, parent (GFF3) attributes to transcript_id, gene_id
-        id_attr = None
-        if 'group' in attrs:
-            id_attr = 'group'
-        elif 'ID' in attrs:
-            id_attr = 'ID'
-        elif 'Parent' in attrs:
-            id_attr = 'Parent'
-        if id_attr:
-            attrs['transcript_id'] = attrs['gene_id'] = attrs[id_attr]
-    elif gff_format == 'GFF3':
-        format_string = '%s=%s'
-    attrs_strs = []
-    for name, value in attrs.items():
-        attrs_strs.append( format_string % ( name, value ) )
-    return " ; ".join( attrs_strs )
-
-def read_unordered_gtf( iterator, strict=False ):
-    """
-    Returns GTF features found in an iterator. GTF lines need not be ordered
-    or clustered for reader to work. Reader returns GFFFeature objects sorted
-    by transcript_id, chrom, and start position.
-    """
-
-    # -- Get function that generates line/feature key. --
-
-    get_transcript_id = lambda fields: parse_gff_attributes( fields[8] )[ 'transcript_id' ]
-    if strict:
-        # Strict GTF parsing uses transcript_id only to group lines into feature.
-        key_fn = get_transcript_id
-    else:
-        # Use lenient parsing where chromosome + transcript_id is the key. This allows
-        # transcripts with same ID on different chromosomes; this occurs in some popular
-        # datasources, such as RefGenes in UCSC.
-        key_fn = lambda fields: fields[0] + '_' + get_transcript_id( fields )
-
-
-    # Aggregate intervals by transcript_id and collect comments.
-    feature_intervals = odict()
-    comments = []
-    for count, line in enumerate( iterator ):
-        if line.startswith( '#' ):
-            comments.append( Comment( line ) )
-            continue
-
-        line_key = key_fn( line.split('\t') )
-        if line_key in feature_intervals:
-            feature = feature_intervals[ line_key ]
-        else:
-            feature = []
-            feature_intervals[ line_key ] = feature
-        feature.append( GFFInterval( None, line.split( '\t' ) ) )
-
-    # Create features.
-    chroms_features = {}
-    for count, intervals in enumerate( feature_intervals.values() ):
-        # Sort intervals by start position.
-        intervals.sort( lambda a,b: cmp( a.start, b.start ) )
-        feature = GFFFeature( None, intervals=intervals )
-        if feature.chrom not in chroms_features:
-            chroms_features[ feature.chrom ] = []
-        chroms_features[ feature.chrom ].append( feature )
-
-    # Sort features by chrom, start position.
-    chroms_features_sorted = []
-    for chrom_features in chroms_features.values():
-        chroms_features_sorted.append( chrom_features )
-    chroms_features_sorted.sort( lambda a,b: cmp( a[0].chrom, b[0].chrom ) )
-    for features in chroms_features_sorted:
-        features.sort( lambda a,b: cmp( a.start, b.start ) )
-
-    # Yield comments first, then features.
-    # FIXME: comments can appear anywhere in file, not just the beginning.
-    # Ideally, then comments would be associated with features and output
-    # just before feature/line.
-    for comment in comments:
-        yield comment
-
-    for chrom_features in chroms_features_sorted:
-        for feature in chrom_features:
-            yield feature
-
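The tail of the removed parse_gff_attributes() is visible above: GTF-style name "value"; pairs are split, de-quoted, and collected, and when nothing splits, the whole column is treated as a plain-GFF 'group' attribute. A simplified, self-contained sketch of that behavior (not the removed function itself, which handles more edge cases):

def parse_attributes(attr_str):
    """Parse a GTF-style attribute column; fall back to GFF 'group'."""
    attributes = {}
    for pair_str in attr_str.split(";"):
        pair = pair_str.strip().split(" ", 1)
        if len(pair) != 2 or not pair[0].strip():
            continue
        # Strip surrounding double quotes from the value, as above.
        attributes[pair[0].strip()] = pair[1].strip(' "')
    if not attributes:
        # Could not split: treat the entire column as the 'group' attribute.
        attributes["group"] = attr_str
    return attributes


print(parse_attributes('gene_id "YAL069W"; transcript_id "YAL069W.1";'))
# -> {'gene_id': 'YAL069W', 'transcript_id': 'YAL069W.1'}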
diff -r 77641d5731c8 -r 5f72be09cfd3 utils/odict.py
--- a/utils/odict.py	Mon Apr 14 09:20:28 2014 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,85 +0,0 @@
-"""
-Ordered dictionary implementation.
-"""
-
-from UserDict import UserDict
-
-class odict(UserDict):
-    """
-    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
-
-    This dictionary class extends UserDict to record the order in which items are
-    added. Calling keys(), values(), items(), etc. will return results in this
-    order.
-    """
-    def __init__( self, dict = None ):
-        self._keys = []
-        UserDict.__init__( self, dict )
-
-    def __delitem__( self, key ):
-        UserDict.__delitem__( self, key )
-        self._keys.remove( key )
-
-    def __setitem__( self, key, item ):
-        UserDict.__setitem__( self, key, item )
-        if key not in self._keys:
-            self._keys.append( key )
-
-    def clear( self ):
-        UserDict.clear( self )
-        self._keys = []
-
-    def copy(self):
-        new = odict()
-        new.update( self )
-        return new
-
-    def items( self ):
-        return zip( self._keys, self.values() )
-
-    def keys( self ):
-        return self._keys[:]
-
-    def popitem( self ):
-        try:
-            key = self._keys[-1]
-        except IndexError:
-            raise KeyError( 'dictionary is empty' )
-        val = self[ key ]
-        del self[ key ]
-        return ( key, val )
-
-    def setdefault( self, key, failobj=None ):
-        if key not in self._keys:
-            self._keys.append( key )
-        return UserDict.setdefault( self, key, failobj )
-
-    def update( self, dict ):
-        for ( key, val ) in dict.items():
-            self.__setitem__( key, val )
-
-    def values( self ):
-        return map( self.get, self._keys )
-
-    def iterkeys( self ):
-        return iter( self._keys )
-
-    def itervalues( self ):
-        for key in self._keys:
-            yield self.get( key )
-
-    def iteritems( self ):
-        for key in self._keys:
-            yield key, self.get( key )
-
-    def __iter__( self ):
-        for key in self._keys:
-            yield key
-
-    def reverse( self ):
-        self._keys.reverse()
-
-    def insert( self, index, key, item ):
-        if key not in self._keys:
-            self._keys.insert( index, key )
-        UserDict.__setitem__( self, key, item )
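The removed odict class predates its standard-library equivalent: collections.OrderedDict (available since Python 2.7) provides the same insertion-order guarantee that read_unordered_gtf() relied on when grouping intervals by transcript key, and plain dict preserves insertion order from Python 3.7 onward. A quick demonstration:

from collections import OrderedDict

# Keys come back in insertion order, not sorted order -- the property the
# GTF reader depended on when aggregating intervals per transcript.
features = OrderedDict()
features["chr2_txA"] = ["interval1"]
features["chr1_txB"] = ["interval2"]
print(list(features))  # ['chr2_txA', 'chr1_txB']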