gff3_rebase.py @ 0:53c2be00bb6f draft default tip
planemo upload for repository https://github.com/galaxyproject/tools-iuc/tree/master/tools/jbrowse2 commit 0a86c88a95b0d1cc49d84544136de6556b95320f

author: bgruening
date:   Wed, 05 Jun 2024 08:15:49 +0000

#!/usr/bin/env python
import argparse
import copy
import logging
import sys

from BCBio import GFF
from Bio.SeqFeature import FeatureLocation

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

__author__ = "Eric Rasche"
__version__ = "0.4.0"
__maintainer__ = "Eric Rasche"
__email__ = "esr@tamu.edu"


def feature_lambda(feature_list, test, test_kwargs, subfeatures=True):
    """Recursively search through features, testing each with a test function, yielding matches.

    GFF3 is a hierarchical data structure, so we need to be able to recursively
    search through features. E.g. if you're looking for a feature with
    ID='bob.42', you can't just do a simple list comprehension with a test
    case. You don't know how deeply buried bob.42 will be in the feature
    tree. This is where feature_lambda steps in.

    :type feature_list: list
    :param feature_list: an iterable of features

    :type test: function reference
    :param test: a closure with the method signature (feature, **kwargs) where
                 the kwargs are those passed in the next argument. This
                 function should return True or False, True if the feature is
                 to be yielded as part of the main feature_lambda function, or
                 False if it is to be ignored. This function CAN mutate the
                 features passed to it (think "apply").

    :type test_kwargs: dictionary
    :param test_kwargs: kwargs to pass to your closure when it is called.

    :type subfeatures: boolean
    :param subfeatures: when a feature is matched, should just that feature be
                        yielded to the caller, or should the entire sub_feature
                        tree for that feature be included? subfeatures=True is
                        useful in cases such as searching for a gene feature,
                        and wanting to know what RBS/Shine_Dalgarno_sequences
                        are in the sub_feature tree (which can be accomplished
                        with two feature_lambda calls). subfeatures=False is
                        useful in cases when you want to process (and possibly
                        return) the entire feature tree, such as applying a
                        qualifier to every single feature.

    :rtype: generator
    :return: yields the matching features one at a time.
    """
    # Either the top level set of [features] or the subfeature attribute
    for feature in feature_list:
        if test(feature, **test_kwargs):
            if not subfeatures:
                feature_copy = copy.deepcopy(feature)
                feature_copy.sub_features = []
                yield feature_copy
            else:
                yield feature

        if hasattr(feature, 'sub_features'):
            for x in feature_lambda(feature.sub_features, test, test_kwargs, subfeatures=subfeatures):
                yield x


def feature_test_qual_value(feature, **kwargs):
    """Test qualifier values.

    For every feature, check that at least one value in
    feature.qualifiers[kwargs['qualifier']] is in kwargs['attribute_list']
    """
    for attribute_value in feature.qualifiers.get(kwargs['qualifier'], []):
        if attribute_value in kwargs['attribute_list']:
            return True
    return False


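# Illustrative sketch (not part of the original module): one way feature_lambda
# and feature_test_qual_value could be combined to pull a feature out of an
# already parsed record by ID. The record `rec` and the ID value 'bob.42' are
# hypothetical placeholders, not names defined above.
#
#     for match in feature_lambda(
#             rec.features,
#             feature_test_qual_value,
#             {'qualifier': 'ID', 'attribute_list': ['bob.42']},
#             subfeatures=True):
#         log.info("found %s", match.id)
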
def __get_features(child, interpro=False):
    child_features = {}
    for rec in GFF.parse(child):
        # Only top level
        for feature in rec.features:
            # Get the record id as parent_feature_id (since this is how it will be during remapping)
            parent_feature_id = rec.id
            # If it's an interpro specific gff3 file
            if interpro:
                # Then we ignore polypeptide features as they're useless
                if feature.type == 'polypeptide':
                    continue
                # If there's an underscore, we strip up to that underscore?
                # I do not know the rationale for this, removing.
                # if '_' in parent_feature_id:
                #     parent_feature_id = parent_feature_id[parent_feature_id.index('_') + 1:]

            try:
                child_features[parent_feature_id].append(feature)
            except KeyError:
                child_features[parent_feature_id] = [feature]
    # Keep a list of feature objects keyed by parent record id
    return child_features


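# For orientation only (hypothetical data): with a child GFF3 containing two
# records named gene_1 and gene_2, __get_features would return something like
#
#     {'gene_1': [SeqFeature(...), SeqFeature(...)],
#      'gene_2': [SeqFeature(...)]}
#
# i.e. the top-level features of each record, keyed by that record's id.
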
def __update_feature_location(feature, parent, protein2dna):
    start = feature.location.start
    end = feature.location.end
    if protein2dna:
        start *= 3
        end *= 3

    if parent.location.strand >= 0:
        ns = parent.location.start + start
        ne = parent.location.start + end
        st = +1
    else:
        ns = parent.location.end - end
        ne = parent.location.end - start
        st = -1

    # Don't let start/stops be less than zero. It's technically valid for them
    # to be (at least in the model I'm working with) but it causes numerous
    # issues.
    #
    # Instead, we'll replace with %3 to try and keep it in the same reading
    # frame that it should be in.
    if ns < 0:
        ns %= 3
    if ne < 0:
        ne %= 3

    feature.location = FeatureLocation(ns, ne, strand=st)

    if hasattr(feature, 'sub_features'):
        for subfeature in feature.sub_features:
            __update_feature_location(subfeature, parent, protein2dna)


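# Worked example (hypothetical coordinates) of the remapping above: a match at
# protein positions 10..20 on a parent gene spanning 1000..2000 on the forward
# strand, with protein2dna=True, becomes start = 10*3 = 30 and end = 20*3 = 60,
# giving ns = 1000 + 30 = 1030 and ne = 1000 + 60 = 1060. On the reverse strand
# the offsets are instead subtracted from the parent's end:
# ns = 2000 - 60 = 1940 and ne = 2000 - 30 = 1970, with the strand flipped to -1.
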
def rebase(parent, child, interpro=False, protein2dna=False, map_by='ID'):
    # get all of the features we will be re-mapping in a dictionary, keyed by parent feature ID
    child_features = __get_features(child, interpro=interpro)

    for rec in GFF.parse(parent):
        replacement_features = []
        for feature in feature_lambda(
                rec.features,
                # Filter features in the parent genome by those that are
                # "interesting", i.e. have results in the child_features dict.
                # Probably an unnecessary optimisation.
                feature_test_qual_value,
                {
                    'qualifier': map_by,
                    'attribute_list': child_features.keys(),
                },
                subfeatures=False):

            # Features which will be re-mapped
            to_remap = child_features[feature.id]
            # TODO: update starts
            fixed_features = []
            for x in to_remap:
                # Then update the location of the actual feature
                __update_feature_location(x, feature, protein2dna)

                if interpro:
                    for y in ('status', 'Target'):
                        try:
                            del x.qualifiers[y]
                        except Exception:
                            pass

                fixed_features.append(x)
            replacement_features.extend(fixed_features)
        # We do this so we don't include the original set of features that we
        # were rebasing against in our result.
        rec.features = replacement_features
        rec.annotations = {}
        GFF.write([rec], sys.stdout)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='rebase gff3 features against parent locations', epilog="")
    parser.add_argument('parent', type=argparse.FileType('r'), help='Parent GFF3 annotations')
    parser.add_argument('child', type=argparse.FileType('r'), help='Child GFF3 annotations to rebase against parent')
    parser.add_argument('--interpro', action='store_true',
                        help='Interpro specific modifications')
    parser.add_argument('--protein2dna', action='store_true',
                        help='Map protein translated results to original DNA data')
    parser.add_argument('--map_by', help='Map by key', default='ID')
    args = parser.parse_args()
    rebase(**vars(args))
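
# Example invocation (hypothetical file names, shown for orientation only):
#
#     python gff3_rebase.py --protein2dna parent_genes.gff3 interproscan_results.gff3 > rebased.gff3
#
# The rebased child annotations are written to stdout as GFF3.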