data_manager/data_manager_fetch_gff.py @ 0:ac4fde07eaed draft ("Uploaded")

author:   ieguinoa
date:     Mon, 09 Jul 2018 11:58:50 -0400
parents:  (none)
children: c57bd7f3fb46
#!/usr/bin/env python
# Dan Blankenberg

import sys
import os
import tempfile
import shutil
import optparse
from ftplib import FTP
import tarfile
import zipfile
import gzip
import bz2
try:
    # For Python 3.0 and later
    from urllib.request import urlopen
    from io import BytesIO as StringIO
    from io import UnsupportedOperation
except ImportError:
    # Fall back to Python 2's urllib2
    from urllib2 import urlopen
    from StringIO import StringIO
    UnsupportedOperation = AttributeError
from json import loads, dumps


CHUNK_SIZE = 2**20  # 1mb

DATA_TABLE_NAME = 'all_gff'


def cleanup_before_exit( tmp_dir ):
    if tmp_dir and os.path.exists( tmp_dir ):
        shutil.rmtree( tmp_dir )


def stop_err(msg):
    sys.stderr.write(msg)
    sys.exit(1)


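# Resolve the dbkey, display name, sequence id and sequence name from the
# Galaxy-supplied JSON params, falling back to the dbkey (or the
# --dbkey_description command line option) when the optional fields are empty.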
def get_dbkey_dbname_id_name( params, dbkey_description=None ):
    dbkey = params['param_dict']['dbkey_source']['dbkey']
    #TODO: ensure sequence_id is unique and does not already appear in location file
    sequence_id = params['param_dict']['sequence_id']
    if not sequence_id:
        sequence_id = dbkey  #uuid.uuid4() generate and use an uuid instead?

    if params['param_dict']['dbkey_source']['dbkey_source_selector'] == 'new':
        dbkey_name = params['param_dict']['dbkey_source']['dbkey_name']
        if not dbkey_name:
            dbkey_name = dbkey
    else:
        dbkey_name = None

    sequence_name = params['param_dict']['sequence_name']
    if not sequence_name:
        sequence_name = dbkey_description
        if not sequence_name:
            sequence_name = dbkey
    return dbkey, dbkey_name, sequence_id, sequence_name


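# List the names of the entries in an FTP directory via MLSD; each returned
# line holds ';'-separated facts with the filename as the final field.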
def _get_files_in_ftp_path( ftp, path ):
    path_contents = []
    ftp.retrlines( 'MLSD %s' % ( path ), path_contents.append )
    return [ line.split( ';' )[ -1 ].lstrip() for line in path_contents ]


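# Each _get_stream_readers_for_* helper takes an open file handle (plus a
# scratch directory for formats that must be extracted to disk) and returns
# a list of file-like readers over the uncompressed contents.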
def _get_stream_readers_for_tar( fh, tmp_dir ):
    fasta_tar = tarfile.open( fileobj=fh, mode='r:*' )
    return [x for x in [fasta_tar.extractfile(member) for member in fasta_tar.getmembers()] if x]


def _get_stream_readers_for_zip( fh, tmp_dir ):
    """
    Unpacks all archived files in a zip file.
    Individual files will be concatenated (in _stream_fasta_to_file)
    """
    fasta_zip = zipfile.ZipFile( fh, 'r' )
    rval = []
    for member in fasta_zip.namelist():
        fasta_zip.extract( member, tmp_dir )
        rval.append( open( os.path.join( tmp_dir, member ), 'rb' ) )
    return rval


def _get_stream_readers_for_gzip( fh, tmp_dir ):
    return [ gzip.GzipFile( fileobj=fh, mode='rb' ) ]


def _get_stream_readers_for_bz2( fh, tmp_dir ):
    return [ bz2.BZ2File( fh.name, 'rb' ) ]


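# Note: the FASTA sorting machinery below appears to be carried over from the
# fetch-genome (all_fasta) data manager this script was adapted from; the
# sort_fasta() call in _stream_fasta_to_file is commented out, so none of it
# runs when fetching GFF files.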
def sort_fasta( fasta_filename, sort_method, params ):
    if sort_method is None:
        return
    # Raise explicitly instead of assert-ing, so the check survives python -O
    if sort_method not in SORTING_METHODS:
        raise ValueError( "%s is not a valid sorting option." % sort_method )
    return SORTING_METHODS[ sort_method ]( fasta_filename, params )


def _move_and_index_fasta_for_sorting( fasta_filename ):
    unsorted_filename = tempfile.NamedTemporaryFile().name
    shutil.move( fasta_filename, unsorted_filename )
    fasta_offsets = {}
    unsorted_fh = open( unsorted_filename )
    while True:
        offset = unsorted_fh.tell()
        line = unsorted_fh.readline()
        if not line:
            break
        if line.startswith( ">" ):
            line = line.split( None, 1 )[0][1:]
            fasta_offsets[ line ] = offset
    unsorted_fh.close()
    # Materialize as a list: under Python 3, map() returns an iterator, which
    # breaks the == comparisons against lists in the sorting functions below.
    current_order = [ name for offset, name in sorted( ( offset, name ) for name, offset in fasta_offsets.items() ) ]
    return ( unsorted_filename, fasta_offsets, current_order )


def _write_sorted_fasta( sorted_names, fasta_offsets, sorted_fasta_filename, unsorted_fasta_filename ):
    unsorted_fh = open( unsorted_fasta_filename )
    # Open for writing in text mode to match the text-mode reads above;
    # writing str lines to a 'wb+' handle would fail under Python 3.
    sorted_fh = open( sorted_fasta_filename, 'w+' )

    for name in sorted_names:
        offset = fasta_offsets[ name ]
        unsorted_fh.seek( offset )
        sorted_fh.write( unsorted_fh.readline() )
        while True:
            line = unsorted_fh.readline()
            if not line or line.startswith( ">" ):
                break
            sorted_fh.write( line )
    unsorted_fh.close()
    sorted_fh.close()


def _sort_fasta_as_is( fasta_filename, params ):
    return


def _sort_fasta_lexicographical( fasta_filename, params ):
    ( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
    sorted_names = sorted( fasta_offsets.keys() )
    if sorted_names == current_order:
        shutil.move( unsorted_filename, fasta_filename )
    else:
        _write_sorted_fasta( sorted_names, fasta_offsets, fasta_filename, unsorted_filename )


def _sort_fasta_gatk( fasta_filename, params ):
    #This method was added by reviewer request.
    ( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
    # Use list comprehensions instead of map(): under Python 3, map() returns
    # an iterator, which cannot be concatenated, insert()ed into, or safely
    # used to extend the very list it is iterating over.
    sorted_names = [ str( x ) for x in range( 1, 23 ) ] + [ 'X', 'Y' ]
    #detect if we have chrN, or just N
    has_chr = False
    for chrom in sorted_names:
        if "chr%s" % chrom in current_order:
            has_chr = True
            break

    if has_chr:
        sorted_names = [ "chr%s" % x for x in sorted_names ]
        sorted_names.insert( 0, "chrM" )
    else:
        sorted_names.insert( 0, "MT" )
    sorted_names.extend( [ "%s_random" % x for x in sorted_names ] )

    existing_sorted_names = []
    for name in sorted_names:
        if name in current_order:
            existing_sorted_names.append( name )
    for name in current_order:
        #TODO: confirm that non-canonical names do not need to be sorted specially
        if name not in existing_sorted_names:
            existing_sorted_names.append( name )

    if existing_sorted_names == current_order:
        shutil.move( unsorted_filename, fasta_filename )
    else:
        _write_sorted_fasta( existing_sorted_names, fasta_offsets, fasta_filename, unsorted_filename )


def _sort_fasta_custom( fasta_filename, params ):
    ( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
    sorted_names = []
    for id_repeat in params['param_dict']['sorting']['sequence_identifiers']:
        sorted_names.append( id_repeat[ 'identifier' ] )
    handle_not_listed = params['param_dict']['sorting']['handle_not_listed_selector']
    if handle_not_listed.startswith( 'keep' ):
        add_list = []
        for name in current_order:
            if name not in sorted_names:
                add_list.append( name )
        if add_list:
            if handle_not_listed == 'keep_append':
                sorted_names.extend( add_list )
            else:
                add_list.extend( sorted_names )
                sorted_names = add_list
    if sorted_names == current_order:
        shutil.move( unsorted_filename, fasta_filename )
    else:
        _write_sorted_fasta( sorted_names, fasta_offsets, fasta_filename, unsorted_filename )


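# Spool a non-seekable stream (e.g. a urlopen() response) into a named
# temporary file so that it can be rewound; 'start' holds the bytes that were
# already consumed while sniffing the compression type in get_stream_reader.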
def _download_file(start, fh):
    tmp = tempfile.NamedTemporaryFile()
    tmp.write(start)
    tmp.write(fh.read())
    tmp.flush()
    tmp.seek(0)
    return tmp


def get_stream_reader(fh, tmp_dir):
    """
    Check if file is compressed and return correct stream reader.
    If file has to be downloaded, do it now.
    """
    magic_dict = {
        b"\x1f\x8b\x08": _get_stream_readers_for_gzip,
        b"\x42\x5a\x68": _get_stream_readers_for_bz2,
        b"\x50\x4b\x03\x04": _get_stream_readers_for_zip,
    }
    start_of_file = fh.read(CHUNK_SIZE)
    try:
        fh.seek(0)
    except UnsupportedOperation:  # This is if fh has been created by urlopen
        fh = _download_file(start_of_file, fh)
    for k, v in magic_dict.items():
        if start_of_file.startswith(k):
            return v(fh, tmp_dir)
    try:  # Check if file is tar file
        if tarfile.open(fileobj=StringIO(start_of_file)):
            return _get_stream_readers_for_tar(fh, tmp_dir)
    except tarfile.ReadError:
        pass
    return fh


def _get_ucsc_download_address(params, dbkey):
    """
    Check if we can find the correct file for the supplied dbkey on UCSC's FTP server
    """
    UCSC_FTP_SERVER = 'hgdownload.cse.ucsc.edu'
    UCSC_DOWNLOAD_PATH = '/goldenPath/%s/bigZips/'
    COMPRESSED_EXTENSIONS = ['.tar.gz', '.tgz', '.tar.bz2', '.zip', '.fa.gz', '.fa.bz2']

    email = params['param_dict']['__user_email__']
    if not email:
        email = 'anonymous@example.com'

    ucsc_dbkey = params['param_dict']['reference_source']['requested_dbkey'] or dbkey
    UCSC_CHROM_FA_FILENAMES = ['%s.chromFa' % ucsc_dbkey, 'chromFa', ucsc_dbkey]

    ftp = FTP(UCSC_FTP_SERVER)
    ftp.login('anonymous', email)

    ucsc_path = UCSC_DOWNLOAD_PATH % ucsc_dbkey
    path_contents = _get_files_in_ftp_path(ftp, ucsc_path)
    ftp.quit()

    for ucsc_chrom_fa_filename in UCSC_CHROM_FA_FILENAMES:
        for ext in COMPRESSED_EXTENSIONS:
            if "%s%s" % (ucsc_chrom_fa_filename, ext) in path_contents:
                ucsc_file_name = "%s%s%s" % (ucsc_path, ucsc_chrom_fa_filename, ext)
                return "ftp://%s%s" % (UCSC_FTP_SERVER, ucsc_file_name)

    raise Exception('Unable to determine filename for UCSC Genome for %s: %s' % (ucsc_dbkey, path_contents))


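# Stream the fetched file(s) to disk and record the resulting entries in the
# data manager's output dictionary.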
def add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params):
    for data_table_name, data_table_entry in _stream_fasta_to_file( fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params ):
        if data_table_entry:
            _add_data_table_entry( data_manager_dict, data_table_entry, data_table_name )


def download_from_ucsc( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
    url = _get_ucsc_download_address(params, dbkey)
    fasta_readers = get_stream_reader(urlopen(url), tmp_dir)
    add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)


def download_from_ncbi( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
    NCBI_DOWNLOAD_URL = 'http://togows.dbcls.jp/entry/ncbi-nucleotide/%s.fasta'  #FIXME: taken from dave's genome manager...why some japan site?
    requested_identifier = params['param_dict']['reference_source']['requested_identifier']
    url = NCBI_DOWNLOAD_URL % requested_identifier
    fasta_readers = get_stream_reader(urlopen(url), tmp_dir)
    add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)


def download_from_url( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
    # List comprehension instead of filter(bool, map(...)): same result, and a
    # real list under both Python 2 and 3.
    urls = [ url.strip() for url in params['param_dict']['reference_source']['user_url'].split( '\n' ) if url.strip() ]
    fasta_readers = [ get_stream_reader(urlopen( url ), tmp_dir) for url in urls ]
    add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)


def download_from_history( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
    #TODO: allow multiple FASTA input files
    input_filename = params['param_dict']['reference_source']['input_fasta']
    if isinstance( input_filename, list ):
        fasta_readers = [ get_stream_reader(open(filename, 'rb'), tmp_dir) for filename in input_filename ]
    else:
        # Open in binary mode, like the list branch above; the output file is
        # written in binary mode.
        fasta_readers = get_stream_reader(open(input_filename, 'rb'), tmp_dir)
    add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)


def copy_from_directory( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
    input_filename = params['param_dict']['reference_source']['fasta_filename']
    create_symlink = params['param_dict']['reference_source']['create_symlink'] == 'create_symlink'
    if create_symlink:
        data_table_entries = _create_symlink( input_filename, target_directory, dbkey, dbkey_name, sequence_id, sequence_name )
    else:
        if isinstance( input_filename, list ):
            fasta_readers = [ get_stream_reader(open(filename, 'rb'), tmp_dir) for filename in input_filename ]
        else:
            # Binary mode, consistent with the list branch and the binary writer.
            fasta_readers = get_stream_reader(open(input_filename, 'rb'), tmp_dir)
        data_table_entries = _stream_fasta_to_file( fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params )
    for data_table_name, data_table_entry in data_table_entries:
        if data_table_entry:
            _add_data_table_entry( data_manager_dict, data_table_entry, data_table_name )


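# Append an entry to the named data table, creating the enclosing
# 'data_tables' structure on first use.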
def _add_data_table_entry( data_manager_dict, data_table_entry, data_table_name ):
    data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
    # Look up the existing list under data_table_name (not the global
    # DATA_TABLE_NAME, which would mix up entries for other tables).
    data_manager_dict['data_tables'][data_table_name] = data_manager_dict['data_tables'].get( data_table_name, [] )
    data_manager_dict['data_tables'][data_table_name].append( data_table_entry )
    return data_manager_dict


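# Concatenate one or more readers into <sequence_id>.gff inside the target
# directory and return the data table entries describing it. The 'fasta'
# naming is inherited from the fetch-genome data manager this script was
# adapted from.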
def _stream_fasta_to_file( fasta_stream, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params, close_stream=True ):
    fasta_base_filename = "%s.gff" % sequence_id
    fasta_filename = os.path.join( target_directory, fasta_base_filename )
    with open( fasta_filename, 'wb+' ) as fasta_writer:

        if isinstance( fasta_stream, list ) and len( fasta_stream ) == 1:
            fasta_stream = fasta_stream[0]

        if isinstance( fasta_stream, list ):
            last_char = None
            for fh in fasta_stream:
                if last_char not in [ None, '\n', '\r', b'\n', b'\r' ]:
                    fasta_writer.write( b'\n' )
                while True:
                    data = fh.read( CHUNK_SIZE )
                    if data:
                        fasta_writer.write( data )
                        # Slice rather than index: indexing bytes yields an
                        # int under Python 3, which would never match b'\n'.
                        last_char = data[-1:]
                    else:
                        break
                if close_stream:
                    fh.close()
        else:
            while True:
                data = fasta_stream.read( CHUNK_SIZE )
                if data:
                    fasta_writer.write( data )
                else:
                    break
            if close_stream:
                fasta_stream.close()

    #sort_fasta( fasta_filename, params['param_dict']['sorting']['sort_selector'], params )

    dbkey_dict = None
    if dbkey_name:
        #do len calc here
        #len_base_name = "%s.len" % ( dbkey )
        #compute_fasta_length( fasta_filename, os.path.join( target_directory, len_base_name ), keep_first_word=True )
        dbkey_dict = dict( value=dbkey, name=dbkey_name, len_path='' )

    return [ ( '__dbkeys__', dbkey_dict ), ( DATA_TABLE_NAME, dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_filename ) ) ]


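# Write a two-column <name>\t<length> file with one row per FASTA record,
# optionally truncating titles to their first word. Used when building the
# .len file for a newly registered dbkey.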
def compute_fasta_length( fasta_file, out_file, keep_first_word=False ):
    infile = fasta_file
    out = open( out_file, 'w' )

    fasta_title = ''
    seq_len = 0

    first_entry = True

    for line in open( infile ):
        line = line.strip()
        if not line or line.startswith( '#' ):
            continue
        if line[0] == '>':
            if not first_entry:
                if keep_first_word:
                    fasta_title = fasta_title.split()[0]
                out.write( "%s\t%d\n" % ( fasta_title[ 1: ], seq_len ) )
            else:
                first_entry = False
            fasta_title = line
            seq_len = 0
        else:
            seq_len += len(line)

    # last fasta-entry
    if keep_first_word:
        fasta_title = fasta_title.split()[0]
    out.write( "%s\t%d\n" % ( fasta_title[ 1: ], seq_len ) )
    out.close()


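# Symlink the source file into the target directory instead of copying it,
# and compute the .len file when registering a new dbkey. Note that the link
# keeps the '.fa' extension from the original fetch-genome tool.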
def _create_symlink( input_filename, target_directory, dbkey, dbkey_name, sequence_id, sequence_name ):
    fasta_base_filename = "%s.fa" % sequence_id
    fasta_filename = os.path.join( target_directory, fasta_base_filename )
    os.symlink( input_filename, fasta_filename )

    dbkey_dict = None
    if dbkey_name:
        #do len calc here
        len_base_name = "%s.len" % ( dbkey )
        compute_fasta_length( fasta_filename, os.path.join( target_directory, len_base_name ), keep_first_word=True )
        dbkey_dict = dict( value=dbkey, name=dbkey_name, len_path=len_base_name )

    return [ ( '__dbkeys__', dbkey_dict ), ( DATA_TABLE_NAME, dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_filename ) ) ]


REFERENCE_SOURCE_TO_DOWNLOAD = dict( ucsc=download_from_ucsc, ncbi=download_from_ncbi, url=download_from_url, history=download_from_history, directory=copy_from_directory )

SORTING_METHODS = dict( as_is=_sort_fasta_as_is, lexicographical=_sort_fasta_lexicographical, gatk=_sort_fasta_gatk, custom=_sort_fasta_custom )


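# Entry point. Galaxy invokes data managers with the path of a JSON params
# file as the positional argument; the same file is overwritten with the
# resulting data table entries. A typical invocation from the wrapping tool
# XML might look something like (hypothetical sketch, not taken from the
# actual tool wrapper):
#   python data_manager_fetch_gff.py '${out_file}' --dbkey_description '...' --type all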
def main():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-d', '--dbkey_description', dest='dbkey_description', action='store', type="string", default=None, help='dbkey_description' )
    parser.add_option( '-t', '--type', dest='file_type', action='store', type='string', default=None, help='file_type' )
    (options, args) = parser.parse_args()

    filename = args[0]
    global DATA_TABLE_NAME
    if options.file_type == 'representative':
        DATA_TABLE_NAME = 'representative_gff'
    params = loads( open( filename ).read() )
    target_directory = params[ 'output_data' ][0]['extra_files_path']
    os.mkdir( target_directory )
    data_manager_dict = {}

    dbkey, dbkey_name, sequence_id, sequence_name = get_dbkey_dbname_id_name( params, dbkey_description=options.dbkey_description )

    if dbkey in [ None, '', '?' ]:
        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )

    # Create a tmp_dir, in case a zip file needs to be uncompressed
    tmp_dir = tempfile.mkdtemp()
    #Fetch the FASTA
    try:
        REFERENCE_SOURCE_TO_DOWNLOAD[ params['param_dict']['reference_source']['reference_source_selector'] ]( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir )
    finally:
        cleanup_before_exit(tmp_dir)
    #save info to json file
    open( filename, 'wb' ).write( dumps( data_manager_dict ).encode() )


if __name__ == "__main__":
    main()