gdcwebapp > json_data_source_mod.py @ 6:7815152f70c6 (draft)

author   | fabio
date     | Thu, 25 May 2017 15:37:16 -0400
uploaded | 2017-05-25
children | c0be9583df97
comparison | 5:23de6169957a -> 6:7815152f70c6
#!/usr/bin/env python
import json
import optparse
import urllib
import os.path
import os
from operator import itemgetter
import tarfile

__version__ = "1.0.0"
CHUNK_SIZE = 2**20  # 1 MB
VALID_CHARS = '.-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ '


def splitext(path):
    """ Like os.path.splitext(), but treat '.tar.gz' and '.tar.bz2' as a
    single extension; return ( root, extension ) with the leading dot removed.
    """
    for ext in ['.tar.gz', '.tar.bz2']:
        if path.endswith(ext):
            path, ext = path[:-len(ext)], path[-len(ext):]
            break
    else:
        path, ext = os.path.splitext(path)
    return path, ext[1:]

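# Example (illustrative only, not part of the original tool):
#   splitext( 'sample.tar.gz' )  ->  ( 'sample', 'tar.gz' )
#   splitext( 'sample.bed' )     ->  ( 'sample', 'bed' )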

def chunk_write( source_stream, target_stream, source_method="read", target_method="write" ):
    """ Copy data from source_stream to target_stream in CHUNK_SIZE blocks. """
    source_method = getattr( source_stream, source_method )
    target_method = getattr( target_stream, target_method )
    while True:
        chunk = source_method( CHUNK_SIZE )
        if chunk:
            target_method( chunk )
        else:
            break

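# For example (illustrative only), chunk_write copies between any pair of
# file-like objects:
#   with open( 'input.bin', 'rb' ) as src, open( 'output.bin', 'wb' ) as dst:
#       chunk_write( src, dst )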

def deconstruct_multi_filename( multi_filename ):
    keys = [ 'primary', 'id', 'name', 'visible', 'file_type' ]
    return dict( zip( keys, multi_filename.split('_') ) )


def construct_multi_filename( id, name, file_type ):
    """ Implementation of *Number of Output datasets cannot be determined until tool run* from documentation_.

    .. _documentation: http://wiki.galaxyproject.org/Admin/Tools/Multiple%20Output%20Files
    """
    filename = "%s_%s_%s_%s_%s" % ( 'primary', id, name, 'visible', file_type )
    return filename
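
# Illustration of the naming convention (hypothetical values):
#   construct_multi_filename( '42', 'mysample', 'bed' )
#       -> 'primary_42_mysample_visible_bed'
#   deconstruct_multi_filename( 'primary_42_mysample_visible_bed' )
#       -> { 'primary': 'primary', 'id': '42', 'name': 'mysample',
#            'visible': 'visible', 'file_type': 'bed' }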


def download_from_query( query_data, target_output_filename ):
    """ Download the file described by the query JSON data and write it to
    target_output_filename.
    """
    query_url = query_data.get( 'url' )
    query_file_type = query_data.get( 'extension' )
    query_stream = urllib.urlopen( query_url )
    output_stream = open( target_output_filename, 'wb' )
    chunk_write( query_stream, output_stream )
    query_stream.close()
    output_stream.close()

def store_file_from_archive( file_object, target_output_filename ):
    """ Store a file extracted from an archive; extracted files are organized
    as a collection using (collection-name)_(file-name).ext as the file name.
    """
    output_stream = open( target_output_filename, 'wb' )
    # file_object is the file-like object returned by tarfile.extractfile(),
    # so pass the stream itself and let chunk_write read it in chunks.
    chunk_write( file_object, output_stream )
    output_stream.close()


def download_extra_data( query_ext_data, base_path ):
    """ Download any extra data defined in the JSON.
    NOTE: the "path" value is a relative path to the file on our
    file system. This is slightly dangerous and we should make every effort
    to prevent a malicious absolute path from writing the file elsewhere on
    the filesystem.
    """
    for ext_data in query_ext_data:
        if not os.path.exists( base_path ):
            os.mkdir( base_path )
        query_stream = urllib.urlopen( ext_data.get( 'url' ) )
        ext_path = ext_data.get( 'path' )
        ext_dir = os.path.normpath( '/'.join( [ base_path, os.path.dirname( ext_path ) ] ) )
        # os.makedirs() raises OSError if the directory already exists
        if not os.path.exists( ext_dir ):
            os.makedirs( ext_dir )
        output_stream = open( os.path.normpath( '/'.join( [ base_path, ext_path ] ) ), 'wb' )
        chunk_write( query_stream, output_stream )
        query_stream.close()
        output_stream.close()
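
# query_ext_data is the "extra_data" list from the JSON response, e.g.
# (matching the schema documented in __main__ below):
#   [ { "url": "http://url_of_ext_file", "path": "rel/path/to/ext_file" } ]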


def metadata_to_json_for_archive_entry( dataset_id, extension, metaname, filename, ds_type='dataset', primary=False ):
    """ Return line separated JSON """
    meta_dict = dict( type = ds_type,
                      ext = extension,
                      filename = filename,
                      name = metaname,
                      metadata = {} )
    if primary:
        meta_dict[ 'base_dataset_id' ] = dataset_id
    else:
        meta_dict[ 'dataset_id' ] = dataset_id
    return "%s\n" % json.dumps( meta_dict )


def metadata_to_json( dataset_id, metadata, filename, ds_type='dataset', primary=False):
    """ Return line separated JSON """
    meta_dict = dict( type = ds_type,
                      ext = metadata.get( 'extension' ),
                      filename = filename,
                      name = metadata.get( 'name' ),
                      metadata = metadata.get( 'metadata', {} ) )
    if metadata.get( 'extra_data', None ):
        meta_dict[ 'extra_files' ] = '_'.join( [ filename, 'files' ] )
    if primary:
        meta_dict[ 'base_dataset_id' ] = dataset_id
    else:
        meta_dict[ 'dataset_id' ] = dataset_id
    return "%s\n" % json.dumps( meta_dict )
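
# For illustration, a record based on the example dataset in the schema
# documented in __main__ (with made-up filename and dataset_id) might look like:
#   {"type": "dataset", "ext": "wig", "filename": "/path/to/output1.dat",
#    "name": "encode WigData", "metadata": {"db_key": "hg19"}, "dataset_id": 1234}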


def download_files_and_write_metadata(query_item, json_params, output_base_path, metadata_parameter_file, primary, appdata_path):
    """ Main work function that operates on the JSON representation of
    one dataset and its metadata. Returns True.
    """
    dataset_url, output_filename, \
        extra_files_path, file_name, \
        ext, out_data_name, \
        hda_id, dataset_id = set_up_config_values(json_params)
    extension = query_item.get( 'extension' )
    filename = query_item.get( 'url' )
    extra_data = query_item.get( 'extra_data', None )
    if primary:
        filename = ''.join( c in VALID_CHARS and c or '-' for c in filename )
        name = construct_multi_filename( hda_id, filename, extension )
        target_output_filename = os.path.normpath( '/'.join( [ output_base_path, name ] ) )
        metadata_parameter_file.write( metadata_to_json( dataset_id, query_item,
                                                         target_output_filename,
                                                         ds_type='new_primary_dataset',
                                                         primary=primary) )
    else:
        target_output_filename = output_filename
        metadata_parameter_file.write( metadata_to_json( dataset_id, query_item,
                                                         target_output_filename,
                                                         ds_type='dataset',
                                                         primary=primary) )
    download_from_query( query_item, target_output_filename )
    if extra_data:
        extra_files_path = ''.join( [ target_output_filename, 'files' ] )
        download_extra_data( extra_data, extra_files_path )

    # If the downloaded file is a (possibly compressed) tar archive, extract
    # its members into appdata_path using the (collection-name)_(file-name)
    # naming convention so they can be picked up as a collection.
    check_ext = ""
    if filename.endswith( "gz" ):
        check_ext = "r:gz"
    elif filename.endswith( "bz2" ):
        check_ext = "r:bz2"
    elif filename.endswith( "tar" ):
        check_ext = "r:"
    if check_ext:
        with tarfile.open( target_output_filename, check_ext ) as tf:
            for entry in tf:
                if entry.isfile():
                    fileobj = tf.extractfile( entry )
                    #dataset_url, output_filename, \
                    #    extra_files_path, file_name, \
                    #    ext, out_data_name, \
                    #    hda_id, dataset_id = set_up_config_values(json_params)
                    filename = os.path.basename( entry.name )
                    extension = splitext( filename )[1]
                    extra_data = None
                    #target_output_filename = output_filename
                    # (?P<archive_name>.*)_(?P<file_name>.*)\..*
                    filename_with_collection_prefix = query_item.get( 'name' ) + "_" + filename
                    target_output_filename = os.path.join(appdata_path, filename_with_collection_prefix)
                    #metadata_parameter_file.write( metadata_to_json_for_archive_entry( dataset_id, extension,
                    #                                                                   filename, target_output_filename,
                    #                                                                   ds_type='dataset',
                    #                                                                   primary=primary) )
                    store_file_from_archive( fileobj, target_output_filename )

    return True


def set_up_config_values(json_params):
    """ Parse json_params file and return a tuple of necessary configuration
    values.
    """
    datasource_params = json_params.get( 'param_dict' )
    dataset_url = datasource_params.get( 'URL' )
    output_filename = datasource_params.get( 'output1', None )
    output_data = json_params.get( 'output_data' )
    extra_files_path, file_name, ext, out_data_name, hda_id, dataset_id = \
        itemgetter('extra_files_path', 'file_name', 'ext', 'out_data_name', 'hda_id', 'dataset_id')(output_data[0])
    return (dataset_url, output_filename,
            extra_files_path, file_name,
            ext, out_data_name,
            hda_id, dataset_id)
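
# The parsed json_params dictionary is expected to look roughly like the
# sketch below (keys taken from the code above; values are illustrative):
#   { "param_dict":  { "URL": "http://...", "output1": "/path/to/output1.dat" },
#     "output_data": [ { "extra_files_path": "...", "file_name": "...",
#                        "ext": "...", "out_data_name": "...",
#                        "hda_id": ..., "dataset_id": ... } ],
#     "job_config":  { "TOOL_PROVIDED_JOB_METADATA_FILE": "..." } }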


def download_from_json_data( options, args ):
    """ Parse the returned JSON data and download files. Write metadata
    to flat JSON file.
    """
    output_base_path = options.path
    appdata_path = options.appdata
    if not os.path.exists(appdata_path):
        os.makedirs(appdata_path)

    # read tool job configuration file and parse parameters we need
    json_params = json.loads( open( options.json_param_file, 'r' ).read() )
    dataset_url, output_filename, \
        extra_files_path, file_name, \
        ext, out_data_name, \
        hda_id, dataset_id = set_up_config_values(json_params)
    # line separated JSON file to contain all dataset metadata
    metadata_parameter_file = open( json_params['job_config']['TOOL_PROVIDED_JOB_METADATA_FILE'], 'wb' )

    # get JSON response from data source
    # TODO: make sure response is not enormous
    query_params = json.loads(urllib.urlopen( dataset_url ).read())
    # download and write files
    primary = False
    # query_item, hda_id, output_base_path, dataset_id
    for query_item in query_params:
        if isinstance( query_item, list ):
            # TODO: do something with the nested list as a collection
            for query_subitem in query_item:
                primary = download_files_and_write_metadata(query_subitem, json_params, output_base_path,
                                                            metadata_parameter_file, primary, appdata_path)

        elif isinstance( query_item, dict ):
            primary = download_files_and_write_metadata(query_item, json_params, output_base_path,
                                                        metadata_parameter_file, primary, appdata_path)
    metadata_parameter_file.close()

def __main__():
    """ Read the JSON return from a data source. Parse each line and request
    the data, download to "newfilepath", and write metadata.

    Schema
    ------

        [ { "url":"http://url_of_file",
            "name":"encode WigData",
            "extension":"wig",
            "metadata":{ "db_key":"hg19" },
            "extra_data":[ { "url":"http://url_of_ext_file",
                             "path":"rel/path/to/ext_file" }
                         ]
          }
        ]
    """
    # Parse the command line options
    usage = "Usage: json_data_source_mod.py --json_param_file filename [options]"
    parser = optparse.OptionParser(usage = usage)
    parser.add_option("-j", "--json_param_file", type="string",
                      action="store", dest="json_param_file", help="json schema return data")
    parser.add_option("-p", "--path", type="string",
                      action="store", dest="path", help="new file path")
    parser.add_option("-a", "--appdata", type="string",
                      action="store", dest="appdata", help="appdata folder name")
    parser.add_option("-v", "--version", action="store_true", dest="version",
                      default=False, help="display version and exit")

    (options, args) = parser.parse_args()
    if options.version:
        print __version__
    else:
        download_from_json_data( options, args )


if __name__ == "__main__": __main__()
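
# Example invocation (illustrative paths; Galaxy normally builds this command
# line from the tool wrapper):
#   python json_data_source_mod.py -j json_param_file.json -p /tmp/output -a /tmp/output/appdata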