gdcwebapp: comparison of json_collect_data_source.py @ 32:1edc869cd008 (draft)
Uploaded 20170607
| author | fabio |
|---|---|
| date | Wed, 07 Jun 2017 18:01:57 -0400 |
| parents | |
| children | d65de900967e |
comparison: 31:9cb5e4f12ce5 → 32:1edc869cd008

```python
#!/usr/bin/env python
import json
import optparse
import urllib
import os.path
import os
from operator import itemgetter
import tarfile

__version__ = "1.0.0"
CHUNK_SIZE = 2**20  # 1 MB
VALID_CHARS = '.-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ '


def splitext(path):
    for ext in ['.tar.gz', '.tar.bz2']:
        if path.endswith(ext):
            path, ext = path[:-len(ext)], path[-len(ext):]
            break
    else:
        path, ext = os.path.splitext(path)
    return path, ext[1:]
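
# A quick sanity check of splitext's behaviour (hypothetical file names):
#   splitext("reads.tar.gz") -> ("reads", "tar.gz")
#   splitext("reads.bed")    -> ("reads", "bed")
# Plain os.path.splitext would return ("reads.tar", ".gz") for the first case.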


def chunk_write( source_stream, target_stream, source_method = "read", target_method="write" ):
    source_method = getattr( source_stream, source_method )
    target_method = getattr( target_stream, target_method )
    while True:
        chunk = source_method( CHUNK_SIZE )
        if chunk:
            target_method( chunk )
        else:
            break
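
# Minimal usage sketch for chunk_write (hypothetical in-memory streams; any
# pair of objects exposing read()/write() will do):
#   from StringIO import StringIO
#   src, dst = StringIO("x" * (CHUNK_SIZE + 1)), StringIO()
#   chunk_write(src, dst)   # copies the payload in CHUNK_SIZE-sized pieces
#   assert dst.getvalue() == "x" * (CHUNK_SIZE + 1)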


def deconstruct_multi_filename( multi_filename ):
    keys = [ 'primary', 'id', 'name', 'visible', 'file_type' ]
    return dict( zip( keys, multi_filename.split('_') ) )


def construct_multi_filename( id, name, file_type ):
    """ Implementation of *Number of Output datasets cannot be determined until tool run* from documentation_.
    .. _documentation: http://wiki.galaxyproject.org/Admin/Tools/Multiple%20Output%20Files
    """
    filename = "%s_%s_%s_%s_%s" % ( 'primary', id, name, 'visible', file_type )
    return filename
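
# construct_multi_filename and deconstruct_multi_filename are inverses
# (hypothetical values):
#   construct_multi_filename("10", "sample", "bed")
#     -> "primary_10_sample_visible_bed"
#   deconstruct_multi_filename("primary_10_sample_visible_bed")
#     -> {'primary': 'primary', 'id': '10', 'name': 'sample',
#         'visible': 'visible', 'file_type': 'bed'}
# This is why underscores are replaced with dashes elsewhere in this file:
# "_" is reserved as the field separator.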


def download_from_query( query_data, target_output_filename ):
    """ Download the file described by the JSON data and write it to
    target_output_filename.
    """
    query_url = query_data.get( 'url' )
    query_stream = urllib.urlopen( query_url )
    output_stream = open( target_output_filename, 'wb' )
    chunk_write( query_stream, output_stream )
    query_stream.close()
    output_stream.close()

def store_file_from_archive( file_object, target_output_filename, isString=False ):
    """ Store a file extracted from an archive, organizing the extracted files
    as a collection that uses the structure (collection-name)_(file-name).ext
    as file name.
    """
    output_stream = open( target_output_filename, 'wb' )
    if not isString:
        output_stream.write(file_object.read())
    else:
        output_stream.write(file_object)
    output_stream.close()


def download_extra_data( query_ext_data, base_path ):
    """ Download any extra data defined in the JSON.
    NOTE: the "path" value is a relative location for the file on our
    file system; guard against a malicious absolute path that would write
    the file elsewhere on the filesystem.
    """
    for ext_data in query_ext_data:
        if not os.path.exists( base_path ):
            os.mkdir( base_path )
        query_stream = urllib.urlopen( ext_data.get( 'url' ) )
        ext_path = ext_data.get( 'path' )
        dir_path = os.path.normpath( '/'.join( [ base_path, os.path.dirname( ext_path ) ] ) )
        # os.makedirs raises OSError if the directory already exists, e.g.
        # when two extra files share a directory, so create it only if needed
        if not os.path.exists( dir_path ):
            os.makedirs( dir_path )
        output_stream = open( os.path.normpath( '/'.join( [ base_path, ext_path ] ) ), 'wb' )
        chunk_write( query_stream, output_stream )
        query_stream.close()
        output_stream.close()
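
# Each entry of query_ext_data is expected to look like (see the schema in
# __main__ below):
#   {"url": "http://url_of_ext_file", "path": "rel/path/to/ext_file"}
# and is written to <base_path>/rel/path/to/ext_file.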


def metadata_to_json( dataset_id, metadata, filename, ds_type='dataset', primary=False):
    """ Return line-separated JSON """
    meta_dict = dict( type = ds_type,
                      ext = metadata.get( 'extension' ),
                      filename = filename,
                      name = metadata.get( 'name' ),
                      metadata = metadata.get( 'metadata', {} ) )
    if metadata.get( 'extra_data', None ):
        meta_dict[ 'extra_files' ] = '_'.join( [ filename, 'files' ] )
    if primary:
        meta_dict[ 'base_dataset_id' ] = dataset_id
    else:
        meta_dict[ 'dataset_id' ] = dataset_id
    return "%s\n" % json.dumps( meta_dict )
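
# Example of one emitted metadata line (hypothetical values; the real output
# is a single line and key order may differ):
#   metadata_to_json("42", {"extension": "wig", "name": "encode WigData"},
#                    "/tmp/out.dat", ds_type='new_primary_dataset', primary=True)
# yields
#   {"type": "new_primary_dataset", "ext": "wig", "filename": "/tmp/out.dat",
#    "name": "encode WigData", "metadata": {}, "base_dataset_id": "42"}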


def walk_on_archive(target_output_filename, check_ext, archive_name, appdata_path, db_key="?"):
    # dots and underscores are reserved as separators below, so neutralize
    # them in the archive (collection) name
    archive_name = archive_name.replace("_", "-").replace(".", "-")
    with tarfile.open( target_output_filename, check_ext ) as tf:
        for entry in tf:
            if entry.isfile():
                fileobj = tf.extractfile( entry )
                # reserve the underscore for the collection separator
                filename = os.path.basename( entry.name ).replace("_", "-")
                extension = splitext( filename )[1]
                # pattern: (?P<identifier_0>[^_]+)_(?P<identifier_1>[^_]+)_(?P<ext>[^_]+)_(?P<dbkey>[^_]+)
                if len(extension) > 0:
                    filename = (filename[0:len(filename) - (len(extension) + 1)]).replace(".", "-") + "." + extension + "_" + extension
                else:
                    extension = "auto"
                    # append the extension field so the name still matches the
                    # four-field pattern above
                    filename = filename + "_" + extension
                filename_with_collection_prefix = archive_name + "_" + filename + "_" + db_key
                target_entry_output_filename = os.path.join(appdata_path, filename_with_collection_prefix)
                store_file_from_archive( fileobj, target_entry_output_filename )
    return True
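
# For a hypothetical archive named "reads.tar.gz" that contains
# "data/sample_1.txt", downloaded with db_key "hg19", the stored name is
#   reads-tar-gz_sample-1.txt_txt_hg19
# i.e. <archive>_<identifier>_<ext>_<dbkey>, matching the four-field pattern
# in the comment above so the archive members can be grouped into one
# collection.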


def download_files_and_write_metadata(query_item, json_params, output_base_path, metadata_parameter_file, primary, appdata_path, options, args):
    """ Main work function that operates on the JSON representation of
    one dataset and its metadata. Returns True.
    """
    dataset_url, output_filename, \
        extra_files_path, file_name, \
        ext, out_data_name, \
        hda_id, dataset_id = set_up_config_values(json_params)
    extension = query_item.get( 'extension' )
    url = query_item.get( 'url' )
    filename = query_item.get( 'name' )

    # map the URL suffix to a tarfile read mode; an empty string means the
    # download is a plain file rather than an archive
    check_ext = ""
    if url.endswith( "gz" ):
        check_ext = "r:gz"
    elif url.endswith( "bz2" ):
        check_ext = "r:bz2"
    elif url.endswith( "tar" ):
        check_ext = "r:"
    isArchive = bool( check_ext )

    extra_data = query_item.get( 'extra_data', None )
    if primary:
        filename = ''.join( c if c in VALID_CHARS else '-' for c in filename )
        name = construct_multi_filename( hda_id, filename, extension )
        target_output_filename = os.path.normpath( '/'.join( [ output_base_path, name ] ) )
        if not isArchive:
            metadata_parameter_file.write( metadata_to_json( dataset_id, query_item,
                                                             target_output_filename,
                                                             ds_type='new_primary_dataset',
                                                             primary=primary) )
    else:
        target_output_filename = output_filename
        if not isArchive:
            metadata_parameter_file.write( metadata_to_json( dataset_id, query_item,
                                                             target_output_filename,
                                                             ds_type='dataset',
                                                             primary=primary) )

    if not isArchive:
        download_from_query( query_item, target_output_filename )
    else:
        # archives are downloaded into the appdata folder and unpacked below
        target_output_path = os.path.join(appdata_path, filename)
        download_from_query( query_item, target_output_path )
    if extra_data:
        # keep this suffix consistent with the 'extra_files' value written by
        # metadata_to_json
        extra_files_path = '_'.join( [ target_output_filename, 'files' ] )
        download_extra_data( extra_data, extra_files_path )

    # the following code handles archives and decompresses them into a collection
    if isArchive:
        db_key = "?"
        archive_metadata = query_item.get( 'metadata', None )
        if archive_metadata is not None:
            db_key = archive_metadata.get( 'db_key', '?' )
        walk_on_archive(target_output_path, check_ext, filename, appdata_path, db_key)

    return True
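
# To summarize the branches above: plain files are written directly to their
# final location and recorded in the metadata file, while archives are
# diverted to appdata_path and unpacked by walk_on_archive so that their
# members are discovered as a collection (no metadata line is written here
# for archive members).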


def set_up_config_values(json_params):
    """ Parse json_params file and return a tuple of necessary configuration
    values.
    """
    datasource_params = json_params.get( 'param_dict' )
    dataset_url = datasource_params.get( 'URL' )
    output_filename = datasource_params.get( 'output1', None )
    output_data = json_params.get( 'output_data' )
    extra_files_path, file_name, ext, out_data_name, hda_id, dataset_id = \
        itemgetter('extra_files_path', 'file_name', 'ext', 'out_data_name', 'hda_id', 'dataset_id')(output_data[0])
    return (dataset_url, output_filename,
            extra_files_path, file_name,
            ext, out_data_name,
            hda_id, dataset_id)
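
# The json_param_file provided by Galaxy is expected to contain at least the
# fields read above and in download_from_json_data below (hypothetical values;
# only these keys are accessed):
#   {
#     "param_dict":  {"URL": "http://...", "output1": "/path/to/output1"},
#     "output_data": [ {"extra_files_path": "...", "file_name": "...",
#                       "ext": "...", "out_data_name": "...",
#                       "hda_id": "...", "dataset_id": "..."} ],
#     "job_config":  {"TOOL_PROVIDED_JOB_METADATA_FILE": "/path/to/metadata.json"}
#   }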


def download_from_json_data( options, args ):
    """ Parse the returned JSON data and download files. Write metadata
    to a flat JSON file.
    """
    output_base_path = options.path
    appdata_path = options.appdata
    if not os.path.exists(appdata_path):
        os.makedirs(appdata_path)

    # read the tool job configuration file and parse the parameters we need
    with open( options.json_param_file, 'r' ) as param_file:
        json_params = json.loads( param_file.read() )
    print("json_params: " + str(json_params))

    dataset_url, output_filename, \
        extra_files_path, file_name, \
        ext, out_data_name, \
        hda_id, dataset_id = set_up_config_values(json_params)
    # line-separated JSON file to contain all dataset metadata
    metadata_parameter_file = open( json_params['job_config']['TOOL_PROVIDED_JOB_METADATA_FILE'], 'wb' )

    # get the JSON response from the data source
    # TODO: make sure the response is not enormous
    query_params = json.loads(urllib.urlopen( dataset_url ).read())
    # download and write files
    primary = False
    for query_item in query_params:
        if isinstance( query_item, list ):
            # TODO: do something with the nested list as a collection
            for query_subitem in query_item:
                primary = download_files_and_write_metadata(query_subitem, json_params, output_base_path,
                                                            metadata_parameter_file, primary, appdata_path, options, args)

        elif isinstance( query_item, dict ):
            primary = download_files_and_write_metadata(query_item, json_params, output_base_path,
                                                        metadata_parameter_file, primary, appdata_path, options, args)
    metadata_parameter_file.close()


def __main__():
    """ Read the JSON return from a data source. Parse each line and request
    the data, download to "newfilepath", and write metadata.

    Schema
    ------

        [ {"url":"http://url_of_file",
           "name":"encode WigData",
           "extension":"wig",
           "metadata":{"db_key":"hg19"},
           "extra_data":[ {"url":"http://url_of_ext_file",
                           "path":"rel/path/to/ext_file"}
                        ]
          }
        ]

    """
    # Parse the command line options
    usage = "Usage: json_collect_data_source.py --json_param_file filename [options]"
    parser = optparse.OptionParser(usage = usage)
    parser.add_option("-j", "--json_param_file", type="string",
                      action="store", dest="json_param_file", help="JSON schema return data")
    parser.add_option("-p", "--path", type="string",
                      action="store", dest="path", help="new file path")
    parser.add_option("-a", "--appdata", type="string",
                      action="store", dest="appdata", help="appdata folder name")
    parser.add_option("-v", "--version", action="store_true", dest="version",
                      default=False, help="display version and exit")
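
    # A typical invocation from a Galaxy tool wrapper might look like this
    # (hypothetical paths):
    #   python json_collect_data_source.py --json_param_file /tmp/job_params.json \
    #       --path /tmp/output --appdata /tmp/appdata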

    (options, args) = parser.parse_args()
    if options.version:
        print __version__
    else:
        download_from_json_data( options, args )


if __name__ == "__main__": __main__()
```
