comparison queue_genotype_workflow.py @ 1:d00c4cc7e8c2 draft

Uploaded
author greg
date Mon, 19 Aug 2019 13:25:40 -0400
parents c80fae8c94c1
children 163ecfba5961
comparison of 0:c80fae8c94c1 and 1:d00c4cc7e8c2
--- a/queue_genotype_workflow.py
+++ b/queue_genotype_workflow.py
@@ -6,10 +6,11 @@
 import sys
 import threading
 import time
 
 from bioblend import galaxy
+from datetime import datetime
 from six.moves import configparser
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--affy_metadata', dest='affy_metadata', help='Input Affymetrix 96 well plate metadata file')
 parser.add_argument('--annot', dest='annot', help='Probeset annotation file')
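The new datetime import supports the date-named storage directories introduced in the next hunk. A minimal sketch of the date string it is used to build (the printed value is only an example):

    from datetime import datetime

    # Per-day directory name used by copy_dataset_to_storage,
    # e.g. "2019_08_19".
    date_str = datetime.now().strftime("%Y_%m_%d")
    print(date_str)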
@@ -43,14 +44,18 @@
     new_library_dataset_dict = gi.libraries.copy_from_dataset(library_id, dataset_id)
     return new_library_dataset_dict
 
 
 def copy_dataset_to_storage(src_path, dst_base_path, dataset_name, output_fh):
-    # Copy a dataset via its file path to a storage directory on disk.
-    if not os.path.isdir(dst_base_path):
-        os.makedirs(dst_base_path)
-    dst_path = os.path.join(dst_base_path, dataset_name)
+    # Copy a dataset to a storage directory on disk. Use the date
+    # to name the storage directory to enable storing a file per day
+    # (multiple runs per day will overwrite the existing file).
+    date_str = datetime.now().strftime("%Y_%m_%d")
+    dst_dir = os.path.join(dst_base_path, date_str)
+    if not os.path.isdir(dst_dir):
+        os.makedirs(dst_dir)
+    dst_path = os.path.join(dst_dir, dataset_name)
     shutil.copyfile(src_path, dst_path)
     outputfh.write("Copied %s to storage.\n" % dataset_name)
 
 
 def delete_history_dataset(gi, history_id, dataset_id, outputfh, purge=False):
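For readers skimming the diff, here is the revised copy logic as a self-contained sketch with a hypothetical call; the paths, dataset name, and log file are illustrative only, and the file-handle parameter is named outputfh here so the sketch is self-contained:

    import os
    import shutil
    from datetime import datetime


    def copy_dataset_to_storage(src_path, dst_base_path, dataset_name, outputfh):
        # Copy a dataset into a date-named subdirectory of the storage
        # base path, so one copy per dataset is kept per day; a second
        # run on the same day overwrites that day's copy.
        date_str = datetime.now().strftime("%Y_%m_%d")
        dst_dir = os.path.join(dst_base_path, date_str)
        if not os.path.isdir(dst_dir):
            os.makedirs(dst_dir)
        dst_path = os.path.join(dst_dir, dataset_name)
        shutil.copyfile(src_path, dst_path)
        outputfh.write("Copied %s to storage.\n" % dataset_name)


    # Hypothetical usage: the file ends up in /tmp/genotype_storage/<YYYY_MM_DD>/.
    src = "/tmp/all_genotyped_samples.vcf"
    with open(src, "w") as fh:
        fh.write("##fileformat=VCFv4.2\n")  # placeholder content
    with open("/tmp/copy.log", "w") as log:
        copy_dataset_to_storage(src, "/tmp/genotype_storage",
                                "all_genotyped_samples.vcf", log)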
@@ -424,10 +429,15 @@
         deleted_dataset_dict = delete_library_dataset(admin_gi, ags_library_id, ags_ldda_id, outputfh)
         # To save disk space, delete the all_genotyped_samples hda
         # in the current history to enable later purging by an admin.
         ags_hda_id = get_history_dataset_id_by_name(gi, args.history_id, "all_genotyped_samples", outputfh)
         delete_history_dataset(gi, args.history_id, ags_hda_id, outputfh)
+    else:
+        outputfh.write("\nProcessing ended in error...\n")
+        outputfh.close()
+        lock.release()
+        sys.exit(1)
 else:
     outputfh.write("\nProcessing ended in error...\n")
     outputfh.close()
     lock.release()
     sys.exit(1)
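The branch added here repeats the same error-exit sequence as the existing outer else: log the failure, close the output handle, release the lock, and exit non-zero. A minimal sketch of that shared sequence as a standalone helper, assuming the outputfh and lock objects from the script; the helper name is hypothetical and not part of the changeset:

    import sys


    def exit_with_error(outputfh, lock):
        # Error-exit sequence used by both else branches in the diff:
        # log the failure, close the log, release the lock, exit 1.
        outputfh.write("\nProcessing ended in error...\n")
        outputfh.close()
        lock.release()
        sys.exit(1)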