# HG changeset patch
# User in_silico
# Date 1500490730 14400
# Node ID cdd97b06c80244dc61d41a8ef21d5af6c097f58f
# Parent 9c6b7291c4e6c78293084d01e4afd1c92d178016
Uploaded
diff -r 9c6b7291c4e6 -r cdd97b06c802 cravat_score_and_annotate-9c6b7291c4e6/cravat_submit.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cravat_score_and_annotate-9c6b7291c4e6/cravat_submit.py Wed Jul 19 14:58:50 2017 -0400
@@ -0,0 +1,134 @@
import csv
import json
import sys
import time
import urllib.request

# Base address of the CRAVAT web service; every REST call and result
# download below is rooted here.
BASE_URL = 'http://staging.cravat.us/CRAVAT'


def submit_job(input_filename, analyses):
    """Submit *input_filename* to the CRAVAT service and return the job id.

    analyses -- value for the service's 'analyses' form field (the
    annotation type chosen in the Galaxy dropdown).
    """
    # `requests` is a third-party dependency of this tool; import it lazily
    # so the pure row-merging helper stays importable without it.
    import requests

    # `with` closes the upload handle (the original leaked it).
    with open(input_filename) as input_file:
        response = requests.post(
            BASE_URL + '/rest/service/submit',
            files={'inputfile': input_file},
            data={'email': 'znylund@insilico.us.com', 'analyses': analyses},
        )
    return json.loads(response.text)['jobid']


def wait_for_success(jobid, poll_interval=2):
    """Poll the CRAVAT status endpoint until *jobid* reports 'Success'.

    Raises RuntimeError if the service reports 'Error' — the original
    code looped forever in that case.
    """
    import requests

    while True:
        check = requests.get(
            BASE_URL + '/rest/service/status', params={'jobid': jobid})
        status = json.loads(check.text)['status']
        if status == 'Success':
            return
        if status == 'Error':
            raise RuntimeError('CRAVAT job %s failed' % jobid)
        time.sleep(poll_interval)


def merge_rows(details_rows, variant_rows, overlap=11):
    """Combine Variant_Additional_Details rows with Variant Result rows.

    The two result files are read in lockstep, row for row.  The header
    row (first cell 'Input line') is merged with duplicate column names
    removed; each data row keeps all detail cells plus the variant-result
    cells after the first *overlap* columns (the leading columns repeat
    the details file — 11 in the current CRAVAT output; TODO confirm
    against the service if its schema changes).  Blank rows and rows whose
    first cell starts with '#' are dropped.  Returns the merged rows.
    """
    merged = []
    paired = iter(variant_rows)
    for row in details_rows:
        # The corresponding row in the Variant Result file.
        row_2 = next(paired)
        # Skip blank lines and '#' comment lines.  (startswith also avoids
        # the original's IndexError on an empty first cell.)
        if not row or row[0].startswith('#'):
            continue
        if row[0] == 'Input line':
            # Header row: all detail headers, then any variant-result
            # headers not already present.
            headers = list(row)
            for value in row_2:
                if value not in headers:
                    headers.append(value)
            merged.append(headers)
        else:
            # Data row: detail cells plus the non-repeated variant cells.
            merged.append(list(row) + list(row_2[overlap:]))
    return merged


def main(argv):
    """CLI entry point: cravat_submit.py <input file> <analyses> <output file>."""
    input_filename, input_select_bar, output_filename = argv[1:4]

    jobid = submit_job(input_filename, input_select_bar)
    wait_for_success(jobid)

    # Scratch copies of the two downloaded result files, time-stamped as in
    # the original tool to reduce name collisions between runs.
    stamp = time.strftime('%H:%M')
    variant_file = stamp + '_Z_Variant_Result.tsv'
    details_file = stamp + '_Z_Additional_Details.tsv'
    urllib.request.urlretrieve(
        BASE_URL + '/results/' + jobid + '/Variant.Result.tsv',
        variant_file)
    urllib.request.urlretrieve(
        BASE_URL + '/results/' + jobid + '/Variant_Additional_Details.Result.tsv',
        details_file)

    # Merge the two TSVs into the Galaxy output file.  newline='' is the
    # documented way to hand a text file to csv.writer on Python 3.
    with open(variant_file) as variants, open(details_file) as details, \
            open(output_filename, 'w', newline='') as out:
        writer = csv.writer(out, delimiter='\t')
        writer.writerows(merge_rows(
            csv.reader(details, delimiter='\t'),
            csv.reader(variants, delimiter='\t')))


if __name__ == '__main__':
    main(sys.argv)
diff -r 9c6b7291c4e6 -r cdd97b06c802 cravat_score_and_annotate-9c6b7291c4e6/cravat_submit.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cravat_score_and_annotate-9c6b7291c4e6/cravat_submit.xml Wed Jul 19 14:58:50 2017 -0400
@@ -0,0 +1,34 @@
+
+ Submits, checks for, and retrieves data for cancer annotation
+ cravat_submit.py $input $dropdown $output
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ This tool submits, checks for, and retrieves data for cancer annotation.
+
+
+
diff -r 9c6b7291c4e6 -r cdd97b06c802 cravat_submit.py
--- a/cravat_submit.py Wed Jul 19 14:57:54 2017 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-import requests
-import json
-import time
-import urllib
-import sys
-import csv
-
-input_filename = sys.argv[1]
-input_select_bar = sys.argv[2]
-output_filename = sys.argv[3]
-
-#in_file = open('input_call.txt', "r")
-#out_file = open('output_call.txt', "w")
-
-write_header = True
-
-#plugs in params to given URL
-submit = requests.post('http://staging.cravat.us/CRAVAT/rest/service/submit', files={'inputfile':open(input_filename)}, data={'email':'znylund@insilico.us.com', 'analyses': input_select_bar})
-#,'analysis':input_select_bar,'functionalannotation': "on"})
-#Makes the data a json dictionary, takes out only the job ID
-jobid = json.loads(submit.text)['jobid']
-#out_file.write(jobid)
-submitted = json.loads(submit.text)['status']
-#out_file.write('\t' + submitted)
-
-#loops until we find a status equal to Success, then breaks
-while True:
- check = requests.get('http://staging.cravat.us/CRAVAT/rest/service/status', params={'jobid': jobid})
- status = json.loads(check.text)['status']
- resultfileurl = json.loads(check.text)['resultfileurl']
- #out_file.write(str(status) + ', ')
- if status == 'Success':
- #out_file.write('\t' + resultfileurl)
- break
- else:
- time.sleep(2)
-
-#out_file.write('\n')
-
-#creates three files
-file_1 = time.strftime("%H:%M") + '_Z_Variant_Result.tsv'
-file_2 = time.strftime("%H:%M") + '_Z_Additional_Details.tsv'
-file_3 = time.strftime("%H:%M") + 'Combined_Variant_Results.tsv'
-
-
-#Download the two results
-urllib.urlretrieve("http://staging.cravat.us/CRAVAT/results/" + jobid + "/" + "Variant.Result.tsv", file_1)
-urllib.urlretrieve("http://staging.cravat.us/CRAVAT/results/" + jobid + "/" + "Variant_Additional_Details.Result.tsv", file_2)
-
-headers = []
-duplicates = []
-
-#opens the Variant Result file and the Variant Additional Details file as csv readers, then opens the output file (galaxy) as a writer
-with open(file_1) as tsvin_1, open(file_2) as tsvin_2, open(output_filename, 'wb') as tsvout:
- tsvreader_1 = csv.reader(tsvin_1, delimiter='\t')
- tsvreader_2 = csv.reader(tsvin_2, delimiter='\t')
- tsvout = csv.writer(tsvout, delimiter='\t')
-
-#loops through each row in the Variant Additional Details file
- for row in tsvreader_2:
- #sets row_2 equal to the same row in Variant Result file
- row_2 = tsvreader_1.next()
- #checks if row is empty or if the first term contains '#'
- if row == [] or row[0][0] == '#':
- continue
- #checks if the row begins with input line
- if row[0] == 'Input line':
- #Goes through each value in the headers list in VAD
- for value in row:
- #Adds each value into headers
- headers.append(value)
- #Loops through the Keys in VR
- for value in row_2:
- #Checks if the value is already in headers
- if value in headers:
- continue
- #else adds the header to headers
- else:
- headers.append(value)
-
- print headers
- tsvout.writerow(headers)
-
-
- else:
-
- cells = []
- #Goes through each value in the next list
- for value in row:
- #adds it to cells
- cells.append(value)
- #Goes through each value from the VR file after position 11 (After it is done repeating from VAD file)
- for value in row_2[11:]:
- #adds in the rest of the values to cells
- cells.append(value)
-
- print cells
- tsvout.writerow(cells)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#a = 'col1\tcol2\tcol3'
-#header_list = a.split('\t')
-
-#loop through the two results, when you first hit header you print out the headers in tabular form
-#Print out each header only once
-#Combine both headers into one output file
-#loop through the rest of the data and assign each value to its assigned header
-#combine this all into one output file
-
-
-
-
-
diff -r 9c6b7291c4e6 -r cdd97b06c802 cravat_submit.xml
--- a/cravat_submit.xml Wed Jul 19 14:57:54 2017 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-
- Submits, checks for, and retrieves data for cancer annotation
- cravat_submit.py $input $dropdown $output
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- This tool submits, checks for, and retrieves data for cancer annotation.
-
-
-