comparison vsnp_statistics.py @ 1:14e29f7d59ca draft

Uploaded
author greg
date Wed, 29 Apr 2020 16:56:10 -0400
parents c21d338dbdc4
children 321a8259e3f9
comparing 0:c21d338dbdc4 with 1:14e29f7d59ca

--- a/vsnp_statistics.py
+++ b/vsnp_statistics.py
@@ -1,22 +1,25 @@
 #!/usr/bin/env python
 
 import argparse
 import gzip
-import multiprocessing
 import numpy
 import os
 import pandas
-import queue
+import shutil
 
 INPUT_IDXSTATS_DIR = 'input_idxstats'
 INPUT_METRICS_DIR = 'input_metrics'
 INPUT_READS_DIR = 'input_reads'
-OUTPUT_DIR = 'output'
 QUALITYKEY = {'!':'0', '"':'1', '#':'2', '$':'3', '%':'4', '&':'5', "'":'6', '(':'7', ')':'8', '*':'9', '+':'10', ',':'11', '-':'12', '.':'13', '/':'14', '0':'15', '1':'16', '2':'17', '3':'18', '4':'19', '5':'20', '6':'21', '7':'22', '8':'23', '9':'24', ':':'25', ';':'26', '<':'27', '=':'28', '>':'29', '?':'30', '@':'31', 'A':'32', 'B':'33', 'C':'34', 'D':'35', 'E':'36', 'F':'37', 'G':'38', 'H':'39', 'I':'40', 'J':'41', 'K':'42', 'L':'43', 'M':'44', 'N':'45', 'O':'46', 'P':'47', 'Q':'48', 'R':'49', 'S':'50', 'T':'51', 'U':'52', 'V':'53', 'W':'54', 'X':'55', 'Y':'56', 'Z':'57', '_':'1', ']':'1', '[':'1', '\\':'1', '\n':'1', '`':'1', 'a':'1', 'b':'1', 'c':'1', 'd':'1', 'e':'1', 'f':'1', 'g':'1', 'h':'1', 'i':'1', 'j':'1', 'k':'1', 'l':'1', 'm':'1', 'n':'1', 'o':'1', 'p':'1', 'q':'1', 'r':'1', 's':'1', 't':'1', 'u':'1', 'v':'1', 'w':'1', 'x':'1', 'y':'1', 'z':'1', ' ':'1'}
-READCOLUMNS = ['Sample', 'Reference', 'Fastq File', 'Size', 'Total Reads', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30']
-SEP = "\t"
+
+
+def fastq_to_df(fastq_file, gzipped):
+    if gzipped.lower() == "true":
+        return pandas.read_csv(gzip.open(fastq_file, "r"), header=None, sep="^")
+    else:
+        return pandas.read_csv(open(fastq_file, "r"), header=None, sep="^")
 
 
 def get_base_file_name(file_path):
     base_file_name = os.path.basename(file_path)
     if base_file_name.find(".") > 0:
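Note on the hunk above: QUALITYKEY maps FASTQ quality characters to Phred
scores (the printable characters '!' through 'Z' decode to 0-57; lowercase
letters and a few stray characters are collapsed to '1' as a catch-all).
For the standard Sanger / Illumina 1.8+ encoding the same lookup can be done
arithmetically; a minimal standalone sketch, not part of the changeset:

    # For Phred+33 quality characters '!' .. 'Z', QUALITYKEY[c] == ord(c) - 33.
    quality_line = "IIIIHHH###"  # illustrative FASTQ quality string
    phred_scores = [ord(c) - 33 for c in quality_line]
    print("%.1f" % (sum(phred_scores) / len(phred_scores)))  # mean quality: 28.3

The new fastq_to_df() helper reads the whole FASTQ file with sep="^" (a
character that rarely, if ever, appears in FASTQ records), producing a
single-column data frame with one row per line; iloc[3::4] later selects
every fourth row, i.e. the quality line of each four-line record.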
50 return "%s%d bytes" % (prefix, size) 53 return "%s%d bytes" % (prefix, size)
51 return "%s%.1f %s" % (prefix, size, word) 54 return "%s%.1f %s" % (prefix, size, word)
52 return '??? bytes' 55 return '??? bytes'
53 56
54 57
55 def output_read_stats(gzipped, fastq_file, ofh, sampling_number=10000, output_sample=False, dbkey=None, collection=False): 58 def output_statistics(reads_files, idxstats_files, metrics_files, output_file, gzipped, dbkey):
56 file_name_base = os.path.basename(fastq_file) 59 # Produce an Excel spreadsheet that
57 # Output a 2-column file where column 1 is 60 # contains a row for each sample.
58 # the labels and column 2 is the values. 61 columns = ['Reference', 'File Size', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30',
59 if output_sample: 62 'Total Reads', 'All Mapped Reads', 'Unmapped Reads', 'Unmapped Reads Percentage of Total',
60 # The Sample and Reference columns should be 63 'Reference with Coverage', 'Average Depth of Coverage', 'Good SNP Count']
61 # output only once, so this block handles 64 data_frames = []
62 # paired reads, where the columns are not 65 for i, fastq_file in enumerate(reads_files):
63 # output for Read2. 66 idxstats_file = idxstats_files[i]
64 try: 67 metrics_file = metrics_files[i]
65 # Illumina read file names are something like: 68 file_name_base = os.path.basename(fastq_file)
66 # 13-1941-6_S4_L001_R1_600000_fastq_gz 69 # Read fastq_file into a data frame.
67 sample = file_name_base.split("_")[0] 70 fastq_df = fastq_to_df(fastq_file, gzipped)
68 except Exception: 71 total_reads = int(len(fastq_df.index) / 4)
69 sample = "" 72 current_sample_df = pandas.DataFrame(index=[file_name_base], columns=columns)
70 # Sample 73 # Reference
71 ofh.write("Sample%s%s\n" % (SEP, sample)) 74 current_sample_df.at[file_name_base, 'Reference'] = dbkey
72 ofh.write("Reference%s%s\n" % (SEP, dbkey)) 75 # File Size
73 if collection: 76 current_sample_df.at[file_name_base, 'File Size'] = nice_size(os.path.getsize(fastq_file))
74 read = "Read" 77 # Mean Read Length
78 sampling_size = 10000
79 if sampling_size > total_reads:
80 sampling_size = total_reads
81 fastq_df = fastq_df.iloc[3::4].sample(sampling_size)
82 dict_mean = {}
83 list_length = []
84 for index, row in fastq_df.iterrows():
85 base_qualities = []
86 for base in list(row.array[0]):
87 base_qualities.append(int(QUALITYKEY[base]))
88 dict_mean[index] = numpy.mean(base_qualities)
89 list_length.append(len(row.array[0]))
90 current_sample_df.at[file_name_base, 'Mean Read Length'] = "%.1f" % numpy.mean(list_length)
91 # Mean Read Quality
92 df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave'])
93 current_sample_df.at[file_name_base, 'Mean Read Quality'] = "%.1f" % df_mean['ave'].mean()
94 # Reads Passing Q30
95 reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30])
96 reads_passing_q30 = "{:10.2f}".format(reads_gt_q30 / sampling_size)
97 current_sample_df.at[file_name_base, 'Reads Passing Q30'] = reads_passing_q30
98 # Total Reads
99 current_sample_df.at[file_name_base, 'Total Reads'] = total_reads
100 # All Mapped Reads
101 all_mapped_reads, unmapped_reads = process_idxstats_file(idxstats_file)
102 current_sample_df.at[file_name_base, 'All Mapped Reads'] = all_mapped_reads
103 # Unmapped Reads
104 current_sample_df.at[file_name_base, 'Unmapped Reads'] = unmapped_reads
105 # Unmapped Reads Percentage of Total
106 if unmapped_reads > 0:
107 unmapped_reads_percentage = "{:10.2f}".format(unmapped_reads / total_reads)
75 else: 108 else:
76 read = "Read1" 109 unmapped_reads_percentage = 0
77 else: 110 current_sample_df.at[file_name_base, 'Unmapped Reads Percentage of Total'] = unmapped_reads_percentage
78 read = "Read2" 111 # Reference with Coverage
79 # Read 112 ref_with_coverage, avg_depth_of_coverage, good_snp_count = process_metrics_file(metrics_file)
80 ofh.write("%s File%s%s\n" % (read, SEP, file_name_base)) 113 current_sample_df.at[file_name_base, 'Reference with Coverage'] = ref_with_coverage
81 # File Size 114 # Average Depth of Coverage
82 ofh.write("%s File Size%s%s\n" % (read, SEP, nice_size(os.path.getsize(fastq_file)))) 115 current_sample_df.at[file_name_base, 'Average Depth of Coverage'] = avg_depth_of_coverage
83 if gzipped.lower() == "true": 116 # Good SNP Count
84 df = pandas.read_csv(gzip.open(fastq_file, "r"), header=None, sep="^") 117 current_sample_df.at[file_name_base, 'Good SNP Count'] = good_snp_count
85 else: 118 data_frames.append(current_sample_df)
86 df = pandas.read_csv(open(fastq_file, "r"), header=None, sep="^") 119 excel_df = pandas.concat(data_frames)
87 total_read_count = int(len(df.index) / 4) 120 excel_file_name = "output.xlsx"
88 # Readx Total Reads 121 writer = pandas.ExcelWriter(excel_file_name, engine='xlsxwriter')
89 ofh.write("%s Total Reads%s%s\n" % (read, SEP, total_read_count)) 122 excel_df.to_excel(writer, sheet_name='Sheet1')
90 # Mean Read Length 123 writer.save()
91 sampling_size = int(sampling_number) 124 shutil.move(excel_file_name, output_file)
92 if sampling_size > total_read_count: 125
93 sampling_size = total_read_count 126
94 df = df.iloc[3::4].sample(sampling_size) 127 def process_idxstats_file(idxstats_file):
95 dict_mean = {} 128 all_mapped_reads = 0
96 list_length = [] 129 unmapped_reads = 0
97 for index, row in df.iterrows(): 130 with open(idxstats_file, "r") as fh:
98 base_qualities = [] 131 for i, line in enumerate(fh):
99 for base in list(row.array[0]): 132 items = line.split("\t")
100 base_qualities.append(int(QUALITYKEY[base])) 133 if i == 0:
101 dict_mean[index] = numpy.mean(base_qualities) 134 # NC_002945.4 4349904 213570 4047
102 list_length.append(len(row.array[0])) 135 all_mapped_reads = int(items[2])
103 ofh.write("%s Mean Read Length%s%s\n" % (read, SEP, "%.1f" % numpy.mean(list_length))) 136 elif i == 1:
104 # Mean Read Quality 137 # * 0 0 82774
105 df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave']) 138 unmapped_reads = int(items[3])
106 ofh.write("%s Mean Read Quality%s%s\n" % (read, SEP, "%.1f" % df_mean['ave'].mean())) 139 return all_mapped_reads, unmapped_reads
107 # Reads Passing Q30 140
108 reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30]) 141
109 reads_passing_q30 = "{:10.2f}".format(reads_gt_q30 / sampling_size) 142 def process_metrics_file(metrics_file):
110 ofh.write("%s reads passing Q30%s%s\n" % (read, SEP, reads_passing_q30)) 143 ref_with_coverage = '0%'
111 return total_read_count 144 avg_depth_of_coverage = 0
112 145 good_snp_count = 0
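Note: the rewritten output_statistics() accumulates a single-row data frame
per fastq input and concatenates the rows into one sheet. A minimal sketch of
that accumulate-and-concat pattern, with a hypothetical sample name (taken
from the file-name comment in the deleted code) and dummy values:

    import pandas

    columns = ['Reference', 'Total Reads', 'Good SNP Count']
    data_frames = []
    for sample, values in [('13-1941-6_S4_L001_R1_600000_fastq_gz', ['AF2122/97', 600000, 611])]:
        row = pandas.DataFrame(index=[sample], columns=columns)
        for column, value in zip(columns, values):
            row.at[sample, column] = value
        data_frames.append(row)
    excel_df = pandas.concat(data_frames)
    # Writing .xlsx requires an engine such as xlsxwriter or openpyxl.
    excel_df.to_excel('stats.xlsx', sheet_name='Sheet1')

One caveat for later readers: pandas.ExcelWriter.save(), used in the hunk
above, was current API when this changeset was written, but newer pandas
releases deprecate it in favor of close().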
@@ -114,58 +127,35 @@
-def output_statistics(task_queue, read2, collection, gzipped, dbkey, timeout):
-    while True:
-        try:
-            tup = task_queue.get(block=True, timeout=timeout)
-        except queue.Empty:
-            break
-        read_file, idxstats_file, metrics_file, output_file = tup
-        total_reads = 0
-        with open(output_file, "w") as ofh:
-            total_reads += output_read_stats(gzipped, read_file, ofh, output_sample=True, dbkey=dbkey, collection=collection)
-            if read2 is not None:
-                total_reads += output_read_stats(gzipped, read2, ofh)
-            ofh.write("Total Reads%s%d\n" % (SEP, total_reads))
-            with open(idxstats_file, "r") as ifh:
-                unmapped_reads = 0
-                for i, line in enumerate(ifh):
-                    items = line.split("\t")
-                    if i == 0:
-                        # NC_002945.4 4349904 213570 4047
-                        ofh.write("All Mapped Reads%s%s\n" % (SEP, items[2]))
-                    elif i == 1:
-                        # * 0 0 82774
-                        unmapped_reads = int(items[3])
-                        ofh.write("Unmapped Reads%s%d\n" % (SEP, unmapped_reads))
-                percent_str = "Unmapped Reads Percentage of Total"
-                if unmapped_reads > 0:
-                    unmapped_reads_percentage = "{:10.2f}".format(unmapped_reads / total_reads)
-                    ofh.write("%s%s%s\n" % (percent_str, SEP, unmapped_reads_percentage))
-                else:
-                    ofh.write("%s%s0\n" % (percent_str, SEP))
-            with open(metrics_file, "r") as ifh:
-                for i, line in enumerate(ifh):
-                    if i == 0:
-                        # Skip comments.
-                        continue
-                    items = line.split("\t")
-                    if i == 1:
-                        # MarkDuplicates 10.338671 98.74%
-                        ofh.write("Average Depth of Coverage%s%s\n" % (SEP, items[2]))
-                        ofh.write("Reference with Coverage%s%s\n" % (SEP, items[3]))
-                    elif i == 2:
-                        # VCFfilter 611
-                        ofh.write("Good SNP Count%s%s\n" % (SEP, items[1]))
-        task_queue.task_done()
-
-
-def set_num_cpus(num_files, processes):
-    num_cpus = int(multiprocessing.cpu_count())
-    if num_files < num_cpus and num_files < processes:
-        return num_files
-    if num_cpus < processes:
-        half_cpus = int(num_cpus / 2)
-        if num_files < half_cpus:
-            return num_files
-        return half_cpus
-    return processes
+def process_idxstats_file(idxstats_file):
+    all_mapped_reads = 0
+    unmapped_reads = 0
+    with open(idxstats_file, "r") as fh:
+        for i, line in enumerate(fh):
+            items = line.split("\t")
+            if i == 0:
+                # NC_002945.4 4349904 213570 4047
+                all_mapped_reads = int(items[2])
+            elif i == 1:
+                # * 0 0 82774
+                unmapped_reads = int(items[3])
+    return all_mapped_reads, unmapped_reads
+
+
+def process_metrics_file(metrics_file):
+    ref_with_coverage = '0%'
+    avg_depth_of_coverage = 0
+    good_snp_count = 0
+    with open(metrics_file, "r") as ifh:
+        for i, line in enumerate(ifh):
+            if i == 0:
+                # Skip comments.
+                continue
+            items = line.split("\t")
+            if i == 1:
+                # MarkDuplicates 10.338671 98.74%
+                ref_with_coverage = items[3]
+                avg_depth_of_coverage = items[2]
+            elif i == 2:
+                # VCFfilter 611
+                good_snp_count = items[1]
+    return ref_with_coverage, avg_depth_of_coverage, good_snp_count
 
 
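Note: samtools idxstats emits one tab-separated line per reference sequence
(name, sequence length, mapped read-segments, unmapped read-segments),
followed by a final '*' line for unplaced reads, so process_idxstats_file()
assumes a single-reference alignment: line 0 supplies the mapped count and
line 1 the unmapped count, exactly as in the inline example comments. A
standalone sketch using those same two example lines:

    # Parse the two idxstats rows quoted in the comments above.
    idxstats_text = "NC_002945.4\t4349904\t213570\t4047\n*\t0\t0\t82774\n"
    rows = [line.split("\t") for line in idxstats_text.splitlines()]
    all_mapped_reads = int(rows[0][2])  # 213570
    unmapped_reads = int(rows[1][3])    # 82774
    print(all_mapped_reads, unmapped_reads)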
@@ -172,14 +162,14 @@
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
 
     parser.add_argument('--read1', action='store', dest='read1', required=False, default=None, help='Required: single read')
     parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')
     parser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference dbkey')
     parser.add_argument('--gzipped', action='store', dest='gzipped', help='Input files are gzipped')
     parser.add_argument('--samtools_idxstats', action='store', dest='samtools_idxstats', required=False, default=None, help='Output of samtools_idxstats')
-    parser.add_argument('--output', action='store', dest='output', required=False, default=None, help='Output statistics file')
+    parser.add_argument('--output', action='store', dest='output', help='Output Excel statistics file')
     parser.add_argument('--vsnp_azc', action='store', dest='vsnp_azc', required=False, default=None, help='Output of vsnp_add_zero_coverage')
-    parser.add_argument('--processes', action='store', dest='processes', type=int, help='User-selected number of processes to use for job splitting')
 
     args = parser.parse_args()
+    print("args:\n%s\n" % str(args))
 
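Note: with the job-splitting path removed, --processes is gone and --output
is always consumed by the final output_statistics() call. A hypothetical
single-sample invocation (all file names illustrative) would look like:
python vsnp_statistics.py --read1 sample_R1.fastq.gz --read2 sample_R2.fastq.gz --gzipped true --dbkey mbovis --samtools_idxstats sample.idxstats --vsnp_azc sample_metrics.tabular --output sample_statistics.xlsx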
@@ -186,51 +176,27 @@
     reads_files = []
     idxstats_files = []
     metrics_files = []
-    output_files = []
-    if args.output is not None:
-        # The inputs were not dataset collections, so
+    # Accumulate inputs.
+    if args.read1 is not None:
+        # The inputs are not dataset collections, so
         # read1, read2 (possibly) and vsnp_azc will also
         # not be None.
-        collection = False
         reads_files.append(args.read1)
         idxstats_files.append(args.samtools_idxstats)
         metrics_files.append(args.vsnp_azc)
-        output_files.append(args.output)
+        if args.read2 is not None:
+            reads_files.append(args.read2)
+            idxstats_files.append(args.samtools_idxstats)
+            metrics_files.append(args.vsnp_azc)
     else:
-        collection = True
        for file_name in sorted(os.listdir(INPUT_READS_DIR)):
             file_path = os.path.abspath(os.path.join(INPUT_READS_DIR, file_name))
             reads_files.append(file_path)
             base_file_name = get_base_file_name(file_path)
-            output_files.append(os.path.abspath(os.path.join(OUTPUT_DIR, "%s.tabular" % base_file_name)))
         for file_name in sorted(os.listdir(INPUT_IDXSTATS_DIR)):
             file_path = os.path.abspath(os.path.join(INPUT_IDXSTATS_DIR, file_name))
             idxstats_files.append(file_path)
         for file_name in sorted(os.listdir(INPUT_METRICS_DIR)):
             file_path = os.path.abspath(os.path.join(INPUT_METRICS_DIR, file_name))
             metrics_files.append(file_path)
-
-    multiprocessing.set_start_method('spawn')
-    queue1 = multiprocessing.JoinableQueue()
-    num_files = len(output_files)
-    cpus = set_num_cpus(num_files, args.processes)
-    # Set a timeout for get()s in the queue.
-    timeout = 0.05
-
-    for i, output_file in enumerate(output_files):
-        read_file = reads_files[i]
-        idxstats_file = idxstats_files[i]
-        metrics_file = metrics_files[i]
-        queue1.put((read_file, idxstats_file, metrics_file, output_file))
-
-    # Complete the output_statistics task.
-    processes = [multiprocessing.Process(target=output_statistics, args=(queue1, args.read2, collection, args.gzipped, args.dbkey, timeout, )) for _ in range(cpus)]
-    for p in processes:
-        p.start()
-    for p in processes:
-        p.join()
-    queue1.join()
-
-    if queue1.empty():
-        queue1.close()
-        queue1.join_thread()
+    output_statistics(reads_files, idxstats_files, metrics_files, args.output, args.gzipped, args.dbkey)
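Note: because output_statistics() is now called once with the accumulated
file lists, the whole multiprocessing scaffold (set_num_cpus, the joinable
queue, and the worker processes) could be deleted; the tool runs as a single
process and always produces one Excel workbook. A quick sanity check of the
finished workbook, as a sketch with a hypothetical path:

    import pandas

    # Reading .xlsx requires an engine such as openpyxl.
    stats_df = pandas.read_excel('sample_statistics.xlsx', index_col=0)
    print(stats_df[['Total Reads', 'Good SNP Count']])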