#!/usr/bin/env python
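"""
Write an Excel spreadsheet containing a row of statistics for each sample,
computed from the sample's fastq reads, its samtools idxstats output and
its vsnp_add_zero_coverage metrics.
"""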

import argparse
import gzip
import os
import shutil

import numpy
import pandas

INPUT_IDXSTATS_DIR = 'input_idxstats'
INPUT_METRICS_DIR = 'input_metrics'
INPUT_READS_DIR = 'input_reads'
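# Phred+33 lookup table mapping each character that can appear in a fastq
# quality string to its quality score ('!' = 0 through 'Z' = 57).
# Characters outside that range (brackets, backslash, backtick, lowercase
# letters, whitespace) fall back to a score of 1.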
QUALITYKEY = {'!': '0', '"': '1', '#': '2', '$': '3', '%': '4', '&': '5', "'": '6', '(': '7',
              ')': '8', '*': '9', '+': '10', ',': '11', '-': '12', '.': '13', '/': '14', '0': '15',
              '1': '16', '2': '17', '3': '18', '4': '19', '5': '20', '6': '21', '7': '22', '8': '23',
              '9': '24', ':': '25', ';': '26', '<': '27', '=': '28', '>': '29', '?': '30', '@': '31',
              'A': '32', 'B': '33', 'C': '34', 'D': '35', 'E': '36', 'F': '37', 'G': '38', 'H': '39',
              'I': '40', 'J': '41', 'K': '42', 'L': '43', 'M': '44', 'N': '45', 'O': '46', 'P': '47',
              'Q': '48', 'R': '49', 'S': '50', 'T': '51', 'U': '52', 'V': '53', 'W': '54', 'X': '55',
              'Y': '56', 'Z': '57', '_': '1', ']': '1', '[': '1', '\\': '1', '\n': '1', '`': '1',
              'a': '1', 'b': '1', 'c': '1', 'd': '1', 'e': '1', 'f': '1', 'g': '1', 'h': '1',
              'i': '1', 'j': '1', 'k': '1', 'l': '1', 'm': '1', 'n': '1', 'o': '1', 'p': '1',
              'q': '1', 'r': '1', 's': '1', 't': '1', 'u': '1', 'v': '1', 'w': '1', 'x': '1',
              'y': '1', 'z': '1', ' ': '1'}


def fastq_to_df(fastq_file, gzipped):
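    """
    Read a fastq file into a single-column data frame, one fastq line per
    row. The '^' separator is assumed never to occur in the data, and
    gzipped is the command-line string "true" or "false".
    """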
    if gzipped.lower() == "true":
        return pandas.read_csv(gzip.open(fastq_file, "r"), header=None, sep="^")
    else:
        return pandas.read_csv(open(fastq_file, "r"), header=None, sep="^")


def get_base_file_name(file_path):
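    """
    Return the base name of file_path with its extension removed. If the
    name contains no dot, the final underscore-separated item is assumed
    to be the extension (the dot was likely changed to an underscore).
    """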
    base_file_name = os.path.basename(file_path)
    if base_file_name.find(".") > 0:
        # Eliminate the extension.
        return os.path.splitext(base_file_name)[0]
    elif base_file_name.find("_") > 0:
        # The dot extension was likely changed to
        # the "_" character.
        items = base_file_name.split("_")
        return "_".join(items[0:-1])
    else:
        return base_file_name


def nice_size(size):
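    """
    Return a readably formatted string with the size, e.g. 1024 -> '1.0 KB'.
    """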
    words = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
    prefix = ''
    try:
        size = float(size)
        if size < 0:
            size = abs(size)
            prefix = '-'
    except Exception:
        return '??? bytes'
    for ind, word in enumerate(words):
        step = 1024 ** (ind + 1)
        if step > size:
            size = size / float(1024 ** ind)
            if word == 'bytes':  # No decimals for bytes.
                return "%s%d bytes" % (prefix, size)
            return "%s%.1f %s" % (prefix, size, word)
    return '??? bytes'


def output_statistics(reads_files, idxstats_files, metrics_files, output_file, gzipped, dbkey):
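    """
    Produce an Excel spreadsheet that contains a row of statistics for
    each sample and move it to output_file.
    """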
    columns = ['Reference', 'File Size', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30',
               'Total Reads', 'All Mapped Reads', 'Unmapped Reads', 'Unmapped Reads Percentage of Total',
               'Reference with Coverage', 'Average Depth of Coverage', 'Good SNP Count']
    data_frames = []
    for i, fastq_file in enumerate(reads_files):
        idxstats_file = idxstats_files[i]
        metrics_file = metrics_files[i]
        file_name_base = os.path.basename(fastq_file)
        # Read fastq_file into a data frame.
        fastq_df = fastq_to_df(fastq_file, gzipped)
        # A fastq record spans 4 lines.
        total_reads = int(len(fastq_df.index) / 4)
        current_sample_df = pandas.DataFrame(index=[file_name_base], columns=columns)
        # Reference
        current_sample_df.at[file_name_base, 'Reference'] = dbkey
        # File Size
        current_sample_df.at[file_name_base, 'File Size'] = nice_size(os.path.getsize(fastq_file))
        # Mean Read Length
        sampling_size = 10000
        if sampling_size > total_reads:
            sampling_size = total_reads
        # Sample the quality lines (every 4th line, starting with the 4th).
        fastq_df = fastq_df.iloc[3::4].sample(sampling_size)
        dict_mean = {}
        list_length = []
        for index, row in fastq_df.iterrows():
            base_qualities = []
            for base in list(row.array[0]):
                base_qualities.append(int(QUALITYKEY[base]))
            dict_mean[index] = numpy.mean(base_qualities)
            # The quality string is the same length as the read.
            list_length.append(len(row.array[0]))
        current_sample_df.at[file_name_base, 'Mean Read Length'] = "%.1f" % numpy.mean(list_length)
        # Mean Read Quality
        df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave'])
        current_sample_df.at[file_name_base, 'Mean Read Quality'] = "%.1f" % df_mean['ave'].mean()
        # Reads Passing Q30, reported as a fraction of the sampled reads.
        reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30])
        reads_passing_q30 = "{:10.2f}".format(reads_gt_q30 / sampling_size)
        current_sample_df.at[file_name_base, 'Reads Passing Q30'] = reads_passing_q30
        # Total Reads
        current_sample_df.at[file_name_base, 'Total Reads'] = total_reads
        # All Mapped Reads
        all_mapped_reads, unmapped_reads = process_idxstats_file(idxstats_file)
        current_sample_df.at[file_name_base, 'All Mapped Reads'] = all_mapped_reads
        # Unmapped Reads
        current_sample_df.at[file_name_base, 'Unmapped Reads'] = unmapped_reads
        # Unmapped Reads Percentage of Total, reported as a fraction.
        if unmapped_reads > 0:
            unmapped_reads_percentage = "{:10.2f}".format(unmapped_reads / total_reads)
        else:
            unmapped_reads_percentage = 0
        current_sample_df.at[file_name_base, 'Unmapped Reads Percentage of Total'] = unmapped_reads_percentage
        # Reference with Coverage
        ref_with_coverage, avg_depth_of_coverage, good_snp_count = process_metrics_file(metrics_file)
        current_sample_df.at[file_name_base, 'Reference with Coverage'] = ref_with_coverage
        # Average Depth of Coverage
        current_sample_df.at[file_name_base, 'Average Depth of Coverage'] = avg_depth_of_coverage
        # Good SNP Count
        current_sample_df.at[file_name_base, 'Good SNP Count'] = good_snp_count
        data_frames.append(current_sample_df)
    excel_df = pandas.concat(data_frames)
    excel_file_name = "output.xlsx"
    # The context manager writes the file on exit; the older writer.save()
    # call is deprecated in recent versions of pandas.
    with pandas.ExcelWriter(excel_file_name, engine='xlsxwriter') as writer:
        excel_df.to_excel(writer, sheet_name='Sheet1')
    shutil.move(excel_file_name, output_file)


def process_idxstats_file(idxstats_file):
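    """
    Return the mapped and unmapped read counts from a samtools idxstats
    file. Only the first two lines are read, so the reference is assumed
    to consist of a single sequence: line 1 is the reference record and
    line 2 is the unmapped ('*') record.
    """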
    all_mapped_reads = 0
    unmapped_reads = 0
    with open(idxstats_file, "r") as fh:
        for i, line in enumerate(fh):
            items = line.split("\t")
            if i == 0:
                # NC_002945.4 4349904 213570 4047
                all_mapped_reads = int(items[2])
            elif i == 1:
                # * 0 0 82774
                unmapped_reads = int(items[3])
    return all_mapped_reads, unmapped_reads


def process_metrics_file(metrics_file):
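    """
    Return the percentage of the reference with coverage, the average
    depth of coverage and the good SNP count parsed from a
    vsnp_add_zero_coverage metrics file.
    """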
    ref_with_coverage = '0%'
    avg_depth_of_coverage = 0
    good_snp_count = 0
    with open(metrics_file, "r") as ifh:
        for i, line in enumerate(ifh):
            if i == 0:
                # Skip comments.
                continue
            items = line.split("\t")
            if i == 1:
                # MarkDuplicates 10.338671 98.74%
                ref_with_coverage = items[3]
                avg_depth_of_coverage = items[2]
            elif i == 2:
                # VCFfilter 611
                good_snp_count = items[1]
    return ref_with_coverage, avg_depth_of_coverage, good_snp_count


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    parser.add_argument('--read1', action='store', dest='read1', required=False, default=None, help='Required if inputs are not dataset collections: single read')
    parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')
    parser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference dbkey')
    parser.add_argument('--gzipped', action='store', dest='gzipped', help='Input files are gzipped ("true" or "false")')
    parser.add_argument('--samtools_idxstats', action='store', dest='samtools_idxstats', required=False, default=None, help='Output of samtools idxstats')
    parser.add_argument('--output', action='store', dest='output', help='Output Excel statistics file')
    parser.add_argument('--vsnp_azc', action='store', dest='vsnp_azc', required=False, default=None, help='Output of vsnp_add_zero_coverage')
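
    # Example invocation for a single gzipped sample with non-collection
    # inputs (script and file names are illustrative):
    #   python vsnp_statistics.py --read1 sample_R1.fastq.gz --gzipped true \
    #     --dbkey NC_002945.4 --samtools_idxstats sample.idxstats.tabular \
    #     --vsnp_azc sample_metrics.tabular --output statistics.xlsx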
|
|
172
|
|
173 args = parser.parse_args()
|
1
|
174 print("args:\n%s\n" % str(args))
|
0
|
175
    reads_files = []
    idxstats_files = []
    metrics_files = []
    # Accumulate inputs.
    if args.read1 is not None:
        # The inputs are not dataset collections, so read2 (possibly),
        # samtools_idxstats and vsnp_azc will also not be None.
        reads_files.append(args.read1)
        idxstats_files.append(args.samtools_idxstats)
        metrics_files.append(args.vsnp_azc)
        if args.read2 is not None:
            reads_files.append(args.read2)
            idxstats_files.append(args.samtools_idxstats)
            metrics_files.append(args.vsnp_azc)
    else:
        # The inputs are dataset collections staged into known directories.
        for file_name in sorted(os.listdir(INPUT_READS_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_READS_DIR, file_name))
            reads_files.append(file_path)
            base_file_name = get_base_file_name(file_path)
        for file_name in sorted(os.listdir(INPUT_IDXSTATS_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_IDXSTATS_DIR, file_name))
            idxstats_files.append(file_path)
        for file_name in sorted(os.listdir(INPUT_METRICS_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_METRICS_DIR, file_name))
            metrics_files.append(file_path)
    output_statistics(reads_files, idxstats_files, metrics_files, args.output, args.gzipped, args.dbkey)