#!/usr/bin/env python
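# Gather per-sample statistics for a vSNP run: read metrics computed from each
# FASTQ file, mapped/unmapped read counts parsed from samtools idxstats output,
# and coverage/SNP metrics parsed from vsnp_add_zero_coverage output, written
# to an Excel spreadsheet with one row per sample.
#
# Hypothetical invocation (script and data file names are illustrative only):
#   python vsnp_statistics.py --read1 sample_1.fastq.gz --gzipped true \
#       --dbkey NC_002945.4 --samtools_idxstats sample.idxstats.tabular \
#       --vsnp_azc sample.metrics.tabular --output statistics.xlsx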

import argparse
import gzip
import numpy
import os
import pandas
import shutil

INPUT_IDXSTATS_DIR = 'input_idxstats'
INPUT_METRICS_DIR = 'input_metrics'
INPUT_READS_DIR = 'input_reads'
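# QUALITYKEY maps each quality character to its Phred+33 score as a string
# ('!' -> '0' through 'Z' -> '57'); lowercase letters, brackets, and other
# characters that should not appear in a quality line fall back to '1' so an
# unexpected byte cannot raise a KeyError in the scoring loop below.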
QUALITYKEY = {'!': '0', '"': '1', '#': '2', '$': '3', '%': '4', '&': '5', "'": '6', '(': '7',
              ')': '8', '*': '9', '+': '10', ',': '11', '-': '12', '.': '13', '/': '14', '0': '15',
              '1': '16', '2': '17', '3': '18', '4': '19', '5': '20', '6': '21', '7': '22',
              '8': '23', '9': '24', ':': '25', ';': '26', '<': '27', '=': '28', '>': '29',
              '?': '30', '@': '31', 'A': '32', 'B': '33', 'C': '34', 'D': '35', 'E': '36',
              'F': '37', 'G': '38', 'H': '39', 'I': '40', 'J': '41', 'K': '42', 'L': '43',
              'M': '44', 'N': '45', 'O': '46', 'P': '47', 'Q': '48', 'R': '49', 'S': '50',
              'T': '51', 'U': '52', 'V': '53', 'W': '54', 'X': '55', 'Y': '56', 'Z': '57',
              '_': '1', ']': '1', '[': '1', '\\': '1', '\n': '1', '`': '1', 'a': '1', 'b': '1',
              'c': '1', 'd': '1', 'e': '1', 'f': '1', 'g': '1', 'h': '1', 'i': '1', 'j': '1',
              'k': '1', 'l': '1', 'm': '1', 'n': '1', 'o': '1', 'p': '1', 'q': '1', 'r': '1',
              's': '1', 't': '1', 'u': '1', 'v': '1', 'w': '1', 'x': '1', 'y': '1', 'z': '1',
              ' ': '1'}


def fastq_to_df(fastq_file, gzipped):
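    # Read the FASTQ file into a single-column data frame, one row per line.
    # The "^" separator is presumably chosen because it is unlikely to occur
    # in a FASTQ line (it is absent from QUALITYKEY), so each line lands
    # intact in column 0.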
    if gzipped.lower() == "true":
        # Open gzipped files in text mode; gzip.open defaults to binary.
        return pandas.read_csv(gzip.open(fastq_file, "rt"), header=None, sep="^")
    else:
        return pandas.read_csv(open(fastq_file, "r"), header=None, sep="^")


def get_base_file_name(file_path):
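    # Recover the sample name by stripping either a dot extension or a
    # trailing "_<suffix>" (e.g. a hypothetical "sample_fastq" -> "sample").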
    base_file_name = os.path.basename(file_path)
    if base_file_name.find(".") > 0:
        # Eliminate the extension.
        return os.path.splitext(base_file_name)[0]
    elif base_file_name.find("_") > 0:
        # The dot extension was likely changed to
        # the "_" character.
        items = base_file_name.split("_")
        return "_".join(items[0:-1])
    else:
        return base_file_name


def nice_size(size):
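    # Examples, traced from the logic below: nice_size(2600) -> "2.5 KB",
    # nice_size(-1) -> "-1 bytes", nice_size("oops") -> "??? bytes".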
    # Returns a readably formatted string with the size.
    words = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
    prefix = ''
    try:
        size = float(size)
        if size < 0:
            size = abs(size)
            prefix = '-'
    except Exception:
        return '??? bytes'
    for ind, word in enumerate(words):
        step = 1024 ** (ind + 1)
        if step > size:
            size = size / float(1024 ** ind)
            if word == 'bytes':  # No decimals for bytes
                return "%s%d bytes" % (prefix, size)
            return "%s%.1f %s" % (prefix, size, word)
    return '??? bytes'


def output_statistics(reads_files, idxstats_files, metrics_files, output_file, gzipped, dbkey):
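    # The three input lists are parallel: entry i of idxstats_files and
    # metrics_files belongs to the same sample as reads_files[i].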
    # Produce an Excel spreadsheet that
    # contains a row for each sample.
    columns = ['Reference', 'File Size', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30',
               'Total Reads', 'All Mapped Reads', 'Unmapped Reads', 'Unmapped Reads Percentage of Total',
               'Reference with Coverage', 'Average Depth of Coverage', 'Good SNP Count']
    data_frames = []
    for i, fastq_file in enumerate(reads_files):
        idxstats_file = idxstats_files[i]
        metrics_file = metrics_files[i]
        file_name_base = os.path.basename(fastq_file)
        # Read fastq_file into a data frame.
        fastq_df = fastq_to_df(fastq_file, gzipped)
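        # A FASTQ record is four lines (header, sequence, "+", quality),
        # so the number of reads is the line count divided by four.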
        total_reads = int(len(fastq_df.index) / 4)
        current_sample_df = pandas.DataFrame(index=[file_name_base], columns=columns)
        # Reference
        current_sample_df.at[file_name_base, 'Reference'] = dbkey
        # File Size
        current_sample_df.at[file_name_base, 'File Size'] = nice_size(os.path.getsize(fastq_file))
        # Mean Read Length
        sampling_size = 10000
        if sampling_size > total_reads:
            sampling_size = total_reads
        fastq_df = fastq_df.iloc[3::4].sample(sampling_size)
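        # The frame now holds only the quality line of each four-line record
        # (iloc[3::4]), sampled down to at most 10,000 reads; the loop below
        # scores each sampled quality string via QUALITYKEY.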
        dict_mean = {}
        list_length = []
        for index, row in fastq_df.iterrows():
            base_qualities = []
            for base in list(row.array[0]):
                base_qualities.append(int(QUALITYKEY[base]))
            dict_mean[index] = numpy.mean(base_qualities)
            list_length.append(len(row.array[0]))
        current_sample_df.at[file_name_base, 'Mean Read Length'] = "%.1f" % numpy.mean(list_length)
        # Mean Read Quality
        df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave'])
        current_sample_df.at[file_name_base, 'Mean Read Quality'] = "%.1f" % df_mean['ave'].mean()
        # Reads Passing Q30
        reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30])
        reads_passing_q30 = "{:10.2f}".format(reads_gt_q30 / sampling_size)
        current_sample_df.at[file_name_base, 'Reads Passing Q30'] = reads_passing_q30
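        # Note that this value is the fraction (0.00-1.00) of sampled reads
        # whose mean quality is >= 30, not an absolute read count.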
        # Total Reads
        current_sample_df.at[file_name_base, 'Total Reads'] = total_reads
        # All Mapped Reads
        all_mapped_reads, unmapped_reads = process_idxstats_file(idxstats_file)
        current_sample_df.at[file_name_base, 'All Mapped Reads'] = all_mapped_reads
        # Unmapped Reads
        current_sample_df.at[file_name_base, 'Unmapped Reads'] = unmapped_reads
        # Unmapped Reads Percentage of Total
        if unmapped_reads > 0:
            unmapped_reads_percentage = "{:10.2f}".format(unmapped_reads / total_reads)
        else:
            unmapped_reads_percentage = 0
        current_sample_df.at[file_name_base, 'Unmapped Reads Percentage of Total'] = unmapped_reads_percentage
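        # As with Reads Passing Q30, this is a 0.00-1.00 fraction despite
        # the "Percentage" in the column name.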
        # Reference with Coverage
        ref_with_coverage, avg_depth_of_coverage, good_snp_count = process_metrics_file(metrics_file)
        current_sample_df.at[file_name_base, 'Reference with Coverage'] = ref_with_coverage
        # Average Depth of Coverage
        current_sample_df.at[file_name_base, 'Average Depth of Coverage'] = avg_depth_of_coverage
        # Good SNP Count
        current_sample_df.at[file_name_base, 'Good SNP Count'] = good_snp_count
        data_frames.append(current_sample_df)
    excel_df = pandas.concat(data_frames)
    excel_file_name = "output.xlsx"
    # A context manager closes the workbook even on error; the older
    # ExcelWriter.save() method was removed in pandas 2.0.
    with pandas.ExcelWriter(excel_file_name, engine='xlsxwriter') as writer:
        excel_df.to_excel(writer, sheet_name='Sheet1')
    shutil.move(excel_file_name, output_file)


def process_idxstats_file(idxstats_file):
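    # samtools idxstats emits one tab-separated line per reference (name,
    # sequence length, mapped reads, unmapped reads) plus a final "*" line
    # whose fourth column counts reads with no coordinates. This parser
    # assumes a single-reference genome: row 0 is the reference, row 1 is "*".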
    all_mapped_reads = 0
    unmapped_reads = 0
    with open(idxstats_file, "r") as fh:
        for i, line in enumerate(fh):
            items = line.split("\t")
            if i == 0:
                # NC_002945.4 4349904 213570 4047
                all_mapped_reads = int(items[2])
            elif i == 1:
                # * 0 0 82774
                unmapped_reads = int(items[3])
    return all_mapped_reads, unmapped_reads


def process_metrics_file(metrics_file):
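    # The metrics file produced by vsnp_add_zero_coverage is expected to hold
    # a header line, then a line whose third and fourth columns are the
    # average depth of coverage and the percent of the reference covered,
    # then a line whose second column is the good SNP count.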
    ref_with_coverage = '0%'
    avg_depth_of_coverage = 0
    good_snp_count = 0
    with open(metrics_file, "r") as ifh:
        for i, line in enumerate(ifh):
            if i == 0:
                # Skip the header line.
                continue
            items = line.split("\t")
            if i == 1:
                # MarkDuplicates 10.338671 98.74%
                ref_with_coverage = items[3]
                avg_depth_of_coverage = items[2]
            elif i == 2:
                # VCFfilter 611
                good_snp_count = items[1]
    return ref_with_coverage, avg_depth_of_coverage, good_snp_count


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    parser.add_argument('--read1', action='store', dest='read1', required=False, default=None, help='Required: single read')
    parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')
    parser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference dbkey')
    parser.add_argument('--gzipped', action='store', dest='gzipped', help='Input files are gzipped')
    parser.add_argument('--samtools_idxstats', action='store', dest='samtools_idxstats', required=False, default=None, help='Output of samtools_idxstats')
    parser.add_argument('--output', action='store', dest='output', help='Output Excel statistics file')
    parser.add_argument('--vsnp_azc', action='store', dest='vsnp_azc', required=False, default=None, help='Output of vsnp_add_zero_coverage')

    args = parser.parse_args()
    print("args:\n%s\n" % str(args))

    reads_files = []
    idxstats_files = []
    metrics_files = []
    # Accumulate inputs.
    if args.read1 is not None:
        # The inputs are not dataset collections, so the
        # single-file samtools_idxstats and vsnp_azc inputs
        # will also not be None.
        reads_files.append(args.read1)
        idxstats_files.append(args.samtools_idxstats)
        metrics_files.append(args.vsnp_azc)
        if args.read2 is not None:
            # Both reads of a pair come from the same alignment, so they
            # share the same idxstats and metrics files.
            reads_files.append(args.read2)
            idxstats_files.append(args.samtools_idxstats)
            metrics_files.append(args.vsnp_azc)
    else:
        for file_name in sorted(os.listdir(INPUT_READS_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_READS_DIR, file_name))
            reads_files.append(file_path)
            base_file_name = get_base_file_name(file_path)
        for file_name in sorted(os.listdir(INPUT_IDXSTATS_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_IDXSTATS_DIR, file_name))
            idxstats_files.append(file_path)
        for file_name in sorted(os.listdir(INPUT_METRICS_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_METRICS_DIR, file_name))
            metrics_files.append(file_path)
    output_statistics(reads_files, idxstats_files, metrics_files, args.output, args.gzipped, args.dbkey)