vsnp_statistics.py @ 15:e5d08707e81f (draft)
"planemo upload for repository https://github.com/gregvonkuster/galaxy_tools/tree/master/tools/sequence_analysis/vsnp/vsnp_statistics commit 920fec3ef21170f77d5bd16f1b77bbe9d187f6d3"

author:   greg
date:     Mon, 02 Aug 2021 21:26:57 +0000
parents:  1becb6606626
children: 377c1a96aae9
#!/usr/bin/env python
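"""
Gather statistics for a vSNP sample and write them as a header line plus one
tab-separated row: read counts, mean read length, mean read quality, and the
fraction of sampled reads with mean quality >= 30 ('Reads Passing Q30') for
one or two FASTQ files, plus mapped/unmapped read counts from a samtools
idxstats file and coverage/SNP metrics produced by vsnp_add_zero_coverage.
"""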

import argparse
import gzip
import os
from functools import partial

import numpy
import pandas
from Bio import SeqIO


class Statistics:

    def __init__(self, reference, fastq_file, file_size, total_reads, mean_read_length, mean_read_quality, reads_passing_q30):
        self.reference = reference
        self.fastq_file = fastq_file
        self.file_size = file_size
        self.total_reads = total_reads
        self.mean_read_length = mean_read_length
        self.mean_read_quality = mean_read_quality
        self.reads_passing_q30 = reads_passing_q30


def nice_size(size):
    # Return a human-readable string for a size given in bytes.
    words = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
    prefix = ''
    try:
        size = float(size)
        if size < 0:
            size = abs(size)
            prefix = '-'
    except Exception:
        return '??? bytes'
    for ind, word in enumerate(words):
        step = 1024 ** (ind + 1)
        if step > size:
            size = size / float(1024 ** ind)
            if word == 'bytes':  # No decimals for bytes
                return "%s%d bytes" % (prefix, size)
            return "%s%.1f %s" % (prefix, size, word)
    return '??? bytes'


def get_statistics(dbkey, fastq_file, gzipped):
    sampling_size = 10000
    # Read fastq_file into a data frame to
    # get the phred quality scores.
    _open = partial(gzip.open, mode='rt') if gzipped else open
    with _open(fastq_file) as fh:
        identifiers = []
        seqs = []
        letter_annotations = []
        for seq_record in SeqIO.parse(fh, "fastq"):
            identifiers.append(seq_record.id)
            seqs.append(seq_record.seq)
            letter_annotations.append(seq_record.letter_annotations["phred_quality"])
    # Convert lists to Pandas series.
    s1 = pandas.Series(identifiers, name='id')
    s2 = pandas.Series(seqs, name='seq')
    s3 = pandas.Series(letter_annotations, name='qual')
    # Gather Series into a data frame, one row per read.
    fastq_df = pandas.DataFrame(dict(id=s1, seq=s2, qual=s3)).set_index(['id'])
    file_size = nice_size(os.path.getsize(fastq_file))
    total_reads = len(fastq_df.index)
    # Mean Read Length
    if sampling_size > total_reads:
        sampling_size = total_reads
    # Randomly sample the specified number of reads.
    fastq_df = fastq_df.sample(sampling_size)
    dict_mean = {}
    list_length = []
    for id, row in fastq_df.iterrows():
        dict_mean[id] = numpy.mean(row['qual'])
        list_length.append(len(row['seq']))
    mean_read_length = '%.1f' % numpy.mean(list_length)
    # Mean Read Quality
    df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave'])
    mean_read_quality = '%.1f' % df_mean['ave'].mean()
    # Reads Passing Q30
    reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30])
    reads_passing_q30 = '{:10.2f}'.format(reads_gt_q30 / sampling_size)
    stats = Statistics(dbkey, os.path.basename(fastq_file), file_size, total_reads, mean_read_length,
                       mean_read_quality, reads_passing_q30)
    return stats
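
# Illustrative call (argument values are hypothetical):
#   stats = get_statistics('NC_002945.4', 'sample_R1.fastq.gz', gzipped=True)
# The returned Statistics object supplies the per-FASTQ columns of the output
# row; at most sampling_size reads are sampled for the length and quality averages.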


def accrue_statistics(dbkey, read1, read2, gzipped):
    read1_stats = get_statistics(dbkey, read1, gzipped)
    if read2 is None:
        read2_stats = None
    else:
        read2_stats = get_statistics(dbkey, read2, gzipped)
    return read1_stats, read2_stats


def output_statistics(read1_stats, read2_stats, idxstats_file, metrics_file, output_file):
    paired_reads = read2_stats is not None
    if paired_reads:
        columns = ['Reference', 'Read1 FASTQ', 'File Size', 'Reads', 'Mean Read Length', 'Mean Read Quality',
                   'Reads Passing Q30', 'Read2 FASTQ', 'File Size', 'Reads', 'Mean Read Length', 'Mean Read Quality',
                   'Reads Passing Q30', 'Total Reads', 'All Mapped Reads', 'Unmapped Reads',
                   'Unmapped Reads Percentage of Total', 'Reference with Coverage', 'Average Depth of Coverage',
                   'Good SNP Count']
    else:
        columns = ['Reference', 'FASTQ', 'File Size', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30',
                   'Total Reads', 'All Mapped Reads', 'Unmapped Reads', 'Unmapped Reads Percentage of Total',
                   'Reference with Coverage', 'Average Depth of Coverage', 'Good SNP Count']
    with open(output_file, "w") as outfh:
        outfh.write("%s\n" % "\t".join(columns))
        line_items = []
        # Gather and output the statistics for the current sample.
        line_items.append(read1_stats.reference)
        line_items.append(read1_stats.fastq_file)
        line_items.append(read1_stats.file_size)
        if paired_reads:
            line_items.append(read1_stats.total_reads)
        line_items.append(read1_stats.mean_read_length)
        line_items.append(read1_stats.mean_read_quality)
        line_items.append(read1_stats.reads_passing_q30)
        if paired_reads:
            line_items.append(read2_stats.fastq_file)
            line_items.append(read2_stats.file_size)
            line_items.append(read2_stats.total_reads)
            line_items.append(read2_stats.mean_read_length)
            line_items.append(read2_stats.mean_read_quality)
            line_items.append(read2_stats.reads_passing_q30)
        # Total Reads
        if paired_reads:
            total_reads = read1_stats.total_reads + read2_stats.total_reads
        else:
            total_reads = read1_stats.total_reads
        line_items.append(total_reads)
        # All Mapped Reads
        all_mapped_reads, unmapped_reads = process_idxstats_file(idxstats_file)
        line_items.append(all_mapped_reads)
        line_items.append(unmapped_reads)
        # Unmapped Reads Percentage of Total
        if unmapped_reads > 0:
            unmapped_reads_percentage = '{:10.2f}'.format(unmapped_reads / total_reads)
        else:
            unmapped_reads_percentage = 0
        line_items.append(unmapped_reads_percentage)
        # Reference with Coverage
        ref_with_coverage, avg_depth_of_coverage, good_snp_count = process_metrics_file(metrics_file)
        line_items.append(ref_with_coverage)
        line_items.append(avg_depth_of_coverage)
        line_items.append(good_snp_count)
        outfh.write('%s\n' % '\t'.join(str(x) for x in line_items))


def process_idxstats_file(idxstats_file):
    # Only the first two lines of the samtools idxstats output are used:
    # the first reference sequence line and the '*' (unmapped) line.
    all_mapped_reads = 0
    unmapped_reads = 0
    with open(idxstats_file, "r") as fh:
        for i, line in enumerate(fh):
            line = line.rstrip('\r\n')
            items = line.split("\t")
            if i == 0:
                # NC_002945.4 4349904 213570 4047
                all_mapped_reads = int(items[2])
            elif i == 1:
                # * 0 0 82774
                unmapped_reads = int(items[3])
    return all_mapped_reads, unmapped_reads


def process_metrics_file(metrics_file):
    ref_with_coverage = '0%'
    avg_depth_of_coverage = 0
    good_snp_count = 0
    with open(metrics_file, "r") as ifh:
        for i, line in enumerate(ifh):
            if i == 0:
                # Skip comments.
                continue
            line = line.rstrip('\r\n')
            items = line.split("\t")
            if i == 1:
                # MarkDuplicates 10.338671 98.74%
                ref_with_coverage = items[3]
                avg_depth_of_coverage = items[2]
            elif i == 2:
                # VCFfilter 611
                good_snp_count = items[1]
    return ref_with_coverage, avg_depth_of_coverage, good_snp_count


parser = argparse.ArgumentParser()

parser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference dbkey')
parser.add_argument('--gzipped', action='store_true', dest='gzipped', required=False, default=False, help='Input files are gzipped')
parser.add_argument('--output', action='store', dest='output', help='Output Excel statistics file')
parser.add_argument('--read1', action='store', dest='read1', help='Required: single read')
parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')
parser.add_argument('--samtools_idxstats', action='store', dest='samtools_idxstats', help='Output of samtools_idxstats')
parser.add_argument('--vsnp_azc_metrics', action='store', dest='vsnp_azc_metrics', help='Output of vsnp_add_zero_coverage')

args = parser.parse_args()

# Accumulate the statistics and write the output file.
read1_stats, read2_stats = accrue_statistics(args.dbkey, args.read1, args.read2, args.gzipped)
output_statistics(read1_stats, read2_stats, args.samtools_idxstats, args.vsnp_azc_metrics, args.output)
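
# Illustrative invocation (file names are hypothetical):
#   python vsnp_statistics.py --dbkey NC_002945.4 --gzipped \
#       --read1 sample_R1.fastq.gz --read2 sample_R2.fastq.gz \
#       --samtools_idxstats sample_idxstats.tabular \
#       --vsnp_azc_metrics sample_azc_metrics.tabular \
#       --output sample_statistics.tabular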