comparison: vsnp_statistics.py @ 5:d0fbdeaaa488 (draft)
"planemo upload for repository https://github.com/gregvonkuster/galaxy_tools/tree/master/tools/sequence_analysis/vsnp/vsnp_statistics commit 770e89322a15829580ed9577a853660f63233f32"
author:   greg
date:     Wed, 16 Jun 2021 17:38:47 +0000
parents:  2d6c6b01319e
children: 1becb6606626
comparing 4:2d6c6b01319e with 5:d0fbdeaaa488

--- vsnp_statistics.py (4:2d6c6b01319e)
+++ vsnp_statistics.py (5:d0fbdeaaa488)
@@ -1,34 +1,16 @@
 #!/usr/bin/env python
 
 import argparse
+import csv
 import gzip
 import os
-import shutil
+from functools import partial
 
 import numpy
 import pandas
-
-QUALITYKEY = {'!': '0', '"': '1', '#': '2', '$': '3', '%': '4', '&': '5', "'": '6', '(': '7',
-              ')': '8', '*': '9', '+': '10', ',': '11', '-': '12', '.': '13', '/': '14', '0': '15',
-              '1': '16', '2': '17', '3': '18', '4': '19', '5': '20', '6': '21', '7': '22',
-              '8': '23', '9': '24', ':': '25', ';': '26', '<': '27', '=': '28', '>': '29',
-              '?': '30', '@': '31', 'A': '32', 'B': '33', 'C': '34', 'D': '35', 'E': '36',
-              'F': '37', 'G': '38', 'H': '39', 'I': '40', 'J': '41', 'K': '42', 'L': '43',
-              'M': '44', 'N': '45', 'O': '46', 'P': '47', 'Q': '48', 'R': '49', 'S': '50',
-              'T': '51', 'U': '52', 'V': '53', 'W': '54', 'X': '55', 'Y': '56', 'Z': '57',
-              '_': '1', ']': '1', '[': '1', '\\': '1', '\n': '1', '`': '1', 'a': '1', 'b': '1',
-              'c': '1', 'd': '1', 'e': '1', 'f': '1', 'g': '1', 'h': '1', 'i': '1', 'j': '1',
-              'k': '1', 'l': '1', 'm': '1', 'n': '1', 'o': '1', 'p': '1', 'q': '1', 'r': '1',
-              's': '1', 't': '1', 'u': '1', 'v': '1', 'w': '1', 'x': '1', 'y': '1', 'z': '1',
-              ' ': '1'}
-
-
-def fastq_to_df(fastq_file, gzipped):
-    if gzipped:
-        return pandas.read_csv(gzip.open(fastq_file, "r"), header=None, sep="^")
-    return pandas.read_csv(open(fastq_file, "r"), header=None, sep="^")
+from Bio import SeqIO
 
 
 def nice_size(size):
     # Returns a readably formatted string with the size
     words = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
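The deleted QUALITYKEY table was a hand-rolled Phred+33 decoder (note its catch-all value of '1' for lowercase letters and stray punctuation). Biopython performs the same decoding when it parses a FASTQ record, so the table becomes redundant; for Sanger/Illumina 1.8+ encoding the mapping is simply ord(char) - 33. A minimal sketch of the equivalence, using an illustrative record rather than anything from the tool:

    from io import StringIO

    from Bio import SeqIO

    # One Sanger/Illumina 1.8+ FASTQ record; "II5!" is the Phred+33 quality string.
    record_text = "@read1\nACGT\n+\nII5!\n"
    record = next(SeqIO.parse(StringIO(record_text), "fastq"))

    # Manual decoding, equivalent to the deleted lookup table for in-range characters.
    decoded = [ord(ch) - 33 for ch in "II5!"]

    assert record.letter_annotations["phred_quality"] == decoded  # [40, 40, 20, 0]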
@@ -60,11 +42,24 @@
     for i, fastq_file in enumerate(fastq_files):
         idxstats_file = idxstats_files[i]
         metrics_file = metrics_files[i]
         file_name_base = os.path.basename(fastq_file)
         # Read fastq_file into a data frame.
-        fastq_df = fastq_to_df(fastq_file, gzipped)
+        _open = partial(gzip.open, mode='rt') if gzipped else open
+        with _open(fastq_file) as fh:
+            identifiers = []
+            seqs = []
+            letter_annotations = []
+            for seq_record in SeqIO.parse(fh, "fastq"):
+                identifiers.append(seq_record.id)
+                seqs.append(seq_record.seq)
+                letter_annotations.append(seq_record.letter_annotations["phred_quality"])
+        # Convert lists to Pandas series.
+        s1 = pandas.Series(identifiers, name='id')
+        s2 = pandas.Series(seqs, name='seq')
+        # Gather Series into a data frame.
+        fastq_df = pandas.DataFrame(dict(id=s1, seq=s2)).set_index(['id'])
         total_reads = int(len(fastq_df.index) / 4)
         current_sample_df = pandas.DataFrame(index=[file_name_base], columns=columns)
         # Reference
         current_sample_df.at[file_name_base, 'Reference'] = dbkey
         # File Size
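The replacement loader builds its opener with partial(gzip.open, mode='rt') so one code path reads plain or gzipped FASTQ; gzip.open defaults to binary mode, so it must be asked for text ('rt') before the stream is handed to SeqIO. Factored out as a self-contained sketch (fastq_records_to_df is a hypothetical name, not a function in the tool):

    import gzip
    from functools import partial

    import pandas
    from Bio import SeqIO


    def fastq_records_to_df(fastq_file, gzipped):
        # gzip.open must be forced into text mode ('rt'); plain open defaults to text.
        _open = partial(gzip.open, mode='rt') if gzipped else open
        identifiers, seqs, quals = [], [], []
        with _open(fastq_file) as fh:
            for seq_record in SeqIO.parse(fh, "fastq"):
                identifiers.append(seq_record.id)
                seqs.append(str(seq_record.seq))
                quals.append(seq_record.letter_annotations["phred_quality"])
        # One row per read, indexed by read id; qualities kept in a parallel list.
        return pandas.DataFrame({'id': identifiers, 'seq': seqs}).set_index('id'), quals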
@@ -74,34 +69,33 @@
         if sampling_size > total_reads:
             sampling_size = total_reads
         fastq_df = fastq_df.iloc[3::4].sample(sampling_size)
         dict_mean = {}
         list_length = []
-        for index, row in fastq_df.iterrows():
-            base_qualities = []
-            for base in list(row.array[0]):
-                base_qualities.append(int(QUALITYKEY[base]))
-            dict_mean[index] = numpy.mean(base_qualities)
-            list_length.append(len(row.array[0]))
-        current_sample_df.at[file_name_base, 'Mean Read Length'] = "%.1f" % numpy.mean(list_length)
+        i = 0
+        for id, seq, in fastq_df.iterrows():
+            dict_mean[id] = numpy.mean(letter_annotations[i])
+            list_length.append(len(seq.array[0]))
+            i += 1
+        current_sample_df.at[file_name_base, 'Mean Read Length'] = '%.1f' % numpy.mean(list_length)
         # Mean Read Quality
         df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave'])
-        current_sample_df.at[file_name_base, 'Mean Read Quality'] = "%.1f" % df_mean['ave'].mean()
+        current_sample_df.at[file_name_base, 'Mean Read Quality'] = '%.1f' % df_mean['ave'].mean()
         # Reads Passing Q30
         reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30])
-        reads_passing_q30 = "{:10.2f}".format(reads_gt_q30 / sampling_size)
+        reads_passing_q30 = '{:10.2f}'.format(reads_gt_q30 / sampling_size)
         current_sample_df.at[file_name_base, 'Reads Passing Q30'] = reads_passing_q30
         # Total Reads
         current_sample_df.at[file_name_base, 'Total Reads'] = total_reads
         # All Mapped Reads
         all_mapped_reads, unmapped_reads = process_idxstats_file(idxstats_file)
         current_sample_df.at[file_name_base, 'All Mapped Reads'] = all_mapped_reads
         # Unmapped Reads
         current_sample_df.at[file_name_base, 'Unmapped Reads'] = unmapped_reads
         # Unmapped Reads Percentage of Total
         if unmapped_reads > 0:
-            unmapped_reads_percentage = "{:10.2f}".format(unmapped_reads / total_reads)
+            unmapped_reads_percentage = '{:10.2f}'.format(unmapped_reads / total_reads)
         else:
             unmapped_reads_percentage = 0
         current_sample_df.at[file_name_base, 'Unmapped Reads Percentage of Total'] = unmapped_reads_percentage
         # Reference with Coverage
         ref_with_coverage, avg_depth_of_coverage, good_snp_count = process_metrics_file(metrics_file)
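Two line-oriented remnants appear to pass through this hunk unchanged: total_reads = int(len(fastq_df.index) / 4) and fastq_df.iloc[3::4] assume four DataFrame rows per read, which held for the old read_csv(..., sep="^") loader but not for the new one-row-per-read SeqIO frame, and once .sample() shuffles rows the counter i no longer lines up with letter_annotations. A hedged sketch of computing the same per-read statistics directly from the parallel lists (all names and values here are illustrative, not from the tool):

    import random

    import numpy

    # Parallel per-read lists, as produced by the SeqIO loop above.
    seqs = ["ACGT", "ACGTACGT", "ACG"]
    quals = [[40, 40, 20, 0], [40] * 8, [30, 31, 32]]

    total_reads = len(seqs)              # one list entry per read; no division by 4
    sampling_size = min(2, total_reads)

    # Sample indices so each sequence stays paired with its quality scores.
    sampled = random.sample(range(total_reads), sampling_size)
    mean_read_length = numpy.mean([len(seqs[i]) for i in sampled])
    per_read_quality = [numpy.mean(quals[i]) for i in sampled]
    reads_gt_q30 = sum(q >= 30 for q in per_read_quality)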
@@ -109,23 +103,20 @@
         # Average Depth of Coverage
         current_sample_df.at[file_name_base, 'Average Depth of Coverage'] = avg_depth_of_coverage
         # Good SNP Count
         current_sample_df.at[file_name_base, 'Good SNP Count'] = good_snp_count
         data_frames.append(current_sample_df)
-    excel_df = pandas.concat(data_frames)
-    excel_file_name = "output.xlsx"
-    writer = pandas.ExcelWriter(excel_file_name, engine='xlsxwriter')
-    excel_df.to_excel(writer, sheet_name='Sheet1')
-    writer.save()
-    shutil.move(excel_file_name, output_file)
+    output_df = pandas.concat(data_frames)
+    output_df.to_csv(output_file, sep='\t', quoting=csv.QUOTE_NONE, escapechar='\\')
 
 
 def process_idxstats_file(idxstats_file):
     all_mapped_reads = 0
     unmapped_reads = 0
     with open(idxstats_file, "r") as fh:
         for i, line in enumerate(fh):
+            line = line.rstrip('\r\n')
             items = line.split("\t")
             if i == 0:
                 # NC_002945.4 4349904 213570 4047
                 all_mapped_reads = int(items[2])
             elif i == 1:
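This hunk drops the xlsxwriter/Excel output path in favor of a plain tab-delimited file. With quoting=csv.QUOTE_NONE pandas never quotes a field, so an escapechar is required in case a value ever contains the separator. A minimal sketch of the new call on an illustrative frame:

    import csv

    import pandas

    stats = pandas.DataFrame(
        {'Reference': ['NC_002945.4'], 'Total Reads': [213570]},
        index=['sample_1.fastq.gz'],
    )
    # Tab-separated, never quoted; any embedded separator would be backslash-escaped.
    stats.to_csv('output.tsv', sep='\t', quoting=csv.QUOTE_NONE, escapechar='\\')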
@@ -141,10 +132,11 @@
     with open(metrics_file, "r") as ifh:
         for i, line in enumerate(ifh):
             if i == 0:
                 # Skip comments.
                 continue
+            line = line.rstrip('\r\n')
             items = line.split("\t")
             if i == 1:
                 # MarkDuplicates 10.338671 98.74%
                 ref_with_coverage = items[3]
                 avg_depth_of_coverage = items[2]
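The two inserted line.rstrip('\r\n') calls (one per parser) fix a subtle field-pollution issue: iterating a file handle keeps the newline on every line, so after split("\t") the last field would otherwise carry it into the report. A quick demonstration with an illustrative metrics line:

    line = "MarkDuplicates\t10.338671\t98.74%\n"

    items_raw = line.split("\t")
    items_clean = line.rstrip('\r\n').split("\t")

    assert items_raw[-1] == "98.74%\n"   # newline pollutes the last field
    assert items_clean[-1] == "98.74%"   # stripped first, fields come out clean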