#!/usr/bin/env python

"""
Runs BWA on single-end data.
Takes a zip archive of FASTQ files, maps each one, and produces a zip archive
of SAM files containing the mappings.
Works with BWA version 0.5.9.

usage: bwa_wrapper.py [options]

See below for options
"""

import optparse, os, shutil, subprocess, sys, tempfile
import glob
import gzip, zipfile, tarfile

|
|
def stop_err( msg ):
    sys.stderr.write( '%s\n' % msg )
    sys.exit()

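# Illustrative note (not part of the original comments): a raw SOLiD color-space
# read is one base followed by digits, e.g. 'T01230...', while a double-encoded
# read is all bases, e.g. 'TACGGT...'; only the latter passes this check.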
def check_is_double_encoded( fastq ):
    # check that first read is bases, not one base followed by numbers
    bases = [ 'A', 'C', 'G', 'T', 'a', 'c', 'g', 't', 'N' ]
    nums = [ '0', '1', '2', '3' ]
    for line in file( fastq, 'rb' ):
        if not line.strip() or line.startswith( '@' ):
            continue
        if len( [ b for b in line.strip() if b in nums ] ) > 0:
            return False
        elif line.strip()[0] in bases and len( [ b for b in line.strip() if b in bases ] ) == len( line.strip() ):
            return True
        else:
            raise Exception, 'First line in first read does not appear to be a valid FASTQ read in either base-space or color-space'
    raise Exception, 'There is no non-comment and non-blank line in your FASTQ file'

def __main__():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-t', '--threads', dest='threads', help='The number of threads to use' )
    parser.add_option( '-c', '--color-space', dest='color_space', action='store_true', help='If the input files are SOLiD format' )
    parser.add_option( '-r', '--ref', dest='ref', help='The reference genome to use or index' )
    parser.add_option( '-f', '--input1', dest='fastq', help='The (forward) fastq file to use for the mapping' )
    parser.add_option( '-u', '--output', dest='output', help='The file to save the output (SAM format)' )
    parser.add_option( '-p', '--params', dest='params', help='Parameter setting to use (pre_set or full)' )
    parser.add_option( '-s', '--fileSource', dest='fileSource', help='Whether to use a previously indexed reference sequence or one from history (indexed or history)' )
    parser.add_option( '-n', '--maxEditDist', dest='maxEditDist', help='Maximum edit distance if integer' )
    parser.add_option( '-m', '--fracMissingAligns', dest='fracMissingAligns', help='Fraction of missing alignments given 2% uniform base error rate if fraction' )
    parser.add_option( '-o', '--maxGapOpens', dest='maxGapOpens', help='Maximum number of gap opens' )
    parser.add_option( '-e', '--maxGapExtens', dest='maxGapExtens', help='Maximum number of gap extensions' )
    parser.add_option( '-d', '--disallowLongDel', dest='disallowLongDel', help='Disallow a long deletion within specified bps' )
    parser.add_option( '-i', '--disallowIndel', dest='disallowIndel', help='Disallow indel within specified bps' )
    parser.add_option( '-l', '--seed', dest='seed', help='Take the first specified subsequence as seed (seed length)' )
    parser.add_option( '-k', '--maxEditDistSeed', dest='maxEditDistSeed', help='Maximum edit distance in the seed' )
    parser.add_option( '-M', '--mismatchPenalty', dest='mismatchPenalty', help='Mismatch penalty' )
    parser.add_option( '-O', '--gapOpenPenalty', dest='gapOpenPenalty', help='Gap open penalty' )
    parser.add_option( '-E', '--gapExtensPenalty', dest='gapExtensPenalty', help='Gap extension penalty' )
    parser.add_option( '-R', '--suboptAlign', dest='suboptAlign', default=None, help='Proceed with suboptimal alignments even if the top hit is a repeat' )
    parser.add_option( '-N', '--noIterSearch', dest='noIterSearch', help='Disable iterative search' )
    parser.add_option( '-T', '--outputTopN', dest='outputTopN', help='Maximum number of alignments to output in the XA tag for reads paired properly' )
    parser.add_option( '', '--outputTopNDisc', dest='outputTopNDisc', help='Maximum number of alignments to output in the XA tag for discordant read pairs (excluding singletons)' )
    parser.add_option( '-S', '--maxInsertSize', dest='maxInsertSize', help='Maximum insert size for a read pair to be considered properly mapped' )
    parser.add_option( '-P', '--maxOccurPairing', dest='maxOccurPairing', help='Maximum occurrences of a read for pairing' )
    parser.add_option( '', '--rgid', dest='rgid', help='Read group identifier' )
    parser.add_option( '', '--rgcn', dest='rgcn', help='Sequencing center that produced the read' )
    parser.add_option( '', '--rgds', dest='rgds', help='Description' )
    parser.add_option( '', '--rgdt', dest='rgdt', help='Date the run was produced (ISO8601 date or date/time, e.g. YYYY-MM-DD)' )
    parser.add_option( '', '--rgfo', dest='rgfo', help='Flow order' )
    parser.add_option( '', '--rgks', dest='rgks', help='The array of nucleotide bases that correspond to the key sequence of each read' )
    parser.add_option( '', '--rglb', dest='rglb', help='Library name' )
    parser.add_option( '', '--rgpg', dest='rgpg', help='Programs used for processing the read group' )
    parser.add_option( '', '--rgpi', dest='rgpi', help='Predicted median insert size' )
    parser.add_option( '', '--rgpl', dest='rgpl', choices=[ 'CAPILLARY', 'LS454', 'ILLUMINA', 'SOLID', 'HELICOS', 'IONTORRENT', 'PACBIO' ], help='Platform/technology used to produce the reads' )
    parser.add_option( '', '--rgpu', dest='rgpu', help='Platform unit (e.g. flowcell-barcode.lane for Illumina or slide for SOLiD)' )
    parser.add_option( '', '--rgsm', dest='rgsm', help='Sample' )
    parser.add_option( '-D', '--dbkey', dest='dbkey', help='Dbkey for reference genome' )
    parser.add_option( '-X', '--do_not_build_index', dest='do_not_build_index', action='store_true', help="Don't build index" )
    parser.add_option( '-H', '--suppressHeader', dest='suppressHeader', help='Suppress header' )
    parser.add_option( '-I', '--illumina1.3', dest='illumina13qual', help='Input FASTQ files have Illumina 1.3 quality scores' )
    (options, args) = parser.parse_args()

    tmp_input_dir = tempfile.mkdtemp()
    tmp_output_dir = tempfile.mkdtemp()

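    # --input1 is expected to be a zip archive of FASTQ files; every member is
    # extracted and aligned separately, and the per-file SAM outputs collected
    # in tmp_output_dir are zipped into --output at the end of the run.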
    myarchive = zipfile.ZipFile( options.fastq, 'r', allowZip64=True )
    myarchive.extractall( tmp_input_dir )

    for fastq in glob.glob( tmp_input_dir + '/*' ):

        sam_output_file = tmp_output_dir + '/' + os.path.splitext( os.path.basename( fastq ) )[0] + '.sam'
        create_sam = open( sam_output_file, 'w' )
        create_sam.close()

        # output version # of tool
        try:
            tmp = tempfile.NamedTemporaryFile().name
            tmp_stdout = open( tmp, 'wb' )
            proc = subprocess.Popen( args='bwa 2>&1', shell=True, stdout=tmp_stdout )
            tmp_stdout.close()
            returncode = proc.wait()
            stdout = None
            for line in open( tmp_stdout.name, 'rb' ):
                if line.lower().find( 'version' ) >= 0:
                    stdout = line.strip()
                    break
            if stdout:
                sys.stdout.write( 'BWA %s\n' % stdout )
            else:
                raise Exception
        except:
            sys.stdout.write( 'Could not determine BWA version\n' )

        # check for color space fastq that's not double-encoded and exit if appropriate
        if options.color_space:
            if not check_is_double_encoded( fastq ):
                stop_err( 'Your file must be double-encoded (it must be converted from "numbers" to "bases"). See the help section for details' )
            #if options.genAlignType == 'paired':
                #if not check_is_double_encoded( options.rfastq ):
                    #stop_err( 'Your reverse reads file must also be double-encoded (it must be converted from "numbers" to "bases"). See the help section for details' )

        #fastq = options.fastq
        #if options.rfastq:
            #rfastq = options.rfastq

        # set color space variable
        if options.color_space:
            color_space = '-c'
        else:
            color_space = ''

        # make temp directory for placement of indices
        tmp_index_dir = tempfile.mkdtemp()
        tmp_dir = tempfile.mkdtemp()
        # index if necessary
        if options.fileSource == 'history' and not options.do_not_build_index:
            ref_file = tempfile.NamedTemporaryFile( dir=tmp_index_dir )
            ref_file_name = ref_file.name
            ref_file.close()
            os.symlink( options.ref, ref_file_name )
            # determine which indexing algorithm to use, based on size
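            # per the BWA documentation (assumption worth verifying for your
            # BWA build), 'is' does not handle references larger than ~2 GB,
            # while 'bwtsw' is meant for large genomes; the 2**30 (1 GiB)
            # cutoff below stays safely under that limit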
            try:
                size = os.stat( options.ref ).st_size
                if size <= 2**30:
                    indexingAlg = 'is'
                else:
                    indexingAlg = 'bwtsw'
            except:
                indexingAlg = 'is'
            indexing_cmds = '%s -a %s' % ( color_space, indexingAlg )
            cmd1 = 'bwa index %s %s' % ( indexing_cmds, ref_file_name )
            try:
                tmp = tempfile.NamedTemporaryFile( dir=tmp_index_dir ).name
                tmp_stderr = open( tmp, 'wb' )
                proc = subprocess.Popen( args=cmd1, shell=True, cwd=tmp_index_dir, stderr=tmp_stderr.fileno() )
                returncode = proc.wait()
                tmp_stderr.close()
                # get stderr, allowing for case where it's very large
                tmp_stderr = open( tmp, 'rb' )
                stderr = ''
                buffsize = 1048576
                try:
                    while True:
                        stderr += tmp_stderr.read( buffsize )
                        if not stderr or len( stderr ) % buffsize != 0:
                            break
                except OverflowError:
                    pass
                tmp_stderr.close()
                if returncode != 0:
                    raise Exception, stderr
            except Exception, e:
                # clean up temp dirs
                if os.path.exists( tmp_index_dir ):
                    shutil.rmtree( tmp_index_dir )
                if os.path.exists( tmp_dir ):
                    shutil.rmtree( tmp_dir )
                stop_err( 'Error indexing reference sequence. ' + str( e ) )
        else:
            ref_file_name = options.ref
        if options.illumina13qual:
            illumina_quals = "-I"
        else:
            illumina_quals = ""

        # set up aligning and generate aligning command options
        if options.params == 'pre_set':
            aligning_cmds = '-t %s %s %s' % ( options.threads, color_space, illumina_quals )
            gen_alignment_cmds = ''
        else:
            if options.maxEditDist != '0':
                editDist = options.maxEditDist
            else:
                editDist = options.fracMissingAligns
            if options.seed != '-1':
                seed = '-l %s' % options.seed
            else:
                seed = ''
            if options.suboptAlign:
                suboptAlign = '-R "%s"' % ( options.suboptAlign )
            else:
                suboptAlign = ''
            if options.noIterSearch == 'true':
                noIterSearch = '-N'
            else:
                noIterSearch = ''
            aligning_cmds = '-n %s -o %s -e %s -d %s -i %s %s -k %s -t %s -M %s -O %s -E %s %s %s %s %s' % \
                            ( editDist, options.maxGapOpens, options.maxGapExtens, options.disallowLongDel,
                              options.disallowIndel, seed, options.maxEditDistSeed, options.threads,
                              options.mismatchPenalty, options.gapOpenPenalty, options.gapExtensPenalty,
                              suboptAlign, noIterSearch, color_space, illumina_quals )
            #if options.genAlignType == 'paired':
                #gen_alignment_cmds = '-a %s -o %s' % ( options.maxInsertSize, options.maxOccurPairing )
                #if options.outputTopNDisc:
                    #gen_alignment_cmds += ' -N %s' % options.outputTopNDisc

            gen_alignment_cmds = ''
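            # build a SAM @RG read-group header line from the --rg* options;
            # ID, LB, PL and SM are required, the remaining tags are optional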
            if options.rgid:
                if not options.rglb or not options.rgpl or not options.rgsm:
                    stop_err( 'If you want to specify read groups, you must include the ID, LB, PL, and SM tags.' )
                readGroup = '@RG\tID:%s\tLB:%s\tPL:%s\tSM:%s' % ( options.rgid, options.rglb, options.rgpl, options.rgsm )
                if options.rgcn:
                    readGroup += '\tCN:%s' % options.rgcn
                if options.rgds:
                    readGroup += '\tDS:%s' % options.rgds
                if options.rgdt:
                    readGroup += '\tDT:%s' % options.rgdt
                if options.rgfo:
                    readGroup += '\tFO:%s' % options.rgfo
                if options.rgks:
                    readGroup += '\tKS:%s' % options.rgks
                if options.rgpg:
                    readGroup += '\tPG:%s' % options.rgpg
                if options.rgpi:
                    readGroup += '\tPI:%s' % options.rgpi
                if options.rgpu:
                    readGroup += '\tPU:%s' % options.rgpu
                gen_alignment_cmds += ' -r "%s"' % readGroup
            if options.outputTopN:
                gen_alignment_cmds += ' -n %s' % options.outputTopN
        # set up output files
        tmp_align_out = tempfile.NamedTemporaryFile( dir=tmp_dir )
        tmp_align_out_name = tmp_align_out.name
        tmp_align_out.close()
        tmp_align_out2 = tempfile.NamedTemporaryFile( dir=tmp_dir )
        tmp_align_out2_name = tmp_align_out2.name
        tmp_align_out2.close()
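        # NOTE: only the single-end path is active in this wrapper; cmd2b stays
        # empty because the paired-end branch above is commented out, so each
        # FASTQ goes through 'bwa aln' followed by 'bwa samse'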
        # prepare actual aligning and generate aligning commands
        cmd2 = 'bwa aln %s %s %s > %s' % ( aligning_cmds, ref_file_name, fastq, tmp_align_out_name )
        cmd2b = ''
        cmd3 = 'bwa samse %s %s %s %s >> %s' % ( gen_alignment_cmds, ref_file_name, tmp_align_out_name, fastq, sam_output_file )
        # perform alignments
        buffsize = 1048576
        try:
            # need to nest try-except in try-finally to handle 2.4
            try:
                # align
                try:
                    tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
                    tmp_stderr = open( tmp, 'wb' )
                    proc = subprocess.Popen( args=cmd2, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
                    returncode = proc.wait()
                    tmp_stderr.close()
                    # get stderr, allowing for case where it's very large
                    tmp_stderr = open( tmp, 'rb' )
                    stderr = ''
                    try:
                        while True:
                            stderr += tmp_stderr.read( buffsize )
                            if not stderr or len( stderr ) % buffsize != 0:
                                break
                    except OverflowError:
                        pass
                    tmp_stderr.close()
                    if returncode != 0:
                        raise Exception, stderr
                except Exception, e:
                    raise Exception, 'Error aligning sequence. ' + str( e )
                # and again if paired data
                try:
                    if cmd2b:
                        tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
                        tmp_stderr = open( tmp, 'wb' )
                        proc = subprocess.Popen( args=cmd2b, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
                        returncode = proc.wait()
                        tmp_stderr.close()
                        # get stderr, allowing for case where it's very large
                        tmp_stderr = open( tmp, 'rb' )
                        stderr = ''
                        try:
                            while True:
                                stderr += tmp_stderr.read( buffsize )
                                if not stderr or len( stderr ) % buffsize != 0:
                                    break
                        except OverflowError:
                            pass
                        tmp_stderr.close()
                        if returncode != 0:
                            raise Exception, stderr
                except Exception, e:
                    raise Exception, 'Error aligning second sequence. ' + str( e )
                # generate align
                try:
                    tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
                    tmp_stderr = open( tmp, 'wb' )
                    proc = subprocess.Popen( args=cmd3, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
                    returncode = proc.wait()
                    tmp_stderr.close()
                    # get stderr, allowing for case where it's very large
                    tmp_stderr = open( tmp, 'rb' )
                    stderr = ''
                    try:
                        while True:
                            stderr += tmp_stderr.read( buffsize )
                            if not stderr or len( stderr ) % buffsize != 0:
                                break
                    except OverflowError:
                        pass
                    tmp_stderr.close()
                    if returncode != 0:
                        raise Exception, stderr
                except Exception, e:
                    raise Exception, 'Error generating alignments. ' + str( e )
                # remove header if necessary
                if options.suppressHeader == 'true':
                    tmp_out = tempfile.NamedTemporaryFile( dir=tmp_dir )
                    tmp_out_name = tmp_out.name
                    tmp_out.close()
                    try:
                        shutil.move( sam_output_file, tmp_out_name )
                    except Exception, e:
                        raise Exception, 'Error moving output file before removing headers. ' + str( e )
                    fout = file( sam_output_file, 'w' )
                    for line in file( tmp_out.name, 'r' ):
                        if not ( line.startswith( '@HD' ) or line.startswith( '@SQ' ) or line.startswith( '@RG' ) or line.startswith( '@PG' ) or line.startswith( '@CO' ) ):
                            fout.write( line )
                    fout.close()
                # check that there are results in the output file
                if os.path.getsize( sam_output_file ) > 0:
                    sys.stdout.write( 'BWA run on single-end data\n' )
                else:
                    raise Exception, 'The output file is empty. You may simply have no matches, or there may be an error with your input file or settings.'
            except Exception, e:
                stop_err( 'The alignment failed.\n' + str( e ) )
        finally:
            # clean up temp dir
            if os.path.exists( tmp_index_dir ):
                shutil.rmtree( tmp_index_dir )
            if os.path.exists( tmp_dir ):
                shutil.rmtree( tmp_dir )

    # put all in an archive
    mytotalzipfile = zipfile.ZipFile( options.output, 'w', allowZip64=True )
    os.chdir( tmp_output_dir )
    for samfile in glob.glob( tmp_output_dir + '/*' ):
        mytotalzipfile.write( os.path.basename( samfile ) )
    mytotalzipfile.close()

if __name__=="__main__": __main__()