Mercurial > repos > jjohnson > mothur_toolsuite
comparison mothur/tools/mothur/mothur_wrapper.py @ 15:a6189f58fedb
Mothur - updated for Mothur version 1.22.0
author | Jim Johnson <jj@umn.edu> |
---|---|
date | Tue, 08 Nov 2011 11:45:32 -0600 |
parents | 4f797d3eee3a |
children | 697156806162 |
comparison
equal
deleted
inserted
replaced
14:ee59e5cff3ba | 15:a6189f58fedb |
---|---|
2 | 2 |
3 """ | 3 """ |
4 http://www.mothur.org/ | 4 http://www.mothur.org/ |
5 | 5 |
6 Supports mothur version | 6 Supports mothur version |
7 mothur v.1.20.0 | 7 mothur v.1.22.0 |
8 | 8 |
9 Class encapsulating Mothur galaxy tool. | 9 Class encapsulating Mothur galaxy tool. |
10 Expect each invocation to include: | 10 Expect each invocation to include: |
11 Here is an example call to this script with an explanation before each param : | 11 Here is an example call to this script with an explanation before each param : |
12 mothur_wrapper.py | 12 mothur_wrapper.py |
184 #catchall | 184 #catchall |
185 cmd_dict['chimera.bellerophon'] = dict({'required' : ['fasta'], 'optional' : ['filter','correction','window','increment','processors']}) | 185 cmd_dict['chimera.bellerophon'] = dict({'required' : ['fasta'], 'optional' : ['filter','correction','window','increment','processors']}) |
186 cmd_dict['chimera.ccode'] = dict({'required' : ['fasta','reference'], 'optional' : ['filter','mask','window','numwanted','save','processors']}) | 186 cmd_dict['chimera.ccode'] = dict({'required' : ['fasta','reference'], 'optional' : ['filter','mask','window','numwanted','save','processors']}) |
187 cmd_dict['chimera.check'] = dict({'required' : ['fasta','reference'], 'optional' : ['ksize','svg','name','increment','save','processors']}) | 187 cmd_dict['chimera.check'] = dict({'required' : ['fasta','reference'], 'optional' : ['ksize','svg','name','increment','save','processors']}) |
188 cmd_dict['chimera.pintail'] = dict({'required' : ['fasta','reference'], 'optional' : ['conservation','quantile','filter','mask','window','increment','save','processors']}) | 188 cmd_dict['chimera.pintail'] = dict({'required' : ['fasta','reference'], 'optional' : ['conservation','quantile','filter','mask','window','increment','save','processors']}) |
189 cmd_dict['chimera.slayer'] = dict({'required' : ['fasta','reference'], 'optional' : ['name','search','window','increment','match','mismatch','numwanted','parents','minsim','mincov','iters','minbs','minsnp','divergence','realign','split','blastlocation','save','processors']}) | 189 cmd_dict['chimera.slayer'] = dict({'required' : ['fasta','reference'], 'optional' : ['name','group','search','window','increment','match','mismatch','numwanted','parents','minsim','mincov','iters','minbs','minsnp','divergence','realign','split','blastlocation','save','processors']}) |
190 cmd_dict['chimera.uchime'] = dict({'required' : ['fasta'], 'optional' : ['name','reference','abskew','chimealns','minh','mindiv','xn','dn','xa','chunks','minchunk','idsmoothwindow','minsmoothid','maxp','skipgaps','skipgaps2','minlen','maxlen','ucl','queryfract','processors']}) | 190 cmd_dict['chimera.uchime'] = dict({'required' : ['fasta'], 'optional' : ['name','group','reference','abskew','chimealns','minh','mindiv','xn','dn','xa','chunks','minchunk','idsmoothwindow','minsmoothid','maxp','skipgaps','skipgaps2','minlen','maxlen','ucl','queryfract','processors']}) |
191 cmd_dict['chop.seqs'] = dict({'required' : ['fasta','numbases'], 'optional' : ['countgaps','keep','short']}) | 191 cmd_dict['chop.seqs'] = dict({'required' : ['fasta','numbases'], 'optional' : ['countgaps','keep','short']}) |
192 cmd_dict['classify.otu'] = dict({'required' : ['list','taxonomy'],'optional' : ['name','cutoff','label','group','probs','basis','reftaxonomy']}) | 192 cmd_dict['classify.otu'] = dict({'required' : ['list','taxonomy'],'optional' : ['name','cutoff','label','group','probs','basis','reftaxonomy']}) |
193 cmd_dict['classify.seqs'] = dict({'required' : ['fasta','reference','taxonomy'],'optional' : ['name','search','ksize','method','match','mismatch','gapopen','gapextend','numwanted','probs','save','processors']}) | 193 cmd_dict['classify.seqs'] = dict({'required' : ['fasta','reference','taxonomy'],'optional' : ['name','search','ksize','method','match','mismatch','gapopen','gapextend','numwanted','probs','save','processors']}) |
194 #clear.memory ## not needed in galaxy framework | 194 #clear.memory ## not needed in galaxy framework |
195 cmd_dict['clearcut'] = dict({'required' : [['phylip','fasta']],'optional' : ['seed','norandom','shuffle','neighbor','expblen','expdist','ntrees','matrixout','kimura','jukes','protein','DNA']}) | 195 cmd_dict['clearcut'] = dict({'required' : [['phylip','fasta']],'optional' : ['seed','norandom','shuffle','neighbor','expblen','expdist','ntrees','matrixout','kimura','jukes','protein','DNA']}) |
199 cmd_dict['cluster.split'] = dict({'required' : [['fasta','phylip','column']] , 'optional' : ['name','method','splitmethod','taxonomy','taxlevel','showabund','cutoff','hard','large','precision','timing','processors']}) | 199 cmd_dict['cluster.split'] = dict({'required' : [['fasta','phylip','column']] , 'optional' : ['name','method','splitmethod','taxonomy','taxlevel','showabund','cutoff','hard','large','precision','timing','processors']}) |
200 cmd_dict['collect.shared'] = dict({'required' : ['shared'], 'optional' : ['calc','label','freq','groups','all']}) | 200 cmd_dict['collect.shared'] = dict({'required' : ['shared'], 'optional' : ['calc','label','freq','groups','all']}) |
201 cmd_dict['collect.single'] = dict({'required' : [['list', 'sabund', 'rabund', 'shared']], 'optional' : ['calc','abund','size','label','freq']}) | 201 cmd_dict['collect.single'] = dict({'required' : [['list', 'sabund', 'rabund', 'shared']], 'optional' : ['calc','abund','size','label','freq']}) |
202 cmd_dict['consensus.seqs'] = dict({'required' : ['fasta'], 'optional' : ['list','name','label','cutoff']}) | 202 cmd_dict['consensus.seqs'] = dict({'required' : ['fasta'], 'optional' : ['list','name','label','cutoff']}) |
203 cmd_dict['corr.axes'] = dict({'required' : [['shared','relabund','metadata'],'axes'], 'optional' : ['label','groups','method','numaxes']}) | 203 cmd_dict['corr.axes'] = dict({'required' : [['shared','relabund','metadata'],'axes'], 'optional' : ['label','groups','method','numaxes']}) |
204 cmd_dict['count.groups'] = dict({'required' : ['group','shared'], 'optional' : ['accnos','groups']}) | |
204 cmd_dict['count.seqs'] = dict({'required' : ['name'], 'optional' : ['group','groups']}) | 205 cmd_dict['count.seqs'] = dict({'required' : ['name'], 'optional' : ['group','groups']}) |
205 cmd_dict['degap.seqs'] = dict({'required' : ['fasta']}) | 206 cmd_dict['degap.seqs'] = dict({'required' : ['fasta']}) |
206 cmd_dict['deunique.seqs'] = dict({'required' : ['fasta','name'], 'optional' : []}) | 207 cmd_dict['deunique.seqs'] = dict({'required' : ['fasta','name'], 'optional' : []}) |
207 cmd_dict['deunique.tree'] = dict({'required' : ['tree','name'], 'optional' : []}) | 208 cmd_dict['deunique.tree'] = dict({'required' : ['tree','name'], 'optional' : []}) |
208 cmd_dict['dist.seqs'] = dict({'required' : ['fasta'], 'optional' : ['calc','countends','output','cutoff','oldfasta','column','processors']}) | 209 cmd_dict['dist.seqs'] = dict({'required' : ['fasta'], 'optional' : ['calc','countends','output','cutoff','oldfasta','column','processors']}) |
227 cmd_dict['indicator'] = dict({'required' : [['tree','design'],['shared','relabund']], 'optional' : ['groups','label','processors']}) | 228 cmd_dict['indicator'] = dict({'required' : [['tree','design'],['shared','relabund']], 'optional' : ['groups','label','processors']}) |
228 cmd_dict['libshuff'] = dict({'required' : ['phylip','group'],'optional' : ['groups','iters','form','sim','step','cutoff']}) | 229 cmd_dict['libshuff'] = dict({'required' : ['phylip','group'],'optional' : ['groups','iters','form','sim','step','cutoff']}) |
229 cmd_dict['list.seqs'] = dict({'required' : [['fasta','name','group','list','alignreport','taxonomy']]}) | 230 cmd_dict['list.seqs'] = dict({'required' : [['fasta','name','group','list','alignreport','taxonomy']]}) |
230 cmd_dict['make.fastq'] = dict({'required' : ['fasta','qfile'] , 'optional' : []}) | 231 cmd_dict['make.fastq'] = dict({'required' : ['fasta','qfile'] , 'optional' : []}) |
231 cmd_dict['make.group'] = dict({'required' : ['fasta','groups'], 'optional' : []}) | 232 cmd_dict['make.group'] = dict({'required' : ['fasta','groups'], 'optional' : []}) |
232 cmd_dict['make.shared'] = dict({'required' : ['list','group'], 'optional' : ['label','groups','ordergroup']}) | 233 cmd_dict['make.shared'] = dict({'required' : ['list','group'], 'optional' : ['label','groups']}) |
233 cmd_dict['mantel'] = dict({'required' : ['phylip','phylip2'] , 'optional' : ['method','iters']}) | 234 cmd_dict['mantel'] = dict({'required' : ['phylip','phylip2'] , 'optional' : ['method','iters']}) |
234 cmd_dict['merge.files'] = dict({'required' : ['input','output']}) | 235 cmd_dict['merge.files'] = dict({'required' : ['input','output']}) |
235 cmd_dict['merge.groups'] = dict({'required' : ['shared','design'], 'optional' : ['groups', 'label']}) | 236 cmd_dict['merge.groups'] = dict({'required' : ['shared','design'], 'optional' : ['groups', 'label']}) |
236 cmd_dict['metastats'] = dict({'required' : ['shared','design'], 'optional' : ['groups', 'label','iters','threshold','sets','processors']}) | 237 cmd_dict['metastats'] = dict({'required' : ['shared','design'], 'optional' : ['groups', 'label','iters','threshold','sets','processors']}) |
237 cmd_dict['nmds'] = dict({'required' : ['phylip'], 'optional' : ['axes','mindim','maxdim','iters','maxiters','epsilon']}) | 238 cmd_dict['nmds'] = dict({'required' : ['phylip'], 'optional' : ['axes','mindim','maxdim','iters','maxiters','epsilon']}) |
242 cmd_dict['parsimony'] = dict({'required' : ['tree'], 'optional' : ['group','groups','name','iters','random','processors']}) | 243 cmd_dict['parsimony'] = dict({'required' : ['tree'], 'optional' : ['group','groups','name','iters','random','processors']}) |
243 cmd_dict['pca'] = dict({'required' : [['shared','relabund']], 'optional' : ['label','groups','metric']}) | 244 cmd_dict['pca'] = dict({'required' : [['shared','relabund']], 'optional' : ['label','groups','metric']}) |
244 cmd_dict['pcoa'] = dict({'required' : ['phylip'], 'optional' : ['metric']}) | 245 cmd_dict['pcoa'] = dict({'required' : ['phylip'], 'optional' : ['metric']}) |
245 cmd_dict['phylo.diversity'] = dict({'required' : ['tree'],'optional' : ['group','name','groups','iters','freq','scale','rarefy','collect','summary','processors']}) | 246 cmd_dict['phylo.diversity'] = dict({'required' : ['tree'],'optional' : ['group','name','groups','iters','freq','scale','rarefy','collect','summary','processors']}) |
246 cmd_dict['phylotype'] = dict({'required' : ['taxonomy'],'optional' : ['name','cutoff','label']}) | 247 cmd_dict['phylotype'] = dict({'required' : ['taxonomy'],'optional' : ['name','cutoff','label']}) |
247 cmd_dict['pre.cluster'] = dict({'required' : ['fasta'], 'optional' : ['name','diffs']}) | 248 cmd_dict['pre.cluster'] = dict({'required' : ['fasta'], 'optional' : ['name','diffs','group']}) |
248 cmd_dict['rarefaction.shared'] = dict({'required' : ['shared'], 'optional' : ['calc','label','iters','groups','jumble']}) | 249 cmd_dict['rarefaction.shared'] = dict({'required' : ['shared'], 'optional' : ['calc','label','iters','groups','jumble']}) |
249 cmd_dict['rarefaction.single'] = dict({'required' : [['list', 'sabund', 'rabund', 'shared']], 'optional' : ['calc','abund','iters','label','freq','processors']}) | 250 cmd_dict['rarefaction.single'] = dict({'required' : [['list', 'sabund', 'rabund', 'shared']], 'optional' : ['calc','abund','iters','label','freq','processors']}) |
250 cmd_dict['remove.groups'] = dict({'required' : ['group'], 'optional' : ['groups','accnos','fasta','name','list','shared','taxonomy']}) | 251 cmd_dict['remove.groups'] = dict({'required' : ['group'], 'optional' : ['groups','accnos','fasta','name','list','shared','taxonomy']}) |
251 cmd_dict['remove.lineage'] = dict({'required' : ['taxonomy','taxon'],'optional' : ['fasta','name','group','list','alignreport','dups']}) | 252 cmd_dict['remove.lineage'] = dict({'required' : ['taxonomy','taxon'],'optional' : ['fasta','name','group','list','alignreport','dups']}) |
252 cmd_dict['remove.otus'] = dict({'required' : ['group','list','label'], 'optional' : ['groups','accnos']}) | 253 cmd_dict['remove.otus'] = dict({'required' : ['group','list','label'], 'optional' : ['groups','accnos']}) |
253 cmd_dict['remove.rare'] = dict({'required' : [['list','sabund','rabund','shared'],'nseqs'], 'optional' : ['group','groups','label','bygroup']}) | 254 cmd_dict['remove.rare'] = dict({'required' : [['list','sabund','rabund','shared'],'nseqs'], 'optional' : ['group','groups','label','bygroup']}) |
254 cmd_dict['remove.seqs'] = dict({'required' : ['accnos',['fasta','qfile','name','group','list','alignreport','taxonomy']], 'optional' : ['dups']}) | 255 cmd_dict['remove.seqs'] = dict({'required' : ['accnos',['fasta','qfile','name','group','list','alignreport','taxonomy']], 'optional' : ['dups']}) |
255 cmd_dict['reverse.seqs'] = dict({'required' : ['fasta']}) | 256 cmd_dict['reverse.seqs'] = dict({'required' : ['fasta']}) |
256 cmd_dict['screen.seqs'] = dict({'required' : ['fasta'], 'optional' : ['start','end','maxambig','maxhomop','minlength','maxlength','criteria','optimize','name','group','alignreport','processors']}) | 257 cmd_dict['screen.seqs'] = dict({'required' : ['fasta'], 'optional' : ['start','end','maxambig','maxhomop','minlength','maxlength','criteria','optimize','name','group','alignreport','processors']}) |
257 cmd_dict['sens.spec'] = dict({'required' : ['list',['column','phylip']] , 'optional' : ['label','cutoff','hard','precision']}) | 258 cmd_dict['sens.spec'] = dict({'required' : ['list',['column','phylip']] , 'optional' : ['label','cutoff','hard','precision']}) |
259 | |
260 cmd_dict['seq.error'] = dict({'required' : ['fasta','reference'] , 'optional' : ['name','qfile','report','ignorechimeras','threshold','processors']}) | |
261 | |
258 cmd_dict['sffinfo'] = dict({'required' : [['sff','sfftxt']], 'optional' : ['fasta','qfile','trim','sfftxt','flow','accnos']}) | 262 cmd_dict['sffinfo'] = dict({'required' : [['sff','sfftxt']], 'optional' : ['fasta','qfile','trim','sfftxt','flow','accnos']}) |
263 | |
264 cmd_dict['shhh.flows'] = dict({'required' : [['flow','files']], 'optional' : ['lookup','maxiter','mindelta','cutoff','sigma','order','processors']}) | |
265 | |
259 cmd_dict['split.abund'] = dict({'required' : ['fasta',['name','list']], 'optional' : ['cutoff','group','groups','label','accnos']}) | 266 cmd_dict['split.abund'] = dict({'required' : ['fasta',['name','list']], 'optional' : ['cutoff','group','groups','label','accnos']}) |
260 cmd_dict['split.groups'] = dict({'required' : ['fasta','group'], 'optional' : ['name','groups']}) | 267 cmd_dict['split.groups'] = dict({'required' : ['fasta','group'], 'optional' : ['name','groups']}) |
261 cmd_dict['sub.sample'] = dict({'required' : [['fasta','list','sabund','rabund','shared']], 'optional' : ['name','group','groups','label','size','persample']}) | 268 cmd_dict['sub.sample'] = dict({'required' : [['fasta','list','sabund','rabund','shared']], 'optional' : ['name','group','groups','label','size','persample']}) |
262 cmd_dict['summary.seqs'] = dict({'required' : ['fasta'], 'optional' : ['name','processors']}) | 269 cmd_dict['summary.seqs'] = dict({'required' : ['fasta'], 'optional' : ['name','processors']}) |
263 cmd_dict['summary.shared'] = dict({'required' : ['shared'], 'optional' : ['calc','label','groups','all','distance','processors']}) | 270 cmd_dict['summary.shared'] = dict({'required' : ['shared'], 'optional' : ['calc','label','groups','all','distance','processors']}) |
264 cmd_dict['summary.single'] = dict({'required' : [['list','sabund','rabund','shared']], 'optional' : ['calc','abund','size','label','groupmode']}) | 271 cmd_dict['summary.single'] = dict({'required' : [['list','sabund','rabund','shared']], 'optional' : ['calc','abund','size','label','groupmode']}) |
272 cmd_dict['summary.tax'] = dict({'required' : ['taxonomy'], 'optional' : ['name','group','reftaxonomy']}) | |
265 cmd_dict['tree.shared'] = dict({'required' : [['shared','phylip','column']], 'optional' : ['name','groups','calc','cutoff','precision','label']}) | 273 cmd_dict['tree.shared'] = dict({'required' : [['shared','phylip','column']], 'optional' : ['name','groups','calc','cutoff','precision','label']}) |
274 cmd_dict['trim.flows'] = dict({'required' : ['flow'], 'optional' : ['oligos','bdiffs','pdiffs','tdiffs','minflows','maxflows','fasta','signal','noise','maxhomop','order','processors']}) | |
266 cmd_dict['trim.seqs'] = dict({'required' : ['fasta'], 'optional' : ['name','group','oligos','qfile','qaverage','qthreshold','qwindowaverage','qwindowsize','rollaverage','qstepsize','qtrim','flip','maxambig','maxhomop','minlength','maxlength','bdiffs','pdiffs','tdiffs','allfiles','keepfirst','removelast','processors']}) | 275 cmd_dict['trim.seqs'] = dict({'required' : ['fasta'], 'optional' : ['name','group','oligos','qfile','qaverage','qthreshold','qwindowaverage','qwindowsize','rollaverage','qstepsize','qtrim','flip','maxambig','maxhomop','minlength','maxlength','bdiffs','pdiffs','tdiffs','allfiles','keepfirst','removelast','processors']}) |
267 cmd_dict['unifrac.unweighted'] = dict({'required' : ['tree'], 'optional' : ['name','group','groups','iters','distance','random','root','processors']}) | 276 cmd_dict['unifrac.unweighted'] = dict({'required' : ['tree'], 'optional' : ['name','group','groups','iters','distance','random','root','processors']}) |
268 cmd_dict['unifrac.weighted'] = dict({'required' : ['tree'], 'optional' : ['name','group','groups','iters','distance','random','root','processors']}) | 277 cmd_dict['unifrac.weighted'] = dict({'required' : ['tree'], 'optional' : ['name','group','groups','iters','distance','random','root','processors']}) |
269 cmd_dict['unique.seqs'] = dict({'required' : ['fasta'], 'optional' : ['name']}) | 278 cmd_dict['unique.seqs'] = dict({'required' : ['fasta'], 'optional' : ['name']}) |
270 cmd_dict['venn'] = dict({'required' : [['list','shared']], 'optional' : ['calc','label','groups','abund','nseqs','permute']}) | 279 cmd_dict['venn'] = dict({'required' : [['list','shared']], 'optional' : ['calc','label','groups','abund','nseqs','permute']}) |
314 parser.add_option( '--qwindowaverage', dest='qwindowaverage', type="int", help='Remove sequences that have a window average quality below the value' ) | 323 parser.add_option( '--qwindowaverage', dest='qwindowaverage', type="int", help='Remove sequences that have a window average quality below the value' ) |
315 parser.add_option( '--qwindowsize', dest='qwindowsize', type="int", help='Window size for qwindowaverage' ) | 324 parser.add_option( '--qwindowsize', dest='qwindowsize', type="int", help='Window size for qwindowaverage' ) |
316 parser.add_option( '--rollaverage', dest='rollaverage', type="int", help='Remove sequences that have an average quality below the value in a rolling window' ) | 325 parser.add_option( '--rollaverage', dest='rollaverage', type="int", help='Remove sequences that have an average quality below the value in a rolling window' ) |
317 parser.add_option( '--qstepsize', dest='qstepsize', type="int", help='Distance to move a rolling window for each step' ) | 326 parser.add_option( '--qstepsize', dest='qstepsize', type="int", help='Distance to move a rolling window for each step' ) |
318 parser.add_option( '--qtrim', dest='qtrim', help='For sequence below qthreshold, false to scrap file, true to trimmed and in trim file' ) | 327 parser.add_option( '--qtrim', dest='qtrim', help='For sequence below qthreshold, false to scrap file, true to trimmed and in trim file' ) |
328 parser.add_option( '--ignorechimeras', dest='ignorechimeras', help='ignorechimeras' ) | |
319 parser.add_option( '--flip', dest='flip', help='If true, reverse complement the sequences' ) | 329 parser.add_option( '--flip', dest='flip', help='If true, reverse complement the sequences' ) |
320 parser.add_option( '--maxambig', dest='maxambig', type="int", help='Number of ambiguous base calls to allow' ) | 330 parser.add_option( '--maxambig', dest='maxambig', type="int", help='Number of ambiguous base calls to allow' ) |
321 parser.add_option( '--maxhomop', dest='maxhomop', type="int", help='Maximum homopolymer length allowed' ) | 331 parser.add_option( '--maxhomop', dest='maxhomop', type="int", help='Maximum homopolymer length allowed' ) |
322 parser.add_option( '--minlength', dest='minlength', type="int", help='Minimum sequence length' ) | 332 parser.add_option( '--minlength', dest='minlength', type="int", help='Minimum sequence length' ) |
323 parser.add_option( '--maxlength', dest='maxlength', type="int", help='Maximum sequence length' ) | 333 parser.add_option( '--maxlength', dest='maxlength', type="int", help='Maximum sequence length' ) |
331 parser.add_option( '--accnos', dest='accnos', help='A file containing a list of names' ) | 341 parser.add_option( '--accnos', dest='accnos', help='A file containing a list of names' ) |
332 parser.add_option( '--groups', dest='groups', help='pairwise group labels' ) | 342 parser.add_option( '--groups', dest='groups', help='pairwise group labels' ) |
333 parser.add_option( '--group', dest='group', help='A file containing a list of names' ) | 343 parser.add_option( '--group', dest='group', help='A file containing a list of names' ) |
334 parser.add_option( '--list', dest='list', help='A file containing a list of names' ) | 344 parser.add_option( '--list', dest='list', help='A file containing a list of names' ) |
335 parser.add_option( '--alignreport', dest='alignreport', help='A align.report file ' ) | 345 parser.add_option( '--alignreport', dest='alignreport', help='A align.report file ' ) |
346 parser.add_option( '--report', dest='report', help='' ) | |
336 parser.add_option( '--taxonomy', dest='taxonomy', help='A Taxonomy file' ) | 347 parser.add_option( '--taxonomy', dest='taxonomy', help='A Taxonomy file' ) |
337 parser.add_option( '--reftaxonomy', dest='reftaxonomy', help='A Taxonomy file' ) | 348 parser.add_option( '--reftaxonomy', dest='reftaxonomy', help='A Taxonomy file' ) |
338 parser.add_option( '--taxon', dest='taxon', help='A Taxon' ) | 349 parser.add_option( '--taxon', dest='taxon', help='A Taxon' ) |
339 parser.add_option( '--taxlevel', dest='taxlevel', type="int", help='A Taxonomy level' ) | 350 parser.add_option( '--taxlevel', dest='taxlevel', type="int", help='A Taxonomy level' ) |
340 # parser.add_option( '--taxon', dest='taxon', action="callback", callback=remove_confidence_callback, help='A Taxon' ) | 351 # parser.add_option( '--taxon', dest='taxon', action="callback", callback=remove_confidence_callback, help='A Taxon' ) |
381 parser.add_option( '--size', dest='size', type='int', help='Size - sample size' ) | 392 parser.add_option( '--size', dest='size', type='int', help='Size - sample size' ) |
382 parser.add_option( '--groupmode', dest='groupmode', help='Collate groups into one result table' ) | 393 parser.add_option( '--groupmode', dest='groupmode', help='Collate groups into one result table' ) |
383 parser.add_option( '--all', dest='all', help='Calculate for all' ) | 394 parser.add_option( '--all', dest='all', help='Calculate for all' ) |
384 parser.add_option( '--freq', dest='freq', type="float", help='Frequency of sequences to choose, as fraction is 0.0 - 1.0 or iteration if int > 1' ) | 395 parser.add_option( '--freq', dest='freq', type="float", help='Frequency of sequences to choose, as fraction is 0.0 - 1.0 or iteration if int > 1' ) |
385 parser.add_option( '--iters', dest='iters', type='int', help='Iterations of randomizations' ) | 396 parser.add_option( '--iters', dest='iters', type='int', help='Iterations of randomizations' ) |
397 parser.add_option( '--maxiter', dest='maxiter', type='int', help='Iterations' ) | |
386 parser.add_option( '--maxiters', dest='maxiters', type='int', help='Iterations of randomizations' ) | 398 parser.add_option( '--maxiters', dest='maxiters', type='int', help='Iterations of randomizations' ) |
387 parser.add_option( '--jumble', dest='jumble', help='If false, just a collector curve across the samples' ) | 399 parser.add_option( '--jumble', dest='jumble', help='If false, just a collector curve across the samples' ) |
388 parser.add_option( '--conservation', dest='conservation', help='Template frequency information' ) | 400 parser.add_option( '--conservation', dest='conservation', help='Template frequency information' ) |
389 parser.add_option( '--quantile', dest='quantile', help='Template quantile information' ) | 401 parser.add_option( '--quantile', dest='quantile', help='Template quantile information' ) |
390 parser.add_option( '--parents', dest='parents', type='int', help='Number of Parents to investigate' ) | 402 parser.add_option( '--parents', dest='parents', type='int', help='Number of Parents to investigate' ) |
398 parser.add_option( '--divergence', dest='divergence', type='float', help='Divergence cutoff for chimera determination' ) | 410 parser.add_option( '--divergence', dest='divergence', type='float', help='Divergence cutoff for chimera determination' ) |
399 parser.add_option( '--sff', dest='sff', help='Sff file' ) | 411 parser.add_option( '--sff', dest='sff', help='Sff file' ) |
400 parser.add_option( '--svg', dest='svg', help='SVG' ) | 412 parser.add_option( '--svg', dest='svg', help='SVG' ) |
401 parser.add_option( '--sfftxt', dest='sfftxt', help='Generate a sff.txt file' ) | 413 parser.add_option( '--sfftxt', dest='sfftxt', help='Generate a sff.txt file' ) |
402 parser.add_option( '--flow', dest='flow', help='Generate a flowgram file' ) | 414 parser.add_option( '--flow', dest='flow', help='Generate a flowgram file' ) |
415 parser.add_option( '--minflows', dest='minflows', type='int', help='the minimum number of flows that each sequence must contain' ) | |
416 parser.add_option( '--maxflows', dest='maxflows', type='int', help='the number of flows after which all other flows should be ignored.' ) | |
417 parser.add_option( '--signal', dest='signal', type='float', help='threshold for intensity to be signal' ) | |
418 parser.add_option( '--noise', dest='noise', type='float', help='threshold for intensity to be noise' ) | |
419 parser.add_option( '--mindelta', dest='mindelta', type='float', help='threshold for determining how much change in the flowgram correction' ) | |
420 parser.add_option( '--sigma', dest='sigma', type='float', help='sigma option is used to set the dispersion of the data in the expectation-maximization' ) | |
421 parser.add_option( '--order', dest='order', help='flow order e.g. TACG' ) | |
422 parser.add_option( '--lookup', dest='lookup', help='lookup file that are needed to run shhh.seqs' ) | |
423 | |
403 parser.add_option( '--trim', dest='trim', help='Whether sequences and quality scores are trimmed to the clipQualLeft and clipQualRight values' ) | 424 parser.add_option( '--trim', dest='trim', help='Whether sequences and quality scores are trimmed to the clipQualLeft and clipQualRight values' ) |
404 parser.add_option( '--input', dest='input', help='' ) | 425 parser.add_option( '--input', dest='input', help='' ) |
405 parser.add_option( '--phylip', dest='phylip', help='' ) | 426 parser.add_option( '--phylip', dest='phylip', help='' ) |
406 parser.add_option( '--phylip2', dest='phylip2', help='' ) | 427 parser.add_option( '--phylip2', dest='phylip2', help='' ) |
407 parser.add_option( '--column', dest='column', help='' ) | 428 parser.add_option( '--column', dest='column', help='' ) |