Commit message: Uploaded
added:
bcftools.py beagle.py logging_module.py model.py shapeit.py vcf_phase.py vcf_phase.xml vcf_reader_func.py
diff -r 000000000000 -r 3830d29fca6a bcftools.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/bcftools.py	Mon Oct 15 18:15:47 2018 -0400
@@ -0,0 +1,131 @@
import os
import sys
import logging
import subprocess

sys.path.insert(0, os.path.abspath(os.path.join(os.pardir, 'jared')))

from vcf_reader_func import checkFormat

def check_bcftools_for_errors (bcftools_stderr):
    '''
    Checks the bcftools stderr for errors

    Parameters
    ----------
    bcftools_stderr : str
        bcftools stderr

    Raises
    ------
    Exception
        If bcftools stderr returns an error
    '''

    # Expand as errors are discovered
    if bcftools_stderr:
        logging.error(bcftools_stderr)
        raise Exception(bcftools_stderr)

def call_bcftools (bcftools_call_args):

    # bcftools subprocess call
    bcftools_call = subprocess.Popen(['bcftools'] + list(map(str, bcftools_call_args)), stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # Wait for bcftools to finish
    bcftools_out, bcftools_err = bcftools_call.communicate()

    check_bcftools_for_errors(bcftools_err)

    logging.info('bcftools call complete')

def check_for_index (filename):

    # Assign the file format
    file_format = checkFormat(filename)

    # Check if the file to be indexed is a vcf.gz
    if file_format == 'bgzip':
        # Check if the index (.tbi) exists
        if os.path.isfile(filename + '.tbi'):
            return True

    # Check if the file to be indexed is a bcf
    elif file_format == 'bcf':
        # Check if the index (.csi) exists
        if os.path.isfile(filename + '.csi'):
            return True

    # Return False if no index is found
    return False

def create_index (filename):

    # Assign the file format
    file_format = checkFormat(filename)

    # Check if the file to be indexed is a vcf.gz
    if file_format == 'bgzip':
        # Create an index (.tbi)
        call_bcftools(['index', '-t', filename])

    # Check if the file to be indexed is a bcf
    elif file_format == 'bcf':
        # Create an index (.csi)
        call_bcftools(['index', '-c', filename])

    # Report if the file cannot be indexed
    else:
        raise Exception('Error creating index for: %s. Only .bcf and .vcf.gz (bgzip) files are supported.' % filename)

def convert_to_bcf (filename, output_prefix):

    # Holds the arguments to convert to BCF format
    convert_args = ['convert', '-O', 'b']

    # Stores the specified output_prefix to the BCF file
    bcf_output = '%s.bcf' % output_prefix

    # Assigns the output file to the arguments
    convert_args.extend(['-o', bcf_output])

    # Assigns the specified input to the arguments
    convert_args.append(filename)

    # Call bcftools
    call_bcftools(convert_args)

def convert_to_vcf (filename, output_prefix):

    # Holds the arguments to convert to VCF format
    convert_args = ['view', '-O', 'v']

    # Stores the specified output_prefix to the VCF file
    vcf_output = '%s.vcf' % output_prefix

    # Assigns the output file to the arguments
    convert_args.extend(['-o', vcf_output])

    # Assigns the specified input to the arguments
    convert_args.append(filename)

    # Call bcftools
    call_bcftools(convert_args)

def convert_to_vcfgz (filename, output_prefix):

    # Holds the arguments to convert to VCFGZ format
    convert_args = ['view', '-O', 'z']

    # Stores the specified output_prefix to the VCFGZ file
    vcfgz_output = '%s.vcf.gz' % output_prefix

    # Assigns the output file to the arguments
    convert_args.extend(['-o', vcfgz_output])

    # Assigns the specified input to the arguments
    convert_args.append(filename)

    # Call bcftools
    call_bcftools(convert_args)
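For context, a minimal sketch of how these helpers chain together (assumes bcftools is on the PATH; filenames are hypothetical):

    from bcftools import check_for_index, create_index, convert_to_bcf

    # Index the bgzipped VCF if no .tbi/.csi index is present
    if not check_for_index('input.vcf.gz'):
        create_index('input.vcf.gz')

    # Convert input.vcf.gz to example.bcf
    convert_to_bcf('input.vcf.gz', 'example')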
b |
diff -r 000000000000 -r 3830d29fca6a beagle.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/beagle.py	Mon Oct 15 18:15:47 2018 -0400
@@ -0,0 +1,163 @@
import os
import sys
import subprocess
import shutil
import argparse
import glob
import logging

sys.path.insert(0, os.path.abspath(os.path.join(os.pardir, 'jared')))

from vcf_reader_func import checkFormat
from logging_module import initLogger, logArgs
from vcftools import bgzip_decompress_vcfgz
from bcftools import convert_to_bcf, check_for_index, create_index

def delete_beagle_log (output_prefix):
    '''
    Deletes the beagle log file

    This function is used to delete beagle's log file if an error is
    encountered. A warning is produced if the log file cannot be found.

    Parameters
    ----------
    output_prefix : str
        Output file prefix
    '''

    # Check that the log file exists, return a warning if not
    if not os.path.isfile(output_prefix + '.log'):
        logging.warning('beagle log file %s.log does not exist' % output_prefix)
    else:
        os.remove(output_prefix + '.log')

def check_beagle_for_errors (beagle_stderr, output_prefix):
    '''
    Checks the beagle stderr for errors

    Parameters
    ----------
    beagle_stderr : str
        beagle stderr
    output_prefix : str
        Output file prefix

    Raises
    ------
    Exception
        If beagle stderr returns an error
    '''

    # Check if beagle completed without an error
    if not beagle_stderr.strip():
        pass

    # Print a missing-data message if that is the likely cause
    elif 'ERROR: genotype is missing allele separator:' in str(beagle_stderr):
        # Delete the beagle log file
        delete_beagle_log(output_prefix)

        # Store the reported error
        error_reported = 'ERROR: genotype is missing allele separator'
        # Store a message for the user about the error
        user_message = 'Please confirm the input has no missing data.'
        # Report the error
        raise Exception(error_reported + '\n' + user_message)

    # Print output for beagle if an error is detected
    elif 'ERROR:' in str(beagle_stderr):
        # Delete the beagle log file
        delete_beagle_log(output_prefix)

        # Split the log into a list of lines
        beagle_stderr_lines = beagle_stderr.splitlines()
        # Print the error(s)
        raise Exception('\n'.join((output_line for output_line in beagle_stderr_lines if output_line.startswith('ERROR:'))))

    # Print output if not completed and no error was found. Unlikely to be used, but included.
    else:
        # Delete the beagle log file
        delete_beagle_log(output_prefix)

        raise Exception(beagle_stderr)


def standard_beagle_call (beagle_path, beagle_call_args, output_prefix):
    '''
    Calls beagle using subprocess

    This function is used to call beagle under standard conditions. The
    function then passes the stderr to check_beagle_for_errors to check
    for errors.

    Parameters
    ----------
    beagle_path : str
        Path to beagle.jar
    beagle_call_args : list
        Argument list for beagle
    output_prefix : str
        Output file prefix
    '''

    # Assign the location of the beagle jar file
    beagle_jar = os.path.join(beagle_path, 'beagle.jar')

    # Check that beagle.jar exists
    if not os.path.isfile(beagle_jar):
        raise IOError('beagle.jar not found. Path specified: %s' % beagle_path)

    logging.info('beagle phasing parameters assigned')

    # Phasing subprocess call
    phase_call = subprocess.Popen(['java', '-jar', beagle_jar] + beagle_call_args, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    phase_stdout, phase_stderr = phase_call.communicate()

    # Check if the code is running in python 3
    if sys.version_info[0] == 3:
        # Convert bytes to string
        phase_stderr = phase_stderr.decode()

    # Check the beagle call for errors
    check_beagle_for_errors(phase_stderr, output_prefix)

    logging.info('beagle phasing complete')

def call_beagle (beagle_path, beagle_call_args, output_prefix, output_format):
    '''
    Automates beagle calls

    This function passes the argument list to standard_beagle_call. Once the
    beagle call has finished, the function will automatically convert the
    bgzip-compressed output of beagle to BCF or VCF, if either format is
    specified.

    Parameters
    ----------
    beagle_path : str
        Path to beagle.jar
    beagle_call_args : list
        Argument list for beagle
    output_prefix : str
        Output file prefix
    output_format : str
        Output file format
    '''

    # Standard call to beagle
    standard_beagle_call(beagle_path, beagle_call_args, output_prefix)

    # Decompress if a VCF file is requested
    if output_format == 'vcf':
        bgzip_decompress_vcfgz(output_prefix + '.vcf.gz')

    # Convert to BCF if requested
    elif output_format == 'bcf':

        # Check if there is an index file
        if check_for_index(output_prefix + '.vcf.gz') == False:
            # Create an index if not found
            create_index(output_prefix + '.vcf.gz')
        # Convert vcf.gz to bcf
        convert_to_bcf(output_prefix + '.vcf.gz', output_prefix)
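A minimal usage sketch (hypothetical paths and filenames; it assumes beagle.jar lives in bin/ and that the argument list follows beagle's key=value convention, which is an assumption not shown in this diff):

    from beagle import call_beagle

    # Phase unphased.vcf.gz and convert the bgzipped output to BCF
    call_beagle('bin/', ['gt=unphased.vcf.gz', 'out=phased'], 'phased', 'bcf')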
diff -r 000000000000 -r 3830d29fca6a logging_module.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/logging_module.py	Mon Oct 15 18:15:47 2018 -0400
@@ -0,0 +1,97 @@
import sys
import logging


def initLogger(filename='pipeline.log', filelevel='INFO',
               streamlevel='WARNING', resetlog=True):
    """General logger initialization for PPP functions.

    Messages from WARNING level and higher will be logged to file and to
    stderr. By default, INFO level will also be written to the logfile.
    Both levels are flexible.

    Level groupings:
        ERROR: Error messages should be generated by an exception call
        WARNING: Non-terminal behavior that may be unusual (i.e. lists
                 with no values, blank strings)
        INFO: Every major (user-facing) function should have the following:
            -Message for function start
            -List of input arguments and options
            -Basic sanity checks (dimensions of input data)
            -Statements before or after major function calls
            -Message for function end
        DEBUG: Mainly for developer use/debugging. Generate logs for
               sub-functions that match the INFO level of major functions.
               Use with care inside loops, as large volumes of messages
               may be generated.

    Use: Call with either the individual function (in an
    __name__ == "__main__" statement) or in a pipeline file.

    Parameters
    ----------
    filename : str ("pipeline.log")
        Name of file that the log will be written to

    filelevel : {'INFO','DEBUG','WARNING','ERROR'}
        Set the minimum level of log messages that are written to the log
        file. Note that this acts as a de facto minimum for 'streamlevel'
        as well.

    streamlevel : {'WARNING','DEBUG','INFO','ERROR'}
        Set the minimum level of log messages that are output to the stream.

    resetlog : bool (True)
        If True, will overwrite the logfile when opening. Set to False if
        the log is being initialized multiple times.

    Returns
    -------
    None

    Raises
    ------
    Exception
        If filelevel or streamlevel is not a valid logging level

    """
    log_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR']
    if filelevel is not None and filelevel.upper() not in log_levels:
        raise Exception('filelevel value %s is not a valid level' %
                        filelevel)
    if streamlevel is not None and streamlevel.upper() not in log_levels:
        raise Exception('streamlevel value %s is not a valid level' %
                        streamlevel)
    fmt_def = "%(asctime)s - %(funcName)s - %(levelname)s: %(message)s"
    fmt_notime = "%(funcName)s - %(levelname)s: %(message)s"
    fmtr = logging.Formatter(fmt=fmt_def)
    fmtr_notime = logging.Formatter(fmt=fmt_notime)
    filelogger = logging.getLogger()
    filelogger.setLevel('INFO')
    if streamlevel is not None:
        s_handler = logging.StreamHandler()
        s_handler.setFormatter(fmtr_notime)
        s_handler.setLevel(streamlevel)
        filelogger.addHandler(s_handler)
    logmode = 'a'
    if resetlog:
        logmode = 'w'
    if filelevel is not None:
        f_handler = logging.FileHandler(filename, mode=logmode)
        f_handler.setFormatter(fmtr)
        f_handler.setLevel(filelevel)
        #filelogger.setLevel(filelevel)
        filelogger.addHandler(f_handler)

    # Formats exception messages to be sent to the appropriate loggers
    def exp_handler(etype, val, tb):
        logging.error("%s" % (val), exc_info=(etype, val, tb))

    sys.excepthook = exp_handler


def logArgs(args, func_name=None, print_nones=False):
    header = "Arguments"
    if func_name is not None:
        header += " for " + func_name
    for arg in vars(args):
        val = vars(args)[arg]
        if val is not None or print_nones:
            logging.info('Argument %s: %s' % (arg, val))
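A minimal sketch of how these two helpers are typically used together (the argparse setup and argument values are hypothetical):

    import argparse
    from logging_module import initLogger, logArgs

    parser = argparse.ArgumentParser()
    parser.add_argument('--vcf', type=str)
    args = parser.parse_args(['--vcf', 'input.vcf.gz'])

    # Write INFO+ messages to pipeline.log, WARNING+ to stderr
    initLogger(filename='pipeline.log')
    # Record every non-None argument at INFO level
    logArgs(args, func_name='example')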
diff -r 000000000000 -r 3830d29fca6a model.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/model.py	Mon Oct 15 18:15:47 2018 -0400
@@ -0,0 +1,236 @@
import os
import sys
import json
import subprocess
import argparse
import logging
import itertools

from collections import defaultdict

# Insert Jared's directory path, required for calling Jared's functions. Change when directory structure changes.
sys.path.insert(0, os.path.abspath(os.path.join(os.pardir, 'jared')))

from logging_module import initLogger

class ModelFile(dict):
    def __init__(self, *arg, **kw):
        super(ModelFile, self).__init__(*arg, **kw)
        self.inds = []
        self.ind_file = ''
        self.exclude_file = ''

    def assign_inds (self, inds = []):
        # Return an error if inds is empty
        if not inds:
            raise IOError('No individuals found in the model file.')
        # Store the individuals
        self.inds = [str(ind) for ind in inds]

    def create_ind_file (self, file_ext = '', file_path = '', overwrite = False):
        # Assign the filename for the individuals file
        ind_filename = 'unique_individuals' + file_ext

        # If a path is assigned, create the file at the specified location
        if file_path:
            ind_filename = os.path.join(file_path, ind_filename)

        # Check if previous files should be overwritten
        if not overwrite:
            # Check if the file already exists
            if os.path.isfile(ind_filename):
                raise IOError('Individuals file exists.')

        # Create the individuals file
        ind_file = open(ind_filename, 'w')
        ind_file.write('%s\n' % '\n'.join(self.inds))
        ind_file.close()

        # Save the individuals filename
        self.ind_file = ind_filename

    def delete_ind_file (self):
        # Check if an individuals file was created
        if self.ind_file:

            # Delete the individuals file
            os.remove(self.ind_file)

            # Remove the filename
            self.ind_file = ''

    def create_exclude_ind_file (self, inds_to_include = [], file_ext = '', file_path = '', overwrite = False):
        # Assign the filename for the exclude file
        ind_filename = 'exclude_individuals' + file_ext

        # If a path is assigned, create the file at the specified location
        if file_path:
            ind_filename = os.path.join(file_path, ind_filename)

        # Check if previous files should be overwritten
        if not overwrite:
            # Check if the file already exists
            if os.path.isfile(ind_filename):
                raise IOError('Individuals file exists.')

        # Create the exclude list by removing the included individuals
        exclude_inds = list(set(self.inds) - set(inds_to_include))

        # Create the exclude file
        ind_file = open(ind_filename, 'w')
        ind_file.write('%s\n' % '\n'.join(exclude_inds))
        ind_file.close()

        # Save the exclude filename
        self.exclude_file = ind_filename

    # Renamed from a second delete_ind_file definition, which would have
    # silently shadowed the method above
    def delete_exclude_ind_file (self):
        # Check if an exclude file was created
        if self.exclude_file:

            # Delete the exclude file
            os.remove(self.exclude_file)

            # Remove the filename
            self.exclude_file = ''

class Model:
    def __init__ (self, name):
        self.name = name
        self.tree = ''
        self.npop = 0
        self.pop_list = []
        self.nind = defaultdict(int)
        self.ind_dict = defaultdict(list)
        self.pop_files = []
        self.ind_file = ''

    @property
    def inds(self):
        return list(itertools.chain.from_iterable(self.ind_dict.values()))

    def assign_tree (self, tree):
        self.tree = str(tree)

    def assign_pop (self, pop, inds = []):
        self.npop += 1
        self.pop_list.append(str(pop))
        if inds:
            self.nind[pop] = len(inds)
            self.ind_dict[pop] = [str(ind) for ind in inds]

    def create_pop_files (self, file_ext = '', file_path = '', overwrite = False):
        for pop in self.pop_list:
            # Assign the filename for the population file
            pop_filename = pop + file_ext

            # If a path is assigned, create the file at the specified location
            if file_path:
                pop_filename = os.path.join(file_path, pop_filename)

            # Check if previous files should be overwritten
            if not overwrite:
                # Check if the file already exists
                if os.path.isfile(pop_filename):
                    raise IOError('Population file exists.')

            # Create the population file
            pop_file = open(pop_filename, 'w')
            pop_file.write('%s\n' % '\n'.join(self.ind_dict[pop]))
            pop_file.close()

            # Save the population filename
            self.pop_files.append(pop_filename)

    def delete_pop_files (self):
        # Check if pop files were created
        if len(self.pop_files) != 0:

            # Loop the created pop files
            for pop_file in self.pop_files:
                # Delete the pop file
                os.remove(pop_file)

            # Remove the filenames
            self.pop_files = []

    def create_ind_file (self, file_ext = '', file_path = '', overwrite = False):
        # Assign the filename for the individuals file
        ind_filename = 'individual.keep' + file_ext

        # If a path is assigned, create the file at the specified location
        if file_path:
            ind_filename = os.path.join(file_path, ind_filename)

        # Check if previous files should be overwritten
        if not overwrite:
            # Check if the file already exists
            if os.path.isfile(ind_filename):
                raise IOError('Individuals file exists.')

        # Create the individuals file
        ind_file = open(ind_filename, 'w')
        ind_file.write('%s\n' % '\n'.join(self.inds))
        ind_file.close()

        # Save the individuals filename
        self.ind_file = ind_filename

    def delete_ind_file (self):
        # Check if an individuals file was created
        if self.ind_file:

            # Delete the individuals file
            os.remove(self.ind_file)

            # Remove the filename
            self.ind_file = ''

def read_model_file (model_filename):

    # Check that the file exists
    if not os.path.isfile(model_filename):
        raise IOError('Model file %s not found' % model_filename)

    # Create the ModelFile object
    models_to_return = ModelFile()

    # Check if using python 2 or 3
    if sys.version_info[0] == 2:
        # Open the model file in python 2
        model_file = open(model_filename, 'rU')
    else:
        # Open the model file in python 3
        model_file = open(model_filename, 'r', newline=None)

    # Parse the model file using the json reader
    models_dict = json.load(model_file)
    model_file.close()

    # List to store all unique individuals (i.e. individuals in all models)
    individual_list = []

    # Loop the parsed models
    for model_dict in models_dict:

        # Create the model
        model = Model(model_dict['name'])

        # Loop the populations in the model
        for pop, pop_dict in model_dict['pops'].items():

            # Assign the population and its individuals to the model
            model.assign_pop(pop, pop_dict['inds'])
            # Assign the individuals to the unique individual list
            individual_list.extend(pop_dict['inds'])

        # Remove duplicates from the unique individual list
        individual_list = list(set(individual_list))

        # Save the model
        models_to_return[str(model.name)] = model

    # Store the unique individuals within the ModelFile object
    models_to_return.assign_inds(individual_list)

    # Return the models
    return models_to_return
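Based on the keys read_model_file accesses ('name', 'pops', and per-population 'inds'), a model file appears to be a JSON list of model entries. A hypothetical two-population example (all names are illustrative):

    import json
    from model import read_model_file

    # Hypothetical model file: a JSON list of models, each with named
    # populations and their individuals
    models_json = [{'name': '2Pop',
                    'pops': {'Pop1': {'inds': ['Ind1', 'Ind2']},
                             'Pop2': {'inds': ['Ind3', 'Ind4']}}}]
    with open('example.model', 'w') as model_file:
        json.dump(models_json, model_file)

    models = read_model_file('example.model')
    model = models['2Pop']
    print(model.pop_list)  # ['Pop1', 'Pop2']
    print(model.inds)      # All four individuals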
diff -r 000000000000 -r 3830d29fca6a shapeit.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/shapeit.py	Mon Oct 15 18:15:47 2018 -0400
@@ -0,0 +1,160 @@
import os
import sys
import subprocess
import shutil
import argparse
import glob
import logging

sys.path.insert(0, os.path.abspath(os.path.join(os.pardir, 'jared')))

from vcf_reader_func import checkFormat
from logging_module import initLogger, logArgs
from plink import convert_haps_to_vcf
#from vcftools import bgzip_decompress_vcfgz
#from bcftools import convert_to_bcf, check_for_index, create_index

def check_shapeit_for_errors (shapeit_stdout, output_prefix):
    '''
    Checks the shapeit stdout for errors

    Parameters
    ----------
    shapeit_stdout : str
        shapeit stdout
    output_prefix : str
        Output filename prefix

    Raises
    ------
    Exception
        If shapeit stdout returns an error
    '''

    # Returns True if the job completed without error
    if 'Running time:' in str(shapeit_stdout):
        pass

    # Print output if not completed and no error was found. Unlikely to be used, but included.
    else:
        # Remove intermediate files before reporting the error
        remove_intermediate_files(output_prefix, error_intermediates = True)
        raise Exception(str(shapeit_stdout))

def remove_intermediate_files (output_prefix, error_intermediates = False):
    '''
    Removes shapeit intermediate files

    This function is used to remove the various intermediate files created
    by shapeit. The exact intermediate files to be removed are defined by
    the error-state of shapeit. The function will also return warnings if
    the intermediate files were not found.

    Parameters
    ----------
    output_prefix : str
        Output filename prefix
    error_intermediates : bool, optional
        Defines if shapeit encountered an error
    '''

    if error_intermediates:

        # Check that the log file was created, give a warning otherwise
        if not os.path.isfile(output_prefix + '.phase.log'):
            logging.warning('shapeit intermediate file %s.phase.log does not exist' % output_prefix)
        else:
            # Remove the shapeit log file
            os.remove(output_prefix + '.phase.log')

    else:

        # Check that the phase.ind.mm file was created, give a warning otherwise
        if not os.path.isfile(output_prefix + '.phase.ind.mm'):
            logging.warning('shapeit intermediate file %s.phase.ind.mm does not exist' % output_prefix)
        else:
            # Remove the shapeit phase.ind.mm file
            os.remove(output_prefix + '.phase.ind.mm')

        # Check that the phase.snp.mm file was created, give a warning otherwise
        if not os.path.isfile(output_prefix + '.phase.snp.mm'):
            logging.warning('shapeit intermediate file %s.phase.snp.mm does not exist' % output_prefix)
        else:
            # Remove the shapeit phase.snp.mm file
            os.remove(output_prefix + '.phase.snp.mm')

        # Check that the haps file was created, give a warning otherwise
        if not os.path.isfile(output_prefix + '.haps'):
            logging.warning('shapeit intermediate file %s.haps does not exist' % output_prefix)
        else:
            # Remove the shapeit haps file
            os.remove(output_prefix + '.haps')

        # Check that the sample file was created, give a warning otherwise
        if not os.path.isfile(output_prefix + '.sample'):
            logging.warning('shapeit intermediate file %s.sample does not exist' % output_prefix)
        else:
            # Remove the shapeit sample file
            os.remove(output_prefix + '.sample')

    logging.info('shapeit-related files removed')

def standard_shapeit_call (shapeit_call_args, output_prefix):
    '''
    Calls shapeit using subprocess

    This function is used to call shapeit and passes the resulting stdout
    to check_shapeit_for_errors to check for errors. The function also
    passes output_prefix to check_shapeit_for_errors so that shapeit
    intermediate files may be deleted if shapeit results in an error.

    Parameters
    ----------
    shapeit_call_args : list
        Argument list for shapeit
    output_prefix : str
        Output filename prefix
    '''

    logging.info('shapeit phasing parameters assigned')

    # Phasing subprocess call
    phase_call = subprocess.Popen(['shapeit'] + shapeit_call_args, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    phase_stdout, phase_stderr = phase_call.communicate()

    # Check if the code is running in python 3
    if sys.version_info[0] == 3:
        # Convert bytes to string
        phase_stdout = phase_stdout.decode()

    # Check the shapeit call for errors
    check_shapeit_for_errors(phase_stdout, output_prefix)

    logging.info('shapeit phasing complete (HAPS format)')

def call_shapeit (shapeit_call_args, output_prefix, output_format):
    '''
    Calls shapeit and automates file conversions

    This function is used to call shapeit and also automates conversion to
    VCF, VCF.GZ, and BCF using plink2.

    Parameters
    ----------
    shapeit_call_args : list
        Argument list for shapeit
    output_prefix : str
        Output filename prefix
    output_format : str
        Output file format
    '''

    # Standard call to shapeit
    standard_shapeit_call(shapeit_call_args, output_prefix)

    # Convert the haps-format output to VCF
    convert_haps_to_vcf(output_prefix, output_format)

    logging.info('HAPS conversion to VCF complete')
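A minimal usage sketch (assumes the shapeit binary is on the PATH and the plink module providing convert_haps_to_vcf is importable; filenames are hypothetical, and the flags mirror those assembled in vcf_phase.py below):

    from shapeit import call_shapeit

    # Phase input.vcf.gz and convert the HAPS output to a bgzipped VCF
    call_shapeit(['--input-vcf', 'input.vcf.gz',
                  '--output-max', 'phased',
                  '--output-log', 'phased.phase.log'], 'phased', 'vcf.gz')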
diff -r 000000000000 -r 3830d29fca6a vcf_phase.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/vcf_phase.py	Mon Oct 15 18:15:47 2018 -0400
@@ -0,0 +1,542 @@
import os
import sys
import copy
import shutil
import argparse
import glob
import logging

sys.path.insert(0, os.path.abspath(os.path.join(os.pardir, 'jared')))

from vcf_reader_func import checkFormat
from logging_module import initLogger, logArgs
from model import read_model_file
from beagle import call_beagle
from shapeit import call_shapeit, remove_intermediate_files
from bcftools import pipe_bcftools_to_chr, chr_subset_file, concatenate

def phase_argument_parser(passed_arguments):
    '''Phase Argument Parser - Assigns arguments for vcf_phase from the
    command line. Depending on the argument in question, a default value
    may be specified'''

    def parser_confirm_file ():
        '''Custom action to confirm file exists'''
        class customAction(argparse.Action):
            def __call__(self, parser, args, value, option_string=None):
                if not os.path.isfile(value):
                    raise IOError('%s not found' % value)
                setattr(args, self.dest, value)
        return customAction

    def metavar_list (var_list):
        '''Create a formatted metavar list for the help output'''
        return '{' + ', '.join(var_list) + '}'

    phase_parser = argparse.ArgumentParser()

    # Input arguments
    phase_parser.add_argument('--vcf', help = "Input VCF filename", type = str, required = True, action = parser_confirm_file())

    # Model file arguments
    phase_parser.add_argument('--model-file', help = 'Defines the model file', type = str, action = parser_confirm_file())
    phase_parser.add_argument('--model', help = 'Defines the model to analyze', type = str)

    # General arguments
    phase_parser.add_argument('--overwrite', help = "Overwrite previous output files", action = 'store_true')
    phase_parser.add_argument('--beagle-path', help = "Defines path to locate beagle.jar", type = str, default = 'bin/')

    # Phase algorithm argument
    phasing_list = ['beagle', 'shapeit']
    phasing_default = 'beagle'
    phase_parser.add_argument('--phase-algorithm', metavar = metavar_list(phasing_list), help = 'Specifies the phase algorithm to be used', type = str, choices = phasing_list, default = phasing_default)

    # Common phasing arguments
    phase_parser.add_argument('--Ne', help = 'Defines the effective population size', type = int)
    phase_parser.add_argument('--random-seed', help = "Defines the random seed value for the random number generator", type = int)
    phase_parser.add_argument('--genetic-map', help = 'Genetic map filename', type = str, action = parser_confirm_file())
    phase_parser.add_argument('--phase-chr', help = 'Selects a single chromosome to phase', type = str)
    phase_parser.add_argument('--phase-from-bp', help = 'Lower bound of sites to include (Only usable with a single chromosome)', type = int)
    phase_parser.add_argument('--phase-to-bp', help = 'Upper bound of sites to include (Only usable with a single chromosome)', type = int)

    # Shapeit-specific options
    phase_parser.add_argument('--shapeit-burn-iter', help = 'Number of the burn-in iterations (shapeit)', type = int)
    phase_parser.add_argument('--shapeit-prune-iter', help = 'Number of pruning iterations (shapeit)', type = int)
    phase_parser.add_argument('--shapeit-main-iter', help = 'Number of main iterations (shapeit)', type = int)
    phase_parser.add_argument('--shapeit-states', help = 'Number of conditioning states for haplotype estimation (shapeit)', type = int)
    phase_parser.add_argument('--shapeit-window', help = 'Model window size in Mb (shapeit)', type = float)

    # Beagle-specific options
    phase_parser.add_argument('--beagle-burn-iter', help = 'Number of the burn-in iterations (beagle)', type = int)
    phase_parser.add_argument('--beagle-iter', help = 'Number of iterations after burn-in (beagle)', type = int)
    phase_parser.add_argument('--beagle-states', help = 'Number of model stat

[... middle of this file is truncated in the changeset view ...]

        # Rename output to phase_args.out, if specified
        if phase_args.out:
            shutil.move(phased_output, phase_args.out)
            shutil.move(phased_output + '.log', phase_args.out + '.log')

        logging.info('shapeit log file created')

        # Check if a chr subset file was created
        if phase_args.phase_chr and len(chrs_in_vcf) > 1:
            # Delete the chromosome-specific input
            os.remove(shapeit_input_vcf)

            logging.info('Chr %s subset deleted' % phase_args.phase_chr)

        # Remove intermediate files created by shapeit
        remove_intermediate_files(phase_args.out_prefix)

    # Check if multiple shapeit runs are required
    else:

        # List to store the phased filenames
        phased_filename_list = []

        # List to store the phased logs
        phased_log_list = []

        logging.info('Multi-chr shapeit phasing assigned')

        for chr_in_vcf in chrs_in_vcf:

            logging.info('Chr %s assigned' % chr_in_vcf)

            # Copy the arguments for this run
            chr_call_args = copy.deepcopy(phase_call_args)

            # Assign the chromosome-specific output prefix
            chr_out_prefix = phase_args.out_prefix + '.' + chr_in_vcf

            # Assign the expected chromosome-specific output filename
            chr_out_filename = '%s.%s' % (chr_out_prefix, phase_args.out_format)

            # Store the output filename
            phased_filename_list.append(chr_out_filename)

            # Assign the chromosome-specific input prefix
            chr_input_prefix = phase_args.vcf + '.' + chr_in_vcf

            # Assign the expected chromosome-specific input filename
            chr_in_filename = chr_input_prefix + '.vcf.gz'

            # Create the chromosome-specific input
            chr_subset_file(phase_args.vcf,
                            chr_in_vcf,
                            chr_input_prefix,
                            'vcf.gz')

            # Assigns the input and output arguments for shapeit
            chr_call_args.extend(['--input-vcf', chr_in_filename,
                                  '--output-max', chr_out_prefix,
                                  '--output-log', chr_out_prefix + '.phase.log'])

            # Call the shapeit wrapper
            call_shapeit(list(map(str, chr_call_args)), chr_out_prefix, phase_args.out_format)

            # Combine the log files
            concatenate_logs([chr_out_prefix + '.phase.log', chr_out_prefix + '.log'], chr_out_filename + '.log')

            # Store the filename of the combined logs
            phased_log_list.append(chr_out_filename + '.log')

            # Delete the chromosome-specific input
            os.remove(chr_in_filename)

            # Remove intermediate files created by shapeit
            remove_intermediate_files(chr_out_prefix)

        # Concatenate the vcf files
        concatenate(phased_filename_list, phase_args.out_prefix, phase_args.out_format)

        logging.info('Concatenated chromosomes')

        # Assign the expected concatenated output filename
        phased_output = '%s.%s' % (phase_args.out_prefix, phase_args.out_format)

        # Combine the log files
        concatenate_logs(phased_log_list, phased_output + '.log')

        # Rename output to phase_args.out, if specified
        if phase_args.out:
            shutil.move(phased_output, phase_args.out)
            shutil.move(phased_output + '.log', phase_args.out + '.log')

        logging.info('Multi-chr shapeit log created')

    # Reverts the VCF input file
    if vcfname_renamed:
        os.rename(phase_args.vcf, phase_args.vcf[:-len(vcfname_ext)])

if __name__ == "__main__":
    initLogger()
    run()
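Putting the pieces together, a hypothetical invocation of the script might look as follows (--out and --out-format are taken from the tool wrapper below, as their parser definitions fall in the truncated portion; all values are illustrative):

    python vcf_phase.py --vcf input.vcf.gz --phase-algorithm shapeit \
        --phase-chr chr1 --phase-from-bp 100000 --phase-to-bp 500000 \
        --random-seed 1000 --out phased.vcf.gz --out-format vcf.gz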
diff -r 000000000000 -r 3830d29fca6a vcf_phase.xml
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/vcf_phase.xml	Mon Oct 15 18:15:47 2018 -0400
@@ -0,0 +1,166 @@
<tool id="vcf_phase" name="Phase VCF" version="1.0.0.1">

  <description>files with BEAGLE or SHAPEIT</description>

  <requirements>
    <requirement type="package">pandas</requirement>
    <requirement type="package">pysam</requirement>
    <requirement type="package">shapeit</requirement>
    <requirement type="package">beagle</requirement>
  </requirements>

  <command><![CDATA[
    #if $input.is_of_type('vcf_bgzip')
        ln -fs $input input.vcf.gz &&
    #end if
    #if $input.is_of_type('vcf')
        ln -fs $input input.vcf &&
    #end if
    python $__tool_directory__/vcf_phase.py
    #if $input.is_of_type('vcf_bgzip')
        --vcf input.vcf.gz
    #end if
    #if $input.is_of_type('vcf')
        --vcf input.vcf
    #end if
    #if $model_file
        --model-file $model_file
        --model $model
    #end if
    --phase-algorithm $phase.phase_algorithm
    #if $phase.phase_algorithm == 'beagle'
        --beagle-path $__tool_data_path__/shared/jars/
        #if $phase.beagle_burn_iter
            --beagle-burn-iter $phase.beagle_burn_iter
        #end if
        #if $phase.beagle_iter
            --beagle-iter $phase.beagle_iter
        #end if
        #if $phase.beagle_states
            --beagle-states $phase.beagle_states
        #end if
        #if $phase.beagle_window
            --beagle-window $phase.beagle_window
        #end if
        #if $phase.beagle_overlap
            --beagle-overlap $phase.beagle_overlap
        #end if
        #if $phase.beagle_error
            --beagle-error $phase.beagle_error
        #end if
        #if $phase.beagle_step
            --beagle-step $phase.beagle_step
        #end if
        #if $phase.beagle_nsteps
            --beagle-nsteps $phase.beagle_nsteps
        #end if
    #end if
    #if $phase.phase_algorithm == 'shapeit'
        #if $phase.shapeit_burn_iter
            --shapeit-burn-iter $phase.shapeit_burn_iter
        #end if
        #if $phase.shapeit_prune_iter
            --shapeit-prune-iter $phase.shapeit_prune_iter
        #end if
        #if $phase.shapeit_main_iter
            --shapeit-main-iter $phase.shapeit_main_iter
        #end if
        #if $phase.shapeit_states
            --shapeit-states $phase.shapeit_states
        #end if
        #if $phase.shapeit_window
            --shapeit-window $phase.shapeit_window
        #end if
    #end if
    #if $common.genetic_map
        --genetic-map $common.genetic_map
    #end if
    #if $common.ne
        --Ne $common.ne
    #end if
    #if $common.random_seed
        --random-seed $common.random_seed
    #end if
    #if $common.phase_chr
        --phase-chr $common.phase_chr
    #end if
    #if $common.phase_from_bp
        --phase-from-bp $common.phase_from_bp
    #end if
    #if $common.phase_to_bp
        --phase-to-bp $common.phase_to_bp
    #end if
    --out $output
    --out-format $out_format
  ]]></command>

  <inputs>

    <param format="vcf,vcf_bgzip" name="input" type="data" label="VCF Input"/>

    <param format="model" name="model_file" type="data" label="Model Input" optional="True"/>
    <param name="model" type="select" label="Select Model" refresh_on_change="True">
      <options>
        <filter type="data_meta" ref="model_file" key="models"/>
      </options>
    </param>

    <conditional name="phase">
      <param name="phase_algorithm" type="select" label="Phase Algorithm" refresh_on_change="True">
        <option value="beagle" selected="True">Beagle</option>
        <option value="shapeit">SHAPEIT</option>
      </param>
      <when value="beagle">
        <param name="beagle_burn_iter" type="integer" label="Burn-in iterations" optional="True"/>
        <param name="beagle_iter" type="integer" label="Post burn-in iterations" optional="True"/>
        <param name="beagle_states" type="integer" label="Model states for genotype estimation" optional="True"/>
        <param name="beagle_window" type="float" label="Sliding window size (cM)" optional="True"/>
        <param name="beagle_overlap" type="float" label="Overlap between neighboring sliding windows (cM)" optional="True"/>
        <param name="beagle_error" type="float" label="HMM allele mismatch probability" optional="True"/>
        <param name="beagle_step" type="float" label="Step length (cM)" optional="True" help="Used for identifying short IBS segments"/>
        <param name="beagle_nsteps" type="integer" label="Number of consecutive steps" optional="True" help="Used for identifying long IBS segments"/>
      </when>
      <when value="shapeit">
        <param name="shapeit_burn_iter" type="integer" label="Burn-in iterations" optional="True"/>
        <param name="shapeit_prune_iter" type="integer" label="Pruning iterations" optional="True"/>
        <param name="shapeit_main_iter" type="integer" label="Main iterations" optional="True"/>
        <param name="shapeit_states" type="integer" label="Conditioning states for haplotype estimation" optional="True"/>
        <param name="shapeit_window" type="float" label="Model window size (Mb)" optional="True"/>
      </when>
    </conditional>

    <section name="common" title="Common Parameters" expanded="True">
      <param name="random_seed" type="integer" label="Seed value" optional="True"/>
      <param format="text" name="genetic_map" type="data" label="Genetic Map" optional="True"/>
      <param name="ne" type="integer" label="Effective population size" optional="True"/>
      <param name="phase_chr" type="text" label="Chromosome to phase" optional="True"/>
      <param name="phase_from_bp" type="integer" label="Lower bound of sites to include" help="Only usable with a single chromosome" optional="True"/>
      <param name="phase_to_bp" type="integer" label="Upper bound of sites to include" help="Only usable with a single chromosome" optional="True"/>
    </section>

    <param name="out_format" type="select" label="Output Format">
      <option value="vcf">VCF File</option>
      <option value="vcf.gz" selected="True">bgzipped-VCF File</option>
      <option value="bcf">BCF File</option>
    </param>

  </inputs>
  <outputs>

    <data name="output" format="vcf_bgzip">
      <change_format>
        <when input="out_format" value="vcf" format="vcf"/>
        <when input="out_format" value="bcf" format="bcf"/>
      </change_format>
    </data>

  </outputs>

  <help>
    VCF Phaser Help Text
  </help>

</tool>
diff -r 000000000000 -r 3830d29fca6a vcf_reader_func.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/vcf_reader_func.py	Mon Oct 15 18:15:47 2018 -0400
@@ -0,0 +1,474 @@
import sys
import pysam
import logging
import struct
from random import sample
from collections import defaultdict
import os
import gzip

def checkIfGzip(filename):
    try:
        gf = gzip.open(filename)
        gl = gf.readline()
        gf.close()
        vcf_check = b'##fileformat=VCF'
        if gl[0:3] == b'BCF':
            return 'bcf'
        elif gl[:len(vcf_check)] == vcf_check:
            return checkHeader(filename)
        else:
            return 'other'
    except:
        return 'nozip'

def checkHeader(filename):
    f = open(filename, 'rb')
    l = f.readline()
    f.close()
    BGZF_HEADER = b'\x1f\x8b\x08\x04\x00\x00\x00\x00\x00\xff\x06\x00\x42\x43\x02\x00'
    #BGZF_HEADER = b'\x1f\x8b\x08\x04\x00\x00\x00\x00\x00\xff'
    GZF_HEADER = b'\x1f\x8b'
    if l[:len(BGZF_HEADER)] == BGZF_HEADER:
        return 'bgzip'
    if l[:len(GZF_HEADER)] == GZF_HEADER:
        return 'gzip'
    return 'nozip'

def checkFormat(vcfname):
    """Checks the header of the given file for the compression type

    Given a filename, opens the file and reads the first line to check if
    the file has a BGZF or GZIP header. May be extended to check for BCF
    format.

    Parameters
    ----------
    vcfname : str
        Name of file to be checked

    Returns
    -------
    extension : str {'bgzip','gzip','bcf','vcf','other'}
        File extension as indicated by the header

    """
    typ = checkIfGzip(vcfname)
    if typ != 'nozip':
        return typ
    f = open(vcfname)
    l = f.readline()
    f.close()
    VCF_TAG = '##fileformat=VCF'
    if l[:len(VCF_TAG)] == VCF_TAG:
        return 'vcf'
    return 'other'

def checkIfCpG(record, fasta_ref, offset=0, add_chr=False):
    dr = None
    pos = record.pos
    c = record.chrom
    if record.alts is None:
        return False
    if add_chr:
        c = 'chr' + record.chrom
    if record.ref == 'C' and 'T' in record.alts:
        seq = fasta_ref.fetch(c, pos-1, pos+1)
        if seq[0].upper() != 'C':
            logging.warning('%s %d has bad base %s' % (record.chrom, record.pos, seq[0]))
            #raise Exception("checkIfCpG function not lining up properly")
        if seq[1].upper() == 'G':
            return True
        return False
    elif record.ref == 'G' and 'A' in record.alts:
        seq = fasta_ref.fetch(c, pos-2, pos)
        if seq[1].upper() != 'G':
            logging.warning('%s %d has bad base %s' % (record.chrom, record.pos, seq[1]))
            #raise Exception("checkIfCpg function not lining up on negative strand")
        if seq[0].upper() == 'C':
            return True
        return False
    return False

def checkForDuplicates(rec_list, pass_list):
    for i in range(len(rec_list)-1):
        if rec_list[i].pos == rec_list[i+1].pos:
            pass_list[i] = False
            pass_list[i+1] = False

def checkForMultiallele(rec_list, pass_list):
    for i in range(len(rec_list)):
        if i != len(rec_list)-1 and rec_list[i].pos == rec_list[i+1].pos:
            pass_list[i] = False
            pass_list[i+1] = False
        if len(rec_list[i].alleles) > 2:
            pass_list[i] = False

def flipChrom(chrom):
    # Strip the 'chr' prefix if present, otherwise add it
    if chrom[0:3] == 'chr':
        return chrom[3:]
    return 'chr' + chrom

def getAlleleCountDict(rec):
    alleles = defaultdict(int)
    total_sites = 0
    missing_inds = 0
    for j in range(len(rec.samples)):
        samp = rec.samples[j]
        if None in samp.alleles:
            missing_inds += 1
        for k in range(len(samp.alleles)):
            b = samp.alleles[k]
            if b is not None:
                alleles[b] += 1
            total_sites += 1
    return alleles, total_sites, missing_inds

def isInformative(rec, mincount=2, alleles=None):
    count = 0
    if alleles is None:
        alleles, total_sites, missing_inds = getAlleleCountDict(rec)
    if len(alleles) != 2:
        return False
    i1, i2 = alleles.keys

[... middle of this file is truncated in the changeset view ...]

        path to compressed VCF file
    """
    cvcfname = vcfname + ".gz"
    pysam.tabix_compress(vcfname, cvcfname, force=forceflag)
    pysam.tabix_index(cvcfname, preset="vcf", force=True)
    if remove:
        os.remove(vcfname)
    return cvcfname

def vcfRegionName(prefix, region, ext, oneidx=False,
                  halfopen=True, sep='-'):
    chrom = region.toStr(halfopen, oneidx, sep)
    return prefix + '_' + chrom + '.' + ext

def getRecordsInRegion(region, record_list):
    sub_list = []
    for i in range(len(record_list)):
        loc = region.containsRecord(record_list[i])
        if loc == "in":
            sub_list.append(record_list[i])
        elif loc == "after":
            break
    return sub_list


#def getVcfReader(args):
def getVcfReader(vcfname, compress_flag=False, subsamp_num=None,
                 subsamp_fn=None, subsamp_list=None, index=None):
    """Returns a reader for a given input VCF file.

    Given a filename, filetype, compression option, and optional
    subsampling options, will return a pysam.VariantFile object for
    iteration and a flag as to whether this file is compressed or
    uncompressed.

    Parameters
    ----------
    vcfname : str
        Filename for the VCF file. The extension of this file will be used
        to determine whether it is compressed or not unless `var_ext` is
        set.
    var_ext : str (None)
        Extension for the VCF file if it is not included in the filename.
    compress_flag : bool (False)
        If the filetype is uncompressed and this is set to True, will run
        the compressVcf function.
    subsamp_num : int (None)
        If set, will randomly select `subsamp_num` individuals (not
        genotypes) from the input VCF file and return a reader with
        only those data.
    subsamp_fn : str (None)
        If set, will return a reader with only data from the samples listed
        in the file provided. Cannot be used with other subsampling options.
    subsamp_list : list (None)
        If set, will return a reader with records containing only
        individuals named in the list. Cannot be used with other
        subsampling options.

    Returns
    -------
    vcf_reader : pysam.VariantFile
        A reader that can be iterated through for variant records. If
        compressed, it will be able to use the pysam fetch method;
        otherwise it must be read through sequentially.
    reader_uncompressed : bool
        If True, the VCF reader is uncompressed. This means the fetch
        method cannot be used and region access must be done using the
        "getRecordListUnzipped" method.

    """
    ext = checkFormat(vcfname)
    if ext in ['gzip', 'other']:
        raise Exception(('Input file %s is gzip-formatted, must be either '
                         'uncompressed or zipped with bgzip' % vcfname))
    file_uncompressed = (ext == 'vcf')
    reader_uncompressed = (file_uncompressed and not compress_flag)
    if compress_flag and file_uncompressed:
        vcfname = compressVcf(vcfname)
    #subsamp_list = None
    if subsamp_num is not None:
        if subsamp_list is not None:
            raise Exception('Multiple subsampling options called in getVcfReader')
        subsamp_list = getSubsampleList(vcfname, subsamp_num)
    elif subsamp_fn is not None:
        if subsamp_list is not None:
            raise Exception('Multiple subsampling options called in getVcfReader')
        subsamp_file = open(subsamp_fn, 'r')
        subsamp_list = [l.strip() for l in subsamp_file.readlines()]
        subsamp_file.close()
    if index is None:
        vcf_reader = pysam.VariantFile(vcfname)
    else:
        vcf_reader = pysam.VariantFile(vcfname, index_filename=index)
    if subsamp_list is not None:
        logging.debug('Subsampling %d individuals from VCF file' %
                      (len(subsamp_list)))
        vcf_reader.subset_samples(subsamp_list)
    return vcf_reader, reader_uncompressed
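A minimal usage sketch (hypothetical filename; assumes a bgzipped, tabix-indexed VCF so that fetch is available):

    from vcf_reader_func import checkFormat, getVcfReader

    print(checkFormat('input.vcf.gz'))  # 'bgzip' for a bgzip-compressed VCF

    # Open a reader; reader_uncompressed reports whether fetch() is usable
    vcf_reader, reader_uncompressed = getVcfReader('input.vcf.gz')
    if not reader_uncompressed:
        # Region access via pysam's fetch (contig name must exist in the VCF)
        for rec in vcf_reader.fetch('chr1', 0, 100000):
            print(rec.pos)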