changeset 16:65d45265a31b draft

Uploaded
author in_silico
date Thu, 16 Aug 2018 15:06:21 -0400
parents 0835042eb731
children 9985359fa7ff
files cravat_convert/__pycache__/base_converter.cpython-36.pyc cravat_convert/__pycache__/vcf_converter.cpython-36.pyc cravat_convert/base_converter.py cravat_convert/cravat_convert.py cravat_convert/cravat_convert.xml cravat_convert/vcf_converter.py cravat_submit/cravat_submit.py cravat_submit/cravat_submit.xml
diffstat 8 files changed, 365 insertions(+), 205 deletions(-)
Binary file cravat_convert/__pycache__/base_converter.cpython-36.pyc has changed
Binary file cravat_convert/__pycache__/vcf_converter.cpython-36.pyc has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cravat_convert/base_converter.py	Thu Aug 16 15:06:21 2018 -0400
@@ -0,0 +1,22 @@
+class BaseConverter(object):
+    def __init__(self):
+        self.format_name = None
+    def check_format(self,*args,**kwargs):
+        err_msg = 'Converter for %s format has no method check_format' %\
+            self.format_name
+        raise NotImplementedError(err_msg)
+    def setup(self,*args,**kwargs):
+        err_msg = 'Converter for %s format has no method setup' %\
+            self.format_name
+        raise NotImplementedError(err_msg)
+    def convert_line(self,*args,**kwargs):
+        err_msg = 'Converter for %s format has no method convert_line' %\
+            self.format_name
+        raise NotImplementedError(err_msg)
+
+
+class BadFormatError(Exception):
+    def __init__(self, message, errors=None):
+        super(BadFormatError, self).__init__(message)
+        # Support for custom error codes, if added later
+        self.errors = errors
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cravat_convert/cravat_convert.py	Thu Aug 16 15:06:21 2018 -0400
@@ -0,0 +1,80 @@
+from __future__ import print_function
+import os
+import argparse
+from vcf_converter import CravatConverter
+
+def get_vcf_mapping():
+    """ : VCF Headers mapped to their index position in a row of VCF values.
+        : These are only the mandatory columns, per the VCF spec.
+    """
+    return {
+        'CHROM': 0,
+        'POS': 1,
+        'ID': 2,
+        'REF': 3,
+        'ALT': 4,
+        'QUAL': 5,
+        'FILTER': 6,
+        'INFO': 7
+    }
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--input',
+                            '-i',
+                            required = True,
+                            help='Input path to a VCF file for conversion',)
+    parser.add_argument('--output',
+                            '-o',
+                            default = None,
+                            help = 'Output path to write the cravat file to')
+    return parser.parse_args()
+
+
+def convert(in_path, out_path=None, cr_sep='\t', cr_newline='\n'):
+    """ : Convert a VCF file to a Cravat file.
+        : Arguments:
+            : in_path: <str> path to input vcf file
+            : out_path: <str> path to output cravat file. Will default to cravat_converted.txt in the input directory.
+            : cr_sep: <str> the value delimiter for the output cravat file. Default value of '\\t'.
+            : cr_newline: <str> the newline delimiter in the output cravat file. Default of '\\n'
+    """
+    if not out_path:
+        base, _ = os.path.split(in_path)
+        out_path = os.path.join(base, "cravat_converted.txt")
+    
+    with open(in_path, 'r') as in_file, \
+    open(out_path, 'w') as out_file:
+
+        # cr_count will be used to generate the 'TR' field of the cravat rows (first header)
+        cr_count = 0
+        # VCF lines are always assumed to be '+' strand, as VCF doesn't specify that attribute
+        strand = '+'
+        # VCF converter. Adjusts position, reference, and alternate for Cravat formatting.
+        converter = CravatConverter()
+        # A dictionary of mandatory vcf headers mapped to their row indices
+        vcf_mapping = get_vcf_mapping()
+
+        for line in in_file:
+            if line.startswith("#"):
+                continue
+            line = line.strip().split()
+            # row is dict of VCF headers mapped to corresponding values of this line
+            row = { header: line[index] for header, index in vcf_mapping.items() }
+            for alt in row["ALT"].split(","):
+                new_pos, new_ref, new_alt = converter.extract_vcf_variant(strand, row["POS"], row["REF"], alt)
+                new_pos, new_ref, new_alt = str(new_pos), str(new_ref), str(new_alt)
+                cr_line = cr_sep.join([
+                    'TR' + str(cr_count), row['CHROM'], new_pos, strand, new_ref, new_alt, row['ID']
+                ])
+                out_file.write(cr_line + cr_newline)
+                cr_count += 1
+
+
+if __name__ == "__main__":
+    cli_args = get_args()
+    if cli_args.output is None:
+        base, _ = os.path.split(cli_args.input)
+        cli_args.output = os.path.join(base, "cravat_converted.txt") 
+    convert(in_path = cli_args.input, out_path = cli_args.output)
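
For orientation, a minimal usage sketch of the added script (file names here are hypothetical; it assumes the scripts are run from the cravat_convert directory so that vcf_converter is importable):

    from cravat_convert import convert

    # Write a tab-separated cravat file next to the input VCF
    convert(in_path='example.vcf', out_path='example_cravat.txt')

    # A record such as   chr1  6530968  rs123  G  GT  50  PASS  .
    # comes out roughly as:   TR0  chr1  6530969  +  -  T  rs123
    # (one output row per comma-separated ALT allele, always on the '+' strand)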
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cravat_convert/cravat_convert.xml	Thu Aug 16 15:06:21 2018 -0400
@@ -0,0 +1,20 @@
+<tool id="cravat_convert" name="CRAVAT Convert" version="1.0.0">
+    <description>Converts a VCF format file to a Cravat format file</description>
+    <command interpreter="python">cravat_convert.py -i $input -o $output</command>
+  
+    <inputs>
+        <param format="tabular" name="input" type="data" label="Source file"/>
+    </inputs>
+  
+    <outputs>
+        <data format="tabular" name="output" />
+    </outputs>
+
+    <!-- <tests></tests> -->
+
+    <help>
+        Converts a VCF format file to a Cravat format file
+    </help>
+
+</tool>
+
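
When Galaxy runs this wrapper it substitutes the selected dataset paths for $input and $output, so the effective call is equivalent to invoking the script by hand, e.g. (paths hypothetical):

    python cravat_convert.py -i /path/to/input.vcf -o /path/to/cravat_converted.txt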
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cravat_convert/vcf_converter.py	Thu Aug 16 15:06:21 2018 -0400
@@ -0,0 +1,243 @@
+"""
+A module originally obtained from the cravat package. Modified for use in the vcf
+converter galaxy tool.
+
+
+Register of changes made (Chris Jacoby):
+    1) Changed imports, as the galaxy tool won't have access to the complete cravat python package
+    2) Defined BadFormatError in the base_converter file, as I didn't have the BadFormatError module
+"""
+
+from base_converter import BaseConverter, BadFormatError
+import re
+
+class CravatConverter(BaseConverter):
+    
+    def __init__(self):
+        self.format_name = 'vcf'
+        self.samples = []
+        self.var_counter = 0
+        self.addl_cols = [{'name':'phred',
+                           'title':'Phred',
+                           'type':'string'},
+                          {'name':'filter',
+                           'title':'VCF filter',
+                           'type':'string'},
+                          {'name':'zygosity',
+                           'title':'Zygosity',
+                           'type':'string'},
+                          {'name':'alt_reads',
+                           'title':'Alternate reads',
+                           'type':'int'},
+                          {'name':'tot_reads',
+                           'title':'Total reads',
+                           'type':'int'},
+                          {'name':'af',
+                           'title':'Variant allele frequency',
+                           'type':'float'}]
+    
+    def check_format(self, f): 
+        return f.readline().startswith('##fileformat=VCF')
+    
+    def setup(self, f):
+        
+        vcf_line_no = 0
+        for line in f:
+            vcf_line_no += 1
+            if len(line) < 6:
+                continue
+            if line[:6] == '#CHROM':
+                toks = re.split(r'\s+', line.rstrip())
+                if len(toks) > 8:
+                    self.samples = toks[9:]
+                break
+    
+    def convert_line(self, l):
+        if l.startswith('#'): return None
+        self.var_counter += 1
+        toks = l.strip('\r\n').split('\t')
+        all_wdicts = []
+        if len(toks) < 8:
+            raise BadFormatError('Wrong VCF format')
+        [chrom, pos, tag, ref, alts, qual, filter, info] = toks[:8]
+        if tag == '':
+            raise BadFormatError('ID column is blank')
+        elif tag == '.':
+            tag = 'VAR' + str(self.var_counter)
+        if chrom[:3] != 'chr':
+            chrom = 'chr' + chrom
+        alts = alts.split(',')
+        len_alts = len(alts)
+        if len(toks) == 8:
+            for altno in range(len_alts):
+                wdict = None
+                alt = alts[altno]
+                newpos, newref, newalt = self.extract_vcf_variant('+', pos, ref, alt)
+                wdict = {'tags':tag,
+                         'chrom':chrom,
+                         'pos':newpos,
+                         'ref_base':newref,
+                         'alt_base':newalt,
+                         'sample_id':'no_sample',
+                         'phred': qual,
+                         'filter': filter}
+                all_wdicts.append(wdict)
+        elif len(toks) > 8:
+            sample_datas = toks[9:]
+            genotype_fields = {}
+            genotype_field_no = 0
+            for genotype_field in toks[8].split(':'):
+                genotype_fields[genotype_field] = genotype_field_no
+                genotype_field_no += 1
+            if not ('GT' in genotype_fields):
+                raise BadFormatError('No GT Field')
+            gt_field_no = genotype_fields['GT']
+            for sample_no in range(len(sample_datas)):
+                sample = self.samples[sample_no]
+                sample_data = sample_datas[sample_no].split(':')
+                gts = {}
+                for gt in sample_data[gt_field_no].replace('/', '|').split('|'):
+                    if gt == '.':
+                        continue
+                    else:
+                        gts[int(gt)] = True
+                for gt in sorted(gts.keys()):
+                    wdict = None
+                    if gt == 0:
+                        continue
+                    else:
+                        alt = alts[gt - 1]
+                        newpos, newref, newalt = self.extract_vcf_variant('+', pos, ref, alt)
+                        zyg = self.homo_hetro(sample_data[gt_field_no])
+                        depth, alt_reads, af = self.extract_read_info(sample_data, gt, gts, genotype_fields)
+                            
+                        wdict = {'tags':tag,
+                                 'chrom':chrom,
+                                 'pos':newpos,
+                                 'ref_base':newref,
+                                 'alt_base':newalt,
+                                 'sample_id':sample,
+                                 'phred': qual,
+                                 'filter': filter,
+                                 'zygosity': zyg,
+                                 'tot_reads': depth,
+                                 'alt_reads': alt_reads,
+                                 'af': af,                                
+                                 }
+                        all_wdicts.append(wdict)
+        return all_wdicts
+ 
+    #The vcf genotype string has a call for each allele separated by '|' or '/'.
+    #If the call is the same for all alleles, return 'hom'; otherwise 'het'.
+    def homo_hetro(self, gt_str):
+        if '.' in gt_str:
+            return ''
+        
+        gts = gt_str.strip().replace('/', '|').split('|')
+        for gt in gts:
+            if gt != gts[0]:
+                return 'het'
+        return 'hom'            
+                        
+    #Extract read depth, allele count, and allele frequency from optional VCF information
+    def extract_read_info (self, sample_data, gt, gts, genotype_fields): 
+        depth = ''
+        alt_reads = ''
+        ref_reads = ''
+        af = ''
+        
+        #AD usually contains 2 values: the ref count and the alt count. If there are
+        #multiple alts, it holds the alt 1 count then the alt 2 count.
+        if 'AD' in genotype_fields and genotype_fields['AD'] <= len(sample_data): 
+            if 0 in gts.keys():
+                #if part of the genotype is reference, then AD will have #ref reads, #alt reads
+                ref_reads = sample_data[genotype_fields['AD']].split(',')[0]
+                alt_reads = sample_data[genotype_fields['AD']].split(',')[1]
+            elif gt == max(gts.keys()):    
+                #if genotype has multiple alt bases, then AD will have #alt1 reads, #alt2 reads
+                alt_reads = sample_data[genotype_fields['AD']].split(',')[1]
+            else:
+                alt_reads = sample_data[genotype_fields['AD']].split(',')[0]                            
+                             
+        if 'DP' in genotype_fields and genotype_fields['DP'] <= len(sample_data): 
+            depth = sample_data[genotype_fields['DP']] 
+        elif alt_reads != '' and ref_reads != '':
+            #if DP is not present but we have alt and ref read counts, dp = ref+alt
+            depth = int(alt_reads) + int(ref_reads)   
+
+        if 'AF' in genotype_fields and genotype_fields['AF'] <= len(sample_data):
+            af = float(sample_data[genotype_fields['AF']] )
+        elif depth != '' and alt_reads != '':
+            #if AF not specified, calculate it from the alt reads and depth
+            af = float(alt_reads) / float(depth)
+ 
+        return depth, alt_reads, af
+            
+    def extract_vcf_variant (self, strand, pos, ref, alt):
+
+        reflen = len(ref)
+        altlen = len(alt)
+        
+        # Returns without change if same single nucleotide for ref and alt. 
+        if reflen == 1 and altlen == 1 and ref == alt:
+            return pos, ref, alt
+        
+        # Trimming from the start and then the end of the sequence 
+        # where the sequences overlap with the same nucleotides
+        new_ref2, new_alt2, new_pos = \
+            self.trimming_vcf_input(ref, alt, pos, strand)
+                
+        if new_ref2 == '':
+            new_ref2 = '-'
+        if new_alt2 == '':
+            new_alt2 = '-'
+        
+        return new_pos, new_ref2, new_alt2
+    
+    # This function looks at the ref and alt sequences and removes 
+    # where the overlapping sequences contain the same nucleotide.
+    # This trims from the end first but does not remove the first nucleotide 
+    # because, per the VCF input format, the
+    # first nucleotide of the ref and alt sequences occurs
+    # at the position specified.
+    #     End removed first, not the first nucleotide
+    #     Front removed and position changed
+    def trimming_vcf_input(self, ref, alt, pos, strand):
+        pos = int(pos)
+        reflen = len(ref)
+        altlen = len(alt)
+        minlen = min(reflen, altlen)
+        new_ref = ref
+        new_alt = alt
+        new_pos = pos
+        # Trims from the end. Except don't remove the first nucleotide. 
+        # 1:6530968 CTCA -> GTCTCA becomes C -> GTC.
+        for nt_pos in range(0, minlen - 1): 
+            if ref[reflen - nt_pos - 1] == alt[altlen - nt_pos - 1]:
+                new_ref = ref[:reflen - nt_pos - 1]
+                new_alt = alt[:altlen - nt_pos - 1]
+            else:
+                break    
+        new_ref_len = len(new_ref)
+        new_alt_len = len(new_alt)
+        minlen = min(new_ref_len, new_alt_len)
+        new_ref2 = new_ref
+        new_alt2 = new_alt
+        # Trims from the start. 1:6530968 G -> GT becomes 1:6530969 - -> T.
+        for nt_pos in range(0, minlen):
+            if new_ref[nt_pos] == new_alt[nt_pos]:
+                if strand == '+':
+                    new_pos += 1
+                elif strand == '-':
+                    new_pos -= 1
+                new_ref2 = new_ref[nt_pos + 1:]
+                new_alt2 = new_alt[nt_pos + 1:]
+            else:
+                new_ref2 = new_ref[nt_pos:]
+                new_alt2 = new_alt[nt_pos:]
+                break  
+        return new_ref2, new_alt2, new_pos
+
+
+if __name__ == "__main__":
+    c = CravatConverter()
\ No newline at end of file
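
The trimming behaviour documented in the comments above can be sanity-checked with a short standalone snippet (not part of the changeset; it assumes vcf_converter.py is on the Python path):

    from vcf_converter import CravatConverter

    c = CravatConverter()
    # Insertion: the shared leading base is trimmed and the position advances by one
    print(c.extract_vcf_variant('+', '6530968', 'G', 'GT'))         # -> (6530969, '-', 'T')
    # Shared trailing bases are trimmed first, but the first base is kept
    print(c.extract_vcf_variant('+', '6530968', 'CTCA', 'GTCTCA'))  # -> (6530968, 'C', 'GTC')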
--- a/cravat_submit/cravat_submit.py	Mon Jul 30 13:32:05 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,171 +0,0 @@
-from __future__ import print_function
-import requests
-import json
-import time
-from urllib import urlretrieve
-# from urllib.request import urlretrieve
-import sys
-import csv
-import argparse
-
-"""
-Tool's email:
-usernmae: cravatgalaxy@gmail.com
-password: chicken_quesadilla
-"""
-
-email = 'cravatgalaxy@gmail.com'
-
-class CravatSubmissionException(Exception):
-    def __init__(self, message):
-        super(CravatSubmissionException, self).__init__(message)
-
-class CravatSubmission(object):
-
-    def get_cmd_args(self, argv):
-        parser = argparse.ArgumentParser()
-        parser.add_argument('path',
-                                help="Path to python module")
-        parser.add_argument('--input',
-                                '-i',
-                                required = True,
-                                help='Input path to a cravat file for querying',)
-        parser.add_argument('--output',
-                                '-o',
-                                default = None,
-                                help = 'Output path to write results from query')
-        parser.add_argument('--analysis',
-                                '-a',
-                                required=True,
-                                help = "Cravat analysis. Should be 'VEST', 'CHASM', 'NONE', or 'VEST;CHASM'")
-        return parser.parse_args(argv)
-
-    def is_valid_analysis(self, analysis):
-        """: Test if analysis is a recognized value"""
-        analyses = ["VEST", "CHASM", "VEST;CHASM", ""]
-        return analysis in analyses
-
-    def is_skippable(self, s):
-        """: Test if a line (str or list/tuple) is skippable, a.k.a. a header or blank line"""
-        if not isinstance(s, str):
-            raise CravatSubmissionException("is_skippable accepts a string")
-        skippable = s == "" \
-            or s[0] == "#" \
-            or s.startswith('"#For more information on CRAVAT') \
-            or s.isspace()
-        return skippable
-
-    def parse(self, s, sep='\t'):
-        """: Convert string line to an array of values"""
-        return s.strip().split(sep)
-
-    def unparse(self, array, sep='\t', newline='\n'):
-        """: Convert an array of values to a writable string line"""
-        return sep.join([str(i) for i in array]) + newline
-
-    def get_headers(self, path, pattern='Input line', sep='\t'):
-        """: Get the headers from a Results/Details file obtained from by a finished Cravat submission"""
-        with open(path, 'r') as f:
-            for line in f:
-                if line.startswith(pattern):
-                    return self.parse(line)
-            return None
-
-    def create_index(self, path, prop='Input line'):
-        """
-        : Create an index of seek/tell positions in file associated to a line value. Used to record
-        : the location of lines betwen two files that are associated with each other without reading entire
-        : files into memory.
-        """
-        headers = self.get_headers(path)
-        if prop not in headers:
-            raise CravatSubmissionException("Index retrievel property not found in headers")
-        prop_loc = headers.index(prop)
-        index = {}
-        with open(path, 'r') as f:
-            pos = 0
-            line = f.readline()
-            while line != "":
-                if not self.is_skippable(line):
-                    parsed = self.parse(line)
-                    if not parsed == headers:
-                        index[parsed[prop_loc]] = pos
-                pos = f.tell()
-                line = f.readline()
-        return index
-
-    def get_header_val_dict(self, headers, vals):
-        """: Associate an array of header keys to an array of values."""
-        return { header:val for (header, val) in zip(headers, vals) }
-
-    def write_results(self, results_path, details_path, out_path, write_headers=True):
-        """
-        : Using the paths to the Results and Details file from a Cravat Sumbission,
-        : write the output file.
-        """
-        results_headers = self.get_headers(results_path)
-        details_headers = self.get_headers(details_path)
-        if results_headers == None \
-        or details_headers == None:
-            raise CravatSubmissionException("Unable to intepret headers in Results or Details submission files")
-        headers = results_headers
-        headers.extend(filter(lambda x: x not in headers, details_headers))
-        results_index = self.create_index(results_path)
-        details_index = self.create_index(details_path)
-        with open(results_path, 'r') as results_file, \
-        open(details_path, 'r') as details_file, \
-        open(out_path, 'w') as out_file:
-            if write_headers:
-                out_file.write(self.unparse(headers))
-            for line_id, file_pos in results_index.items():
-                results_file.seek(file_pos)
-                results_vals = self.parse(results_file.readline())
-                results_dict = self.get_header_val_dict(results_headers, results_vals)
-                if line_id in details_index:
-                    details_file.seek(details_index[line_id])
-                    details_vals = self.parse(details_file.readline())
-                    details_dict = self.get_header_val_dict(details_headers, details_vals)
-                    # On a repeated entry, the Details value will overwrite Results value
-                    results_dict.update(details_dict)
-                line = [ results_dict.get(header, 'None') for header in headers ]
-                out_file.write(self.unparse(line))
-                
-    def submit(self, in_path, analysis):
-        """: Make a POST request to submit a job to production CRAVAT server."""
-        if not self.is_valid_analysis(analysis):
-            raise ValueError("Did not get valid analyses.")
-        # Create post request to submit job to  CRAVAT production server
-        submit = requests.post('http://cravat.us/CRAVAT/rest/service/submit',
-                                files={'inputfile' : open(in_path)},
-                                data={'email' : email,
-                                'analyses' : analysis})
-        # Check job run status in loop until status is 'Success'
-        jobid = json.loads(submit.text)['jobid']
-        while True:
-            check = requests.get('http://cravat.us/CRAVAT/rest/service/status', params={'jobid': jobid})
-            status = json.loads(check.text)['status']
-            print(status)
-            if status == 'Success':
-                break
-            else:
-                time.sleep(2)
-        # Download completed job results to local files
-        timestamp = time.strftime("%Y-%m-%d_%H-%M-%S_")
-        results_path = 'Z_Variant_Result' + timestamp + '.tsv'
-        details_path = 'Z_Additional_Details' + timestamp + '.tsv'
-        urlretrieve("http://cravat.us/CRAVAT/results/" + jobid + "/" + "Variant.Result.tsv",
-            filename=results_path)
-        urlretrieve("http://cravat.us/CRAVAT/results/" + jobid + "/" + "Variant_Additional_Details.Result.tsv",
-            filename=details_path)
-        return results_path, details_path
-
-if __name__ == "__main__":
-    submission = CravatSubmission()
-    cmd_args = submission.get_cmd_args(sys.argv)
-    # Galaxy converts semi-colons to X's. Switch it back
-    analysis = cmd_args.analysis
-    if analysis == "VESTXCHASM":
-        analysis = "VEST;CHASM"
-    results_path, details_path = submission.submit(cmd_args.input, analysis)
-    #submission.write_results('Results_test.tsv', 'Details_test.tsv', 'Out_test.tsv')
-    submission.write_results(results_path, details_path, cmd_args.output)
\ No newline at end of file
--- a/cravat_submit/cravat_submit.xml	Mon Jul 30 13:32:05 2018 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-<tool id="cravat_submit" name="CRAVAT Submit, Check, and Retrieve" version="0.1.0">
-    <description>Submits, checks for, and retrieves data for cancer annotation</description>
-  <command interpreter="python">cravat_submit.py $input $dropdown $output</command>
-  
-  
-  <inputs>
-  
-    <param format="tabular" name="input" type="data" label="Source file"> </param>
-    <param format="tabular" name="dropdown" type="select" label="Analysis Program">
-      <option value="None">None</option>
-      <option value="VEST">VEST</option>
-      <option value="CHASM">CHASM</option>
-      <option value="VEST;CHASM">VEST and CHASM</option>
-    </param>
-    
-    
-  </inputs>
-  
-  <outputs>
-    <data format="tabular" name="output" />
-  </outputs>
-
-  <tests>
-    <test>
-      <param name="input" value="fa_gc_content_input.fa"/>
-      <output name="out_file1" file="fa_gc_content_output.txt"/>
-    </test>
-  </tests>
-
-  <help>
- This tool submits, checks for, and retrieves data for cancer annotation.
-  </help>
-
-</tool>