map_peptides_to_bed.py @ 0:51f8f9041724 (draft)
planemo upload for repository https://github.com/galaxyproteomics/tools-galaxyp/tree/master/tools/map_peptides_to_bed commit e04ed4b4960d6109a85c1cc68a2bf4931c8751ef-dirty
| author | galaxyp |
|---|---|
| date | Mon, 25 Jan 2016 15:32:49 -0500 |
| parents | |
| children | db90662d26f9 |
```python
#!/usr/bin/env python
"""
#
#------------------------------------------------------------------------------
# University of Minnesota
# Copyright 2014, Regents of the University of Minnesota
#------------------------------------------------------------------------------
# Author:
#
# James E Johnson
#
#------------------------------------------------------------------------------
"""

"""
Input: list of protein_accessions, peptide_sequence
       GFF3 with fasta
Output: GFF3 of peptides

Filter: Must cross splice boundary

"""

import sys,re,os.path
import tempfile
import optparse
from optparse import OptionParser
from Bio.Seq import reverse_complement, transcribe, back_transcribe, translate
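# reverse_complement and translate above come from Biopython (Bio.Seq), so
# Biopython must be installed for this script to run.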
class BedEntry( object ):
    def __init__(self, line):
        self.line = line
        try:
            fields = line.rstrip('\r\n').split('\t')
            (chrom,chromStart,chromEnd,name,score,strand,thickStart,thickEnd,itemRgb,blockCount,blockSizes,blockStarts) = fields[0:12]
            # optional 13th column: the sequence (translation) associated with this entry
            seq = fields[12] if len(fields) > 12 else None
            self.chrom = chrom
            self.chromStart = int(chromStart)
            self.chromEnd = int(chromEnd)
            self.name = name
            self.score = int(score)
            self.strand = strand
            self.thickStart = int(thickStart)
            self.thickEnd = int(thickEnd)
            self.itemRgb = itemRgb
            self.blockCount = int(blockCount)
            self.blockSizes = [int(x) for x in blockSizes.split(',')]
            self.blockStarts = [int(x) for x in blockStarts.split(',')]
            self.seq = seq
        except Exception, e:
            print >> sys.stderr, "Unable to read Bed entry: %s" % e
            exit(1)
    def __str__(self):
        return '%s\t%d\t%d\t%s\t%d\t%s\t%d\t%d\t%s\t%d\t%s\t%s%s' % (
            self.chrom, self.chromStart, self.chromEnd, self.name, self.score, self.strand, self.thickStart, self.thickEnd, self.itemRgb, self.blockCount,
            ','.join([str(x) for x in self.blockSizes]),
            ','.join([str(x) for x in self.blockStarts]),
            '\t%s' % self.seq if self.seq else '')
    def get_splice_junctions(self):
        splice_juncs = []
        for i in range(self.blockCount - 1):
            # a junction spans the end of block i to the start of block i+1
            splice_junc = "%s:%d_%d" % (self.chrom, self.chromStart + self.blockStarts[i] + self.blockSizes[i], self.chromStart + self.blockStarts[i+1])
            splice_juncs.append(splice_junc)
        return splice_juncs
    def get_exon_seqs(self):
        exons = []
        for i in range(self.blockCount):
            exons.append(self.seq[self.blockStarts[i]:self.blockStarts[i] + self.blockSizes[i]])
        if self.strand == '-':  # reverse complement
            exons.reverse()
            for i,s in enumerate(exons):
                exons[i] = reverse_complement(s)
        return exons
    def get_spliced_seq(self):
        seq = ''.join(self.get_exon_seqs())
        return seq
    def get_translation(self,sequence=None):
        translation = None
        seq = sequence if sequence else self.get_spliced_seq()
        if seq:
            seqlen = len(seq) / 3 * 3
            if seqlen >= 3:
                translation = translate(seq[:seqlen])
        return translation
    def get_translations(self):
        translations = []
        seq = self.get_spliced_seq()
        if seq:
            for i in range(3):
                translation = self.get_translation(sequence=seq[i:])
                if translation:
                    translations.append(translation)
        return translations
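    # Illustration (hypothetical sequence, not from the source): for a spliced,
    # strand-corrected sequence 'ATGGCCTGA', get_translations() returns the
    # three forward-frame translations ['MA*', 'WP', 'GL'].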
    ## (start,end)
    def get_subrange(self,tstart,tstop):
        chromStart = self.chromStart
        chromEnd = self.chromEnd
        r = range(self.blockCount)
        if self.strand == '-':
            r.reverse()
        bStart = 0
        for x in r:
            bEnd = bStart + self.blockSizes[x]
            ## print >> sys.stderr, "%d chromStart: %d chromEnd: %s bStart: %s bEnd: %d" % (x,chromStart,chromEnd,bStart,bEnd)
            if bStart <= tstart < bEnd:
                if self.strand == '+':
                    chromStart = self.chromStart + self.blockStarts[x] + (tstart - bStart)
                else:
                    chromEnd = self.chromStart + self.blockStarts[x] + self.blockSizes[x] - (tstart - bStart)
            if bStart <= tstop < bEnd:
                if self.strand == '+':
                    chromEnd = self.chromStart + self.blockStarts[x] + (tstop - bStart)
                else:
                    chromStart = self.chromStart + self.blockStarts[x] + self.blockSizes[x] - (tstop - bStart)
            bStart += self.blockSizes[x]
        return (chromStart,chromEnd)
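    # Worked example (hypothetical '+' strand entry, not from the source): with
    # chromStart=1000, blockSizes=[30,40] and blockStarts=[0,60], the exons
    # cover 1000-1030 and 1060-1100, i.e. spliced positions 0-30 and 30-70.
    # get_subrange(10,40) maps spliced offset 10 into block 0
    # (chromStart = 1000 + 0 + 10 = 1010) and spliced offset 40 into block 1
    # (chromEnd = 1000 + 60 + (40 - 30) = 1070), returning (1010, 1070).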
    #get the blocks for sub range
    def get_blocks(self,chromStart,chromEnd):
        tblockCount = 0
        tblockSizes = []
        tblockStarts = []
        for x in range(self.blockCount):
            bStart = self.chromStart + self.blockStarts[x]
            bEnd = bStart + self.blockSizes[x]
            if bStart > chromEnd:
                break
            if bEnd < chromStart:
                continue
            cStart = max(chromStart,bStart)
            tblockStarts.append(cStart - chromStart)
            tblockSizes.append(min(chromEnd,bEnd) - cStart)
            tblockCount += 1
        print >> sys.stderr, "tblockCount: %d tblockStarts: %s tblockSizes: %s" % (tblockCount,tblockStarts,tblockSizes)
        return (tblockCount,tblockSizes,tblockStarts)
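    # Continuing the example above, get_blocks(1010, 1070) returns
    # (2, [20, 10], [0, 50]): 20 bp of the first exon starting at the new
    # chromStart and 10 bp of the second exon starting 50 bp after it.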

    ## [[start,end,seq,blockCount,blockSizes,blockStarts],[start,end,seq,blockCount,blockSizes,blockStarts],[start,end,seq,blockCount,blockSizes,blockStarts]]
    ## filter: ignore translation if stop codon in first exon after ignore_left_bp
    def get_filterd_translations(self,untrimmed=False,filtering=True,ignore_left_bp=0,ignore_right_bp=0):
        translations = [None,None,None,None,None,None]
        seq = self.get_spliced_seq()
        ignore = (ignore_left_bp if self.strand == '+' else ignore_right_bp) / 3
        block_sum = sum(self.blockSizes)
        exon_sizes = list(self.blockSizes)  # copy, so reversing below does not alter self.blockSizes
        if self.strand == '-':
            exon_sizes.reverse()
        splice_sites = [sum(exon_sizes[:x]) / 3 for x in range(1,len(exon_sizes))]
        print >> sys.stderr, "splice_sites: %s" % splice_sites
        junc = splice_sites[0] if len(splice_sites) > 0 else exon_sizes[0]
        if seq:
            for i in range(3):
                translation = self.get_translation(sequence=seq[i:])
                if translation:
                    tstart = 0
                    tstop = len(translation)
                    if not untrimmed:
                        tstart = translation.rfind('*',0,junc) + 1
                        stop = translation.find('*',junc)
                        tstop = stop if stop >= 0 else len(translation)
                    if filtering and tstart > ignore:
                        continue
                    trimmed = translation[tstart:tstop]
                    #get genomic locations for start and end
                    offset = (block_sum - i) % 3
                    print >> sys.stderr, "tstart: %d tstop: %d offset: %d" % (tstart,tstop,offset)
                    if self.strand == '+':
                        chromStart = self.chromStart + i + (tstart * 3)
                        chromEnd = self.chromEnd - offset - (len(translation) - tstop) * 3
                    else:
                        chromStart = self.chromStart + offset + (len(translation) - tstop) * 3
                        chromEnd = self.chromEnd - i - (tstart * 3)
                    #get the blocks for this translation
                    tblockCount = 0
                    tblockSizes = []
                    tblockStarts = []
                    for x in range(self.blockCount):
                        bStart = self.chromStart + self.blockStarts[x]
                        bEnd = bStart + self.blockSizes[x]
                        if bStart > chromEnd:
                            break
                        if bEnd < chromStart:
                            continue
                        cStart = max(chromStart,bStart)
                        tblockStarts.append(cStart - chromStart)
                        tblockSizes.append(min(chromEnd,bEnd) - cStart)
                        tblockCount += 1
                    print >> sys.stderr, "tblockCount: %d tblockStarts: %s tblockSizes: %s" % (tblockCount,tblockStarts,tblockSizes)
                    translations[i] = [chromStart,chromEnd,trimmed,tblockCount,tblockSizes,tblockStarts]
        return translations
    def get_seq_id(self,seqtype='unk:unk',reference='',frame=None):
        ## Ensembl fasta ID format
        # >ID SEQTYPE:STATUS LOCATION GENE TRANSCRIPT
        # >ENSP00000328693 pep:splice chromosome:NCBI35:1:904515:910768:1 gene:ENSG00000158815:transcript:ENST00000328693 gene_biotype:protein_coding transcript_biotype:protein_coding
        frame_name = ''
        chromStart = self.chromStart
        chromEnd = self.chromEnd
        strand = 1 if self.strand == '+' else -1
        if frame != None:
            block_sum = sum(self.blockSizes)
            offset = (block_sum - frame) % 3
            frame_name = '_' + str(frame + 1)
            if self.strand == '+':
                chromStart += frame
                chromEnd -= offset
            else:
                chromStart += offset
                chromEnd -= frame
        location = "chromosome:%s:%s:%s:%s:%s" % (reference,self.chrom,chromStart,chromEnd,strand)
        seq_id = "%s%s %s %s" % (self.name,frame_name,seqtype,location)
        return seq_id
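    # Hypothetical example: for the '+' strand entry used above (name
    # 'JUNC00001', chrom 'chr1', chromStart 1000, chromEnd 1100, block sizes
    # summing to 70), get_seq_id(seqtype='pep:novel', reference='GRCh38', frame=0)
    # returns 'JUNC00001_1 pep:novel chromosome:GRCh38:chr1:1000:1099:1'
    # (offset = (70 - 0) % 3 = 1 trims one base from chromEnd).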
    def get_line(self, start_offset = 0, end_offset = 0):
        if start_offset or end_offset:
            s_offset = start_offset if start_offset else 0
            e_offset = end_offset if end_offset else 0
            if s_offset > self.chromStart:
                s_offset = self.chromStart
            chrStart = self.chromStart - s_offset
            chrEnd = self.chromEnd + e_offset
            blkSizes = self.blockSizes
            blkSizes[0] += s_offset
            blkSizes[-1] += e_offset
            blkStarts = self.blockStarts
            for i in range(1,self.blockCount):
                blkStarts[i] += s_offset
            items = [str(x) for x in [self.chrom,chrStart,chrEnd,self.name,self.score,self.strand,self.thickStart,self.thickEnd,self.itemRgb,self.blockCount,','.join([str(x) for x in blkSizes]),','.join([str(x) for x in blkStarts])]]
            return '\t'.join(items) + '\n'
        return self.line

def __main__():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-t', '--translated_bed', dest='translated_bed', default=None, help='A bed file with added 13th column having a translation' )
    parser.add_option( '-i', '--input', dest='input', default=None, help='Tabular file with peptide_sequence column' )
    parser.add_option( '-p', '--peptide_column', type='int', dest='peptide_column', default=1, help='column ordinal with peptide sequence' )
    parser.add_option( '-n', '--name_column', type='int', dest='name_column', default=None, help='column ordinal with protein name' )
    parser.add_option( '-s', '--start_column', type='int', dest='start_column', default=None, help='column with peptide start position in protein' )
    parser.add_option( '-B', '--bed', dest='bed', default=None, help='Output a bed file with added 13th column having translation' )
    ## parser.add_option( '-G', '--gff3', dest='gff', default=None, help='Output translations to a GFF3 file' )
    ## parser.add_option( '-f', '--fasta', dest='fasta', default=None, help='Protein fasta' )
    parser.add_option( '-T', '--gffTags', dest='gffTags', action='store_true', default=False, help='Add #gffTags to bed output for IGV' )
    parser.add_option( '-d', '--debug', dest='debug', action='store_true', default=False, help='Turn on wrapper debugging to stderr' )
    (options, args) = parser.parse_args()
    # Input files
    if options.input != None:
        try:
            inputPath = os.path.abspath(options.input)
            inputFile = open(inputPath, 'r')
        except Exception, e:
            print >> sys.stderr, "failed: %s" % e
            exit(2)
    else:
        inputFile = sys.stdin
    inputBed = None
    if options.translated_bed != None:
        inputBed = open(os.path.abspath(options.translated_bed),'r')
    peptide_column = options.peptide_column - 1
    name_column = options.name_column - 1 if options.name_column else None
    start_column = options.start_column - 1 if options.start_column else None
    # Read in peptides
    # peps[prot_name] = [seq]
    prot_peps = dict()
    unassigned_peps = set()
    try:
        for i, line in enumerate( inputFile ):
            ## print >> sys.stderr, "%3d\t%s" % (i,line)
            if line.startswith('#'):
                continue
            fields = line.rstrip('\r\n').split('\t')
            ## print >> sys.stderr, "%3d\t%s" % (i,fields)
            if peptide_column < len(fields):
                peptide = fields[peptide_column]
                prot_name = fields[name_column] if name_column is not None and name_column < len(fields) else None
                if prot_name:
                    # start position is used as a 0-based offset of the peptide in the translation
                    offset = int(fields[start_column]) if start_column is not None and start_column < len(fields) else -1
                    if prot_name not in prot_peps:
                        prot_peps[prot_name] = dict()
                    prot_peps[prot_name][peptide] = offset
                else:
                    unassigned_peps.add(peptide)
        if options.debug:
            print >> sys.stderr, "prot_peps: %s" % prot_peps
            print >> sys.stderr, "unassigned_peps: %s" % unassigned_peps
    except Exception, e:
        print >> sys.stderr, "failed: Error reading %s - %s" % (options.input if options.input else 'stdin',e)
        exit(1)
    # Output files
    bed_fh = None
    ## gff_fh = None
    ## gff_fa_file = None
    gff_fa = None
    outFile = None
    if options.bed:
        bed_fh = open(options.bed,'w')
        bed_fh.write('track name="%s" type=bedDetail description="%s" \n' % ('novel_junction_peptides','test'))
        if options.gffTags:
            bed_fh.write('#gffTags\n')
    ## if options.gff:
    ##     gff_fh = open(options.gff,'w')
    ##     gff_fh.write("##gff-version 3.2.1\n")
    ##     if options.reference:
    ##         gff_fh.write("##genome-build %s %s\n" % (options.refsource if options.refsource else 'unknown', options.reference))
    try:
        for i, line in enumerate( inputBed ):
            ## print >> sys.stderr, "%3d:\t%s" % (i,line)
            if line.startswith('track'):
                continue
            entry = BedEntry(line)
            if entry.name in prot_peps:
                for (peptide,offset) in prot_peps[entry.name].iteritems():
                    if offset < 0:
                        offset = entry.seq.find(peptide)
                    if options.debug:
                        print >> sys.stderr, "%s\t%s\t%d\t%s\n" % (entry.name, peptide,offset,entry.seq)
                    if offset >= 0:
                        tstart = offset * 3
                        tstop = tstart + len(peptide) * 3
                        if options.debug:
                            print >> sys.stderr, "%d\t%d\t%d" % (offset,tstart,tstop)
                        (pepStart,pepEnd) = entry.get_subrange(tstart,tstop)
                        if options.debug:
                            print >> sys.stderr, "%d\t%d\t%d" % (offset,pepStart,pepEnd)
                        if bed_fh:
                            entry.thickStart = pepStart
                            entry.thickEnd = pepEnd
                            bedfields = str(entry).split('\t')
                            if options.gffTags:
                                bedfields[3] = "ID=%s;Name=%s" % (entry.name,peptide)
                            bed_fh.write("%s\t%s\t%s\n" % ('\t'.join(bedfields[:12]),peptide,entry.seq))
    except Exception, e:
        print >> sys.stderr, "failed: Error reading %s - %s" % (options.input if options.input else 'stdin',e)

if __name__ == "__main__" : __main__()
```
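A usage sketch (the file names here are hypothetical; the options are the ones defined in `__main__` above):

```
python map_peptides_to_bed.py \
    --translated_bed translated_orfs.bed \
    --input peptides.tsv \
    --peptide_column 2 \
    --name_column 1 \
    --bed mapped_peptides.bed \
    --gffTags
```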
