comparison PsiCLASS-1.0.2/SubexonInfo.cpp @ 0:903fc43d6227 draft default tip

Uploaded
author lsong10
date Fri, 26 Mar 2021 16:52:45 +0000
parents
children
comparison
equal deleted inserted replaced
-1:000000000000 0:903fc43d6227
1 // report the total depth for the segment
2 // Format: ./a.out alignments.bam splice_site
3 #include <stdio.h>
4 #include <string.h>
5 #include <stdlib.h>
6
7 #define __STDC_FORMAT_MACROS
8 #include <inttypes.h>
9
10 #include <algorithm>
11 #include <vector>
12 #include <math.h>
13
14 #include "alignments.hpp"
15 #include "blocks.hpp"
16 #include "stats.hpp"
17
18 #define ABS(x) ((x)<0?-(x):(x))
19
// Command-line usage text printed when required arguments are missing.
char usage[] = "./subexon-info alignment.bam intron.splice [options]\n"
	"options:\n"
	"\t--minDepth INT: the minimum coverage depth considered as part of a subexon (default: 2)\n"
	"\t--noStats: do not compute the statistical scores (default: not used)\n" ;
// Scratch buffer; used in main() to hold the realpath of the alignment file.
char buffer[4096] ;

// Minimum coverage depth for a position to count toward a subexon (--minDepth).
int gMinDepth ;
27
28 bool CompSplitSite( struct _splitSite a, struct _splitSite b )
29 {
30 if ( a.chrId < b.chrId )
31 return true ;
32 else if ( a.chrId > b.chrId )
33 return false ;
34 else if ( a.pos != b.pos )
35 return a.pos < b.pos ;
36 else if ( a.type != b.type )
37 return a.type < b.type ; // We want the start of exons comes first, since we are scanning the genome from left to right,
38 // so we can terminate the extension early and create a single-base subexon later.
39 else
40 return a.oppositePos < b.oppositePos ;
41 }
42
43 bool CompBlocksByAvgDepth( struct _block a, struct _block b )
44 {
45 double avgA = a.depthSum / (double)( a.end - a.start + 1 ) ;
46 double avgB = b.depthSum / (double)( b.end - b.start + 1 ) ;
47 return avgA < avgB ;
48 }
49
50 bool CompBlocksByRatio( struct _block a, struct _block b )
51 {
52 return a.ratio < b.ratio ;
53 }
54
// qsort-style comparator for doubles: returns negative, zero or positive
// according to whether *p1 is less than, equal to, or greater than *p2.
int CompDouble( const void *p1, const void *p2 )
{
	const double lhs = *(const double *)p1 ;
	const double rhs = *(const double *)p2 ;
	if ( lhs < rhs )
		return -1 ;
	if ( lhs > rhs )
		return 1 ;
	return 0 ;
}
66
// Clean up the split sites: sort them, mark unreliable introns (and the
// partner site at the other end of the same intron) with support = -1,
// then compact the vector, normalizing unknown strand '?' to '.'.
void FilterAndSortSplitSites( std::vector<struct _splitSite> &sites )
{
	std::sort( sites.begin(), sites.end(), CompSplitSite ) ;
	int i, j, k, l ;
	int size = sites.size() ;

	// Process each run [i, j) of sites sharing the same chrId, pos and type.
	for ( i = 0 ; i < size ; )
	{
		for ( j = i + 1 ; j < size ; ++j )
			if ( sites[j].chrId != sites[i].chrId || sites[j].pos != sites[i].pos || sites[j].type != sites[i].type )
				break ;
		// Strongest support among the sites at this coordinate.
		int maxSupport = 0 ;
		for ( k = i ; k < j ; ++k )
			if ( sites[k].support > maxSupport )
				maxSupport = sites[k].support ;

		// Majority vote on the strand, weighted by support.
		int strandCnt[2] = {0, 0} ;
		char strand = sites[i].strand ;
		for ( k = i ; k < j ; ++k )
		{
			if ( sites[k].strand == '-' )
				strandCnt[0] += sites[k].support ;
			else if ( sites[k].strand == '+' )
				strandCnt[1] += sites[k].support ;

		}
		if ( strandCnt[0] > strandCnt[1] )
			strand = '-' ;
		else if ( strandCnt[1] > strandCnt[0] )
			strand = '+' ;

		// When one site dominates (support >= 20) and every other site at the
		// coordinate has support 1, those singletons are treated as noise.
		bool allOneExceptMax = false ;
		if ( maxSupport >= 20 )
		{
			allOneExceptMax = true ;
			for ( k = i ; k < j ; ++k )
			{
				if ( sites[k].support == maxSupport )
					continue ;
				if ( sites[k].support > 1 )
				{
					allOneExceptMax = false ;
					break ;
				}
			}
		}

		for ( k = i ; k < j ; ++k )
		{
			// Filter a site if any of these hold: relatively tiny support,
			// strand disagreeing with the vote, high mismatch rate, a noise
			// singleton, or a weakly supported very long / mismatched intron.
			if ( ( sites[k].support < 0.01 * maxSupport && sites[k].support <= 3 )
				|| sites[k].support < 0.001 * maxSupport || sites[k].strand != strand // The introns from the different strand are filtered.
				|| ( sites[k].support < 0.02 * maxSupport && sites[k].mismatchSum >= 2 * sites[k].support )
				|| ( allOneExceptMax && sites[k].support == 1 )
				|| ( sites[k].support <= 2 && sites[k].mismatchSum >= 2 * sites[k].support )
				|| ( maxSupport >= 2 && sites[k].support == 1 && ( ABS( sites[k].oppositePos - sites[k].pos - 1 ) >= 10000 || sites[k].mismatchSum != 0 ) ) )
			{
				// Invalidate the partner site of the same intron; it may lie
				// before the current run...
				for ( l = i - 1 ; l >= 0 && sites[l].chrId == sites[i].chrId && sites[l].pos >= sites[k].oppositePos ; --l )
				{
					if ( sites[l].pos == sites[k].oppositePos && sites[l].oppositePos == sites[k].pos )
					{
						sites[l].support = -1 ;
						sites[k].support = -1 ;
						break ;
					}
				}

				// ...or after it.
				for ( l = j ; l < size && sites[l].chrId == sites[i].chrId && sites[l].pos <= sites[k].oppositePos ; ++l )
				{
					if ( sites[l].pos == sites[k].oppositePos && sites[l].oppositePos == sites[k].pos )
					{
						sites[l].support = -1 ;
						sites[k].support = -1 ;
						break ;
					}
				}
			}
			/*else if ( sites[k].support <= 1 && sites[k].oppositePos - sites[k].pos + 1 >= 30000 )
			{
				for ( l = j ; l < size && sites[l].chrId == sites[i].chrId && sites[l].pos <= sites[k].oppositePos ; ++l )
				{
					if ( sites[l].pos == sites[k].oppositePos && sites[l].oppositePos == sites[k].pos )
					{
						if ( l - j >= 20 )
						{
							sites[l].support = -1 ;
							sites[k].support = -1 ;
							break ;
						}
					}
				}

			}*/
		}
		i = j ;
	}

	// Compact: keep surviving sites; normalize unknown strand '?' to '.'.
	k = 0 ;
	for ( i = 0 ; i < size ; ++i )
	{
		if ( sites[i].support > 0 )
		{
			sites[k] = sites[i] ;
			if ( sites[k].strand == '?' )
				sites[k].strand = '.' ;
			++k ;
		}
	}
	sites.resize( k ) ;
}
177
178 // Remove the same sites.
179 void KeepUniqSplitSites( std::vector< struct _splitSite> &sites )
180 {
181 int i, j ;
182 int size = sites.size() ;
183 int k = 0 ;
184 for ( i = 0 ; i < size ; )
185 {
186 for ( j = i + 1 ; j < size ; ++j )
187 if ( sites[j].chrId != sites[i].chrId || sites[j].pos != sites[i].pos || sites[j].type != sites[i].type )
188 break ;
189 sites[k] = sites[i] ;
190 ++k ;
191 /*else
192 {
193 if ( sites[i].type != sites[k-1].type )
194 {
195 printf( "%d\n", sites[i].pos ) ;
196 }
197 }*/
198 i = j ;
199 }
200 sites.resize( k ) ;
201
202 // For the sites that corresponds to the start of an exon, we remove the adjust to it and
203 /*for ( i = 1 ; i < size ; ++i )
204 {
205 if ( sites[i].pos == sites[i - 1].pos && sites[i - 1].type == 2 )
206 {
207 if ( sites[i].type == 1 )
208 ++sites[i].pos ;
209 }
210 }*/
211 }
212
// Filter split sites that are extremely close to each other but on different strand.
// Two same-type sites on the same chromosome within 7bp whose strands disagree
// (or are unknown) are suspicious: the weaker one and its intron partner are
// marked with support = -1 and then compacted away.
void FilterNearSplitSites( std::vector< struct _splitSite> &sites )
{
	int i, j ;
	int size = sites.size() ;
	int k = 0 ;
	for ( i = 0 ; i < size - 1 ; ++i )
	{
		if ( sites[i].support < 0 || sites[i].type != sites[i + 1].type || sites[i].chrId != sites[i + 1].chrId )
			continue ;
		if ( sites[i + 1].pos - sites[i].pos <= 7 &&
			( sites[i + 1].strand != sites[i].strand ||
			sites[i].strand == '?' ) )
		{
			// Drop the site with the weaker support.
			int tag = i ;
			if ( sites[i + 1].support < sites[i].support )
				tag = i + 1 ;
			sites[tag].support = -1 ;
			// The partner site lies toward oppositePos; scan in that direction.
			int direction ;
			if ( sites[tag].oppositePos < sites[tag].pos )
				direction = -1 ;
			else
				direction = 1 ;

			for ( j = tag ; j >= 0 && j < size ; j += direction )
				if ( sites[j].pos == sites[tag].oppositePos && sites[j].oppositePos == sites[tag].pos )
				{
					sites[j].support = -1 ;
					break ;
				}
		}
	}

	// Compact the surviving sites in place.
	for ( i = 0 ; i < size ; ++i )
	{
		if ( sites[i].support > 0 )
		{
			sites[k] = sites[i] ;
			++k ;
		}
	}
	sites.resize( k ) ;
}
256
// Filter splice sites whose support comes almost entirely from multi-mapped
// reads (low uniqSupport relative to support), which usually indicates repeats.
// Filtered sites and their intron partners get support = -1 and are compacted.
void FilterRepeatSplitSites( std::vector<struct _splitSite> &sites )
{
	int i, j ;
	int size = sites.size() ;
	int k = 0 ;
	for ( i = 0 ; i < size ; )
	{
		// [i, j) is a run of sites at the same coordinate and of the same type.
		for ( j = i + 1 ; j < size ; ++j )
		{
			if ( sites[j].pos != sites[i].pos || sites[j].type != sites[i].type || sites[i].chrId != sites[j].chrId )
				break ;
		}
		// Find the site with the largest unique-read support, breaking ties by
		// total support. NOTE: k is reused here as a scan index.
		int max = -1 ;
		int maxtag = 0 ;
		for ( k = i ; k < j ; ++k )
		{
			if ( sites[k].uniqSupport > max )
			{
				max = sites[k].uniqSupport ;
				maxtag = k ;
			}
			else if ( sites[k].uniqSupport == max && sites[k].support > sites[maxtag].support )
			{
				maxtag = k ;
			}
		}

		if ( max > -1 )
		{
			if ( sites[maxtag].uniqSupport > sites[maxtag].support * 0.1 )
			{
				// The best site looks credible: drop only the sites whose
				// unique support is below 5% of their total support.
				for ( k = i ; k < j ; ++k )
					if ( sites[k].uniqSupport < 0.05 * sites[k].support )
					{
						sites[k].support = -1 ;

						// Invalidate the partner at the other end of the intron.
						int direction ;
						if ( sites[k].oppositePos < sites[k].pos )
							direction = -1 ;
						else
							direction = 1 ;
						int l ;
						for ( l = k ; l >= 0 && l < size ; l += direction )
							if ( sites[l].pos == sites[k].oppositePos && sites[l].oppositePos == sites[k].pos )
							{
								sites[l].support = -1 ;
								break ;
							}
					}
			}
			else
			{
				// Even the best site is dominated by multi-mapped reads:
				// drop every weakly supported (<= 10) site and its partner.
				for ( k = i ; k < j ; ++k )
				{
					if ( sites[k].support <= 10 )
					{
						sites[k].support = -1 ;

						int direction ;
						if ( sites[k].oppositePos < sites[k].pos )
							direction = -1 ;
						else
							direction = 1 ;
						int l ;
						for ( l = k ; l >= 0 && l < size ; l += direction )
							if ( sites[l].pos == sites[k].oppositePos && sites[l].oppositePos == sites[k].pos )
							{
								sites[l].support = -1 ;
								break ;
							}
					}
				}
			}
		}

		i = j ;
	}

	// Compact the surviving sites. k is reused again, now as the write cursor.
	k = 0 ;
	for ( i = 0 ; i < size ; ++i )
	{
		if ( sites[i].support > 0 )
		{
			sites[k] = sites[i] ;
			++k ;
		}
	}
	sites.resize( k ) ;

}
347
348
// When k=1, the gamma distribution becomes exponential distribution, and can be optimized analytically..
// Maximize: sum_i z_i( log( 1/theta e^{-x_i / theta} )
/*double ThetaOfExponentialDistribution( double *x, double *z, int n )
{
	double sumZ = 0;
	double sumZX = 0 ;
	for ( i = 0 ; i < n ; ++i )
	{
		sumZ += z[i] ;
		sumZX += z[i] * x[i] ;
	}
}*/

// Fit a single gamma distribution (shape k, scale theta) to the weighted
// observations (x[i], weight z[i]) by maximizing the weighted log-likelihood,
// using a Newton step when the Hessian is invertible and a diminishing
// gradient-ascent step otherwise.
// initK/initTheta: starting point for the optimization.
// lowerBoundK/upperBoundK: bounds on k; a non-positive bound means unbounded.
// lowerBoundMean/upperBoundMean: bounds on the mean k*theta, enforced by
//   rescaling theta; non-positive means unbounded.
// The fitted values are returned through the k and theta references.
void GradientDescentGammaDistribution( double &k, double &theta, double initK, double initTheta, double lowerBoundK, double upperBoundK,
	double lowerBoundMean, double upperBoundMean, double *x, double *z, int n )
{
	int i ;
	k = initK ;
	theta = initTheta ;
	double c = 0.5 ; // base step size for the gradient-ascent fallback
	int iterCnt = 1 ;

	// Sufficient statistics of the weighted sample.
	double sumZ = 0 ;
	double sumZX = 0 ;
	double sumZLogX = 0 ;
	double Hessian[2][2] ; // 0 for k, 1 for theta
	double inverseHessian[2][2] ;
	int tmp ;

	double positiveKRecord = -5, positiveThetaRecord = -5 ; // record the value of k, theta when theta, k becomes non-positive.

	for ( i = 0 ; i < n ; ++i )
	{
		sumZ += z[i] ;
		sumZX += z[i] * x[i] ;
		sumZLogX += z[i] * log( x[i] ) ;
	}

	while ( 1 )
	{
		double gradK = 0 ;
		double gradTheta = 0 ;

		double prevK = k ;
		double prevTheta = theta ;
		double digammaK = digammal( k ) ;

		// Gradient of the weighted gamma log-likelihood w.r.t. k and theta.
		gradK = sumZ * ( -log( theta ) - digammaK ) + sumZLogX ;
		gradTheta = -sumZ * ( k / theta ) + sumZX / ( theta * theta ) ;

		Hessian[0][0] = -sumZ * trigamma( k, &tmp ) ;
		Hessian[0][1] = -sumZ / theta ; // \partial l / ( \partial k \partial theta)
		Hessian[1][0] = -sumZ / theta ;
		Hessian[1][1] = sumZ * k / ( theta * theta ) - 2 * sumZX / ( theta * theta * theta ) ;

		double det = Hessian[0][0] * Hessian[1][1] - Hessian[0][1] * Hessian[1][0] ;
		/*printf( "%s iter %d:\n", __func__, iterCnt ) ;
		printf( "%lf %lf %lf %lf\n", sumZ, k, theta, sumZX ) ;
		printf( "%lf %lf %lf\n", gradK, gradTheta, det ) ;
		printf( "%lf %lf %lf %lf\n", Hessian[0][0], Hessian[0][1], Hessian[1][0], Hessian[1][1] ) ;*/
		if ( det <= 1e-4 && det >=-1e-4 )
		{
			// Near-singular Hessian: fall back to a diminishing gradient step.
			k = k + c / iterCnt * gradK ;
			theta = theta + c / iterCnt * gradTheta ;
		}
		else
		{
			// Newton step: p_new = p_old - step * H^{-1} * grad.
			inverseHessian[0][0] = Hessian[1][1] / det ;
			inverseHessian[0][1] = -Hessian[0][1] / det ;
			inverseHessian[1][0] = -Hessian[1][0] / det ;
			inverseHessian[1][1] = Hessian[0][0] / det ;
			//printf( "%lf %lf %lf %lf: %lf\n=====\n", inverseHessian[0][0], inverseHessian[0][1], inverseHessian[1][0], inverseHessian[1][1],
			//	Hessian[1][0] * inverseHessian[0][1] + Hessian[1][1] * inverseHessian[1][1] ) ;
			double step = 0.5 ;
			k = k - step * ( inverseHessian[0][0] * gradK + inverseHessian[0][1] * gradTheta ) ;
			theta = theta - step * ( inverseHessian[1][0] * gradK + inverseHessian[1][1] * gradTheta ) ;

			// If the step pushed k or theta non-positive, shrink it so the
			// offending parameter lands just above zero instead.
			bool flag = false ;
			if ( k <= 1e-6 )
			{
				step = ( prevK - 1e-6 ) / ( inverseHessian[0][0] * gradK + inverseHessian[0][1] * gradTheta ) ;

				if ( ABS( theta - positiveThetaRecord ) < 1e-5 )
					flag = true ;
				positiveThetaRecord = theta ;
			}
			if ( theta <= 1e-6 )
			{
				double tmp = ( prevTheta - 1e-6 ) / ( inverseHessian[1][0] * gradK + inverseHessian[1][1] * gradTheta ) ;
				if ( tmp < step )
					step = tmp ;

				if ( ABS( k - positiveKRecord ) < 1e-5 )
					flag = true ;
				positiveKRecord = k ;
			}

			if ( step != 0.5 )
			{
				k = prevK - step * ( inverseHessian[0][0] * gradK + inverseHessian[0][1] * gradTheta ) ;
				theta = prevTheta - step * ( inverseHessian[1][0] * gradK + inverseHessian[1][1] * gradTheta ) ;
			}

			/*if ( flag )
			{
				k = prevK ;
				theta = prevTheta ;
				break ;
			}*/
		}

		// Clamp k into the requested range.
		if ( upperBoundK > 0 && k > upperBoundK )
			k = upperBoundK ;
		else if ( lowerBoundK > 0 && k < lowerBoundK )
			k = lowerBoundK ;

		// Clamp the mean k*theta into range by rescaling theta.
		if ( upperBoundMean > 0 && k * theta > upperBoundMean )
		{
			theta = upperBoundMean / k ;
		}
		else if ( lowerBoundMean > 0 && k * theta < lowerBoundMean )
		{
			theta = lowerBoundMean / k ;
		}

		// Keep both parameters strictly positive.
		if ( k <= 1e-6 )
		{
			k = 1e-6 ;
		}

		if ( theta <= 1e-6 )
		{
			theta = 1e-6 ;
		}

		// Converged when the parameters barely moved.
		double diff = ABS( prevK - k ) + ABS( prevTheta - theta ) ;
		if ( diff < 1e-5 )
			break ;

		++iterCnt ;
		//if ( det <= 1e-4 && det >=-1e-4 && k >= 5000 ) //&& diff < 1 )
		// Diverging shape parameter: revert to the previous point and stop.
		if ( k >= 10000 ) //&& diff < 1 )
		{
			k = prevK ;
			theta = prevTheta ;
			break ;
		}
		if ( iterCnt == 1000 )
			break ;
	}
}
503
504 double MixtureGammaEM( double *x, int n, double &pi, double *k, double *theta, int tries, double meanBound[2], int iter = 1000 )
505 {
506 int i ;
507 double *z = new double[n] ; // the expectation that it assigned to model 0.
508 double *oneMinusZ = new double[n] ;
509 int t = 0 ;
510 double history[5] = {-1, -1, -1, -1, -1} ;
511 double maxX = -1 ;
512 double sumX = 0 ;
513 if ( n <= 0 )
514 return 0 ;
515
516 for ( i = 0 ; i < n ; ++i )
517 {
518 sumX += x[i] ;
519 if ( x[i] > maxX )
520 maxX = x[i] ;
521 }
522 if ( maxX > meanBound[1] && meanBound[1] >= 0 )
523 maxX = meanBound[1] ;
524
525 /*if ( meanBound[1] == -1 )
526 {
527 // The EM for coverage
528 maxX = 10.0 ;
529 }*/
530
531 while ( 1 )
532 {
533 double npi, nk[2], ntheta[2] ;
534 double sum = 0 ;
535 for ( i = 0 ; i < n ; ++i )
536 {
537 //double lf0 = -k[0] * log( theta[0] ) + ( k[0] - 1 ) * log( cov[i]) - cov[i] / theta[0] - lgamma( k[0] );
538 //double lf1 = -k[1] * log( theta[1] ) + ( k[1] - 1 ) * log( cov[i]) - cov[i] / theta[1] - lgamma( k[1] );
539 //z[i] = exp( lf0 + log( pi ) ) / ( exp( lf0 + log( pi ) ) + exp( lf1 + log( 1 - pi ) ) ) ;
540 if ( pi != 0 )
541 z[i] = MixtureGammaAssignment( x[i], pi, k, theta ) ;
542 else
543 z[i] = 0 ;
544 /*if ( isnan( z[i] ) )
545 {
546 printf( "nan: %lf %lf %lf %lf\n", x[i], pi, k, theta ) ;
547 }*/
548 oneMinusZ[i] = 1 - z[i] ;
549 sum += z[i] ;
550 }
551
552 // compute new pi.
553 npi = sum / n ;
554
555 // Use gradient descent to compute new k and theta.
556 if ( 1 ) //pi > 0 )
557 {
558 double bound ;
559 if ( meanBound[1] != -1 ) // the EM for ratio
560 {
561 bound = ( theta[1] * k[1] > 1 ) ? 1 : ( theta[1] * k[1] ) / ( 1 + tries );
562 GradientDescentGammaDistribution( nk[0], ntheta[0], k[0], theta[0], k[1], -1, -1, bound, x, z, n ) ; // It seems setting an upper bound 1 for k[0] is not a good idea.
563 }
564 else
565 {
566 bound = ( theta[1] * k[1] > 1 ) ? 1 : ( theta[1] * k[1] ) / ( 1 + tries ) ;
567 GradientDescentGammaDistribution( nk[0], ntheta[0], k[0], theta[0], k[1], -1, meanBound[0], bound, x, z, n ) ; // It seems setting an upper bound 1 for k[0] is not a good idea.
568 }
569 GradientDescentGammaDistribution( nk[1], ntheta[1], k[1], theta[1], -1, k[0], theta[0] * k[0], maxX, x, oneMinusZ, n ) ;
570 }
571 else
572 {
573 GradientDescentGammaDistribution( nk[1], ntheta[1], k[1], theta[1], 0, 0, 0, 0, x, oneMinusZ, n ) ;
574 }
575
576 double diff ;
577 if ( isnan( npi ) || isnan( nk[0] ) || isnan( nk[1] ) || isnan( ntheta[0] ) || isnan( ntheta[1] ) )
578 {
579 delete[] z ;
580 delete[] oneMinusZ ;
581 return -1 ;
582 }
583 diff = ABS( nk[0] - k[0] ) + ABS( nk[1] - k[1] )
584 + ABS( ntheta[0] - theta[0] ) + ABS( ntheta[1] - theta[1] ) ; // pi is fully determined by these 4 parameters.
585 if ( diff < 1e-4 )
586 break ;
587 diff = ABS( nk[0] - history[1] ) + ABS( nk[1] - history[2] )
588 + ABS( ntheta[0] - history[3] ) + ABS( ntheta[1] - history[4] ) ; // pi is fully determined by these 4 parameters.
589 if ( diff < 1e-4 )
590 break ;
591
592 history[0] = pi ;
593 history[1] = k[0] ;
594 history[2] = k[1] ;
595 history[3] = theta[0] ;
596 history[4] = theta[1] ;
597
598 pi = npi ;
599 k[0] = nk[0] ;
600 k[1] = nk[1] ;
601 theta[0] = ntheta[0] ;
602 theta[1] = ntheta[1] ;
603
604 /*double logLikelihood = 0 ;
605 for ( i = 0 ; i < n ; ++i )
606 logLikelihood += log( pi * exp( LogGammaDensity( x[i], k[0], theta[0]) ) +
607 (1 - pi ) * exp( LogGammaDensity( x[i], k[1], theta[1] ) ) ) ;*/
608
609 //printf( "%d: %lf %lf %lf %lf %lf\n", t, pi, k[0], theta[0], k[1], theta[1] ) ;
610
611 ++t ;
612 if ( iter != -1 && t >= iter )
613 break ;
614 }
615 delete[] z ;
616 delete[] oneMinusZ ;
617 return 0 ;
618 }
619
// Returns true when the two mixture components have (nearly) identical shape
// (k) and scale (theta) parameters, i.e. the mixture has collapsed.
bool IsParametersTheSame( double *k, double *theta )
{
	const bool shapeClose = fabs( k[0] - k[1] ) < 1e-2 ;
	const bool scaleClose = fabs( theta[0] - theta[1] ) < 1e-2 ;
	return shapeClose && scaleClose ;
}
626
// Fit the two gamma mixtures used to classify intron-retention candidates:
// one over the left/right coverage ratios (covRatio) and one over the
// transformed coverages (cov). If the ratio EM degenerates (pi at an extreme
// or both components identical), it is retried up to maxTries times with
// jittered initial parameters. All fitted parameters are written through the
// reference/array output arguments. Always returns 0.
int RatioAndCovEM( double *covRatio, double *cov, int n, double &piRatio, double kRatio[2],
	double thetaRatio[2], double &piCov, double kCov[2], double thetaCov[2] )
{
	int i ;
	piRatio = 0.6 ; // mixture coefficient for model 0 and 1
	kRatio[0] = 0.9 ;
	kRatio[1] = 0.45 ;
	thetaRatio[0] = 0.05 ;
	thetaRatio[1] = 1 ;
	double meanBound[2] = {-1, 1} ; // [0] is for the lower bound of the noise model, [1] is for the upper bound of the true model

	/*double *filteredCovRatio = new double[n] ;// ignore the ratio that is greater than 5.
	int m = 0 ;
	for ( i = 0 ; i < n ; ++i )
		if ( covRatio[i] < 1.0 )
		{
			filteredCovRatio[m] = covRatio[i] ;
			++m ;
		}*/
	srand( 17 ) ; // fixed seed so the jittered retries are reproducible
	int maxTries = 10 ;
	int t = 0 ;
	// Sorted copy of the ratios; currently unused (see commented assignment).
	double *buffer = new double[n] ;
	for ( i = 0 ; i < n ; ++i )
		buffer[i] = covRatio[i] ;
	qsort( buffer, n, sizeof( double ), CompDouble ) ;
	//covRatio = buffer ;
	while ( 1 )
	{
		//printf( "EM\n" ) ;
		MixtureGammaEM( covRatio, n, piRatio, kRatio, thetaRatio, t, meanBound ) ;
		//printf( "%lf %lf %lf %lf %lf\n", piRatio, kRatio[0], kRatio[1], thetaRatio[0], thetaRatio[1] ) ;
		// Degenerate fit: jitter the starting parameters and retry.
		if ( piRatio > 0.999 || piRatio < 0.001 || IsParametersTheSame( kRatio, thetaRatio ) )
		{
			++t ;
			if ( t > maxTries )
				break ;
			piRatio = 0.6 ;
			// Each parameter gets a small random perturbation in [-0.1, 0.05];
			// fall back to the default when a perturbation goes non-positive.
			kRatio[0] += ( ( rand() * 0.5 - RAND_MAX ) / (double)RAND_MAX * 0.1 ) ;
			if ( kRatio[0] <= 0 )
				kRatio[0] = 0.9 ;
			kRatio[1] += ( ( rand() * 0.5 - RAND_MAX ) / (double)RAND_MAX * 0.1 ) ;
			if ( kRatio[1] <= 0 )
				kRatio[1] = 0.45 ;
			thetaRatio[0] += ( ( rand() * 0.5 - RAND_MAX ) / (double)RAND_MAX * 0.1 ) ;
			if ( thetaRatio[0] <= 0 )
				thetaRatio[0] = 0.05 ;
			thetaRatio[1] += ( ( rand() * 0.5 - RAND_MAX ) / (double)RAND_MAX * 0.1 ) ;
			if ( thetaRatio[1] <= 0 )
				thetaRatio[1] = 1 ;
			// Keep the noise model's shape at least as large as the true model's.
			if ( kRatio[0] < kRatio[1] )
			{
				if ( rand() & 1 )
					kRatio[0] = kRatio[1] ;
				else
					kRatio[1] = kRatio[0] ;
			}
			// Keep the noise model's mean below the true model's mean.
			if ( kRatio[0] * thetaRatio[0] > kRatio[1] * thetaRatio[1] )
			{
				thetaRatio[0] = kRatio[1] * thetaRatio[1] / kRatio[0] ;
			}
			//printf( "%lf %lf %lf %lf %lf\n", piRatio, kRatio[0], kRatio[1], thetaRatio[0], thetaRatio[1] ) ;

			continue ;
		}

		break ;
	}
	//delete[] filteredCovRatio ;
	if ( t > maxTries && piRatio > 0.999 )
	{
		/*piRatio = 0.6 ; // mixture coefficient for model 0 and 1
		kRatio[0] = 0.9 ;
		kRatio[1] = 0.45 ;
		thetaRatio[0] = 0.05 ;
		thetaRatio[1] = 1 ;*/
		piRatio = 0.999 ;
	}
	if ( IsParametersTheSame( kRatio, thetaRatio ) || piRatio <= 1e-3 )
		piRatio = 1e-3 ;

	// Fit the coverage mixture, starting from the ratio fit's mixing weight.
	piCov = piRatio ; // mixture coefficient for model 0 and 1
	kCov[0] = 0.9 ;
	kCov[1] = 0.45 ;
	thetaCov[0] = 3 ;
	thetaCov[1] = 12 ;

	// only do one iteration of EM, so that pi does not change?
	// But it seems it still better to run full EM.
	meanBound[0] = 1.01 ;
	meanBound[1] = -1 ;

	//printf( "for coverage:\n" ) ;
	//piCov = 0.001000 ;
	MixtureGammaEM( cov, n, piCov, kCov, thetaCov, 0, meanBound ) ;
	//printf( "for coverage done\n" ) ;
	// The coverage EM keeps the ratio EM's mixing weight.
	piCov = piRatio ;

	delete []buffer ;

	return 0 ;
}
729
730 double GetPValue( double x, double *k, double *theta )
731 {
732 int fault ;
733 double p ;
734 p = 1 - gammad( x / theta[0], k[0], &fault ) ;
735 return p ;
736 }
737
738 // if x's value is less than the average of (k0-1)*theta0, then we force x=(k0-1)*theta0,
739 // the mode of the model 0. Of course, it does not affect when k0<=1 already.
740 double MixtureGammaAssignmentAdjust( double x, double pi, double* k, double *theta )
741 {
742 if ( x < ( k[0] - 1 ) * theta[0] )
743 {
744 x = ( k[0] - 1 ) * theta[0] ;
745 }
746 return MixtureGammaAssignment( x, pi, k, theta ) ;
747 }
748
749
// Map a raw average coverage onto the scale used for the gamma fitting.
// Depths at or below 2 (plus epsilon) collapse to a tiny positive value —
// regions covered by very few reads are filtered when building subexons —
// and everything else is shifted down by 2.
double TransformCov( double c )
{
	if ( c <= 2 + 1e-6 )
		return 1e-6 ;
	return c - 2 ;
}
766
767 int main( int argc, char *argv[] )
768 {
769 int i, j ;
770 bool noStats = false ;
771 if ( argc < 3 )
772 {
773 fprintf( stderr, usage ) ;
774 exit( 1 ) ;
775 }
776
777 gMinDepth = 2 ;
778
779 for ( i = 3 ; i < argc ; ++i )
780 {
781 if ( !strcmp( argv[i], "--noStats" ) )
782 {
783 noStats = true ;
784 continue ;
785 }
786 else if ( !strcmp( argv[i], "--minDepth" ) )
787 {
788 gMinDepth = atoi( argv[i + 1] ) ;
789 ++i ;
790 continue ;
791 }
792 else
793 {
794 fprintf( stderr, "Unknown argument: %s\n", argv[i] ) ;
795 return 0 ;
796 }
797 }
798
799 Alignments alignments ;
800 alignments.Open( argv[1] ) ;
801 std::vector<struct _splitSite> splitSites ; // only compromised the
802 std::vector<struct _splitSite> allSplitSites ;
803
804 // read in the splice site
805 FILE *fp ;
806 fp = fopen( argv[2], "r" ) ;
807 char chrom[50] ;
808 int64_t start, end ;
809 int support ;
810 char strand[3] ;
811 int uniqSupport, secondarySupport, uniqEditDistance, secondaryEditDistance ;
812 while ( fscanf( fp, "%s %" PRId64 " %" PRId64 " %d %s %d %d %d %d", chrom, &start, &end, &support, strand,
813 &uniqSupport, &secondarySupport, &uniqEditDistance, &secondaryEditDistance ) != EOF )
814 {
815 if ( support <= 0 )
816 continue ;
817 //if ( !( uniqSupport >= 1
818 // || secondarySupport > 10 ) )
819 //if ( uniqSupport <= 0.01 * ( uniqSupport + secondarySupport ) || ( uniqSupport == 0 && secondarySupport < 20 ) )
820 //if ( uniqSupport == 0 && secondarySupport <= 10 )
821 // continue ;
822 int chrId = alignments.GetChromIdFromName( chrom ) ;
823 struct _splitSite ss ;
824 --start ;
825 --end ;
826 ss.pos = start ;
827 ss.chrId = chrId ;
828 ss.type = 2 ;
829 ss.oppositePos = end ;
830 ss.strand = strand[0] ;
831 ss.support = support ;
832 ss.uniqSupport = uniqSupport ;
833 ss.mismatchSum = uniqEditDistance + secondaryEditDistance ;
834 splitSites.push_back( ss ) ;
835
836 ss.pos = end ;
837 ss.type = 1 ;
838 ss.oppositePos = start ;
839 ss.strand = strand[0] ;
840 ss.support = support ;
841 ss.uniqSupport = uniqSupport ;
842 ss.mismatchSum = uniqEditDistance + secondaryEditDistance ;
843 splitSites.push_back( ss ) ;
844 }
845 fclose( fp ) ;
846 //printf( "ss:%d\n", splitSites.size() ) ;
847
848 //printf( "ss:%d\n", splitSites.size() ) ;
849
850 alignments.GetGeneralInfo( true ) ;
851 // Build the blocks
852 Blocks regions ;
853 alignments.Rewind() ;
854 regions.BuildExonBlocks( alignments ) ;
855 //printf( "%d\n", regions.exonBlocks.size() ) ;
856
857 FilterAndSortSplitSites( splitSites ) ;
858 FilterNearSplitSites( splitSites ) ;
859 FilterRepeatSplitSites( splitSites ) ;
860 regions.FilterSplitSitesInRegions( splitSites ) ;
861 regions.FilterGeneMergeSplitSites( splitSites ) ;
862
863
864 allSplitSites = splitSites ;
865 KeepUniqSplitSites( splitSites ) ;
866
867 //for ( i = 0 ; i < splitSites.size() ; ++i )
868 // printf( "%d %d\n", splitSites[i].pos + 1, splitSites[i].oppositePos + 1 ) ;
869 // Split the blocks using split site
870 regions.SplitBlocks( alignments, splitSites ) ;
871 //printf( "%d\n", regions.exonBlocks.size() ) ;
872 /*for ( i = 0 ; i < regions.exonBlocks.size() ; ++i )
873 {
874 struct _block &e = regions.exonBlocks[i] ;
875 printf( "%s %" PRId64 " %" PRId64 " %d %d\n", alignments.GetChromName( e.chrId ), e.start + 1, e.end + 1, e.leftType, e.rightType ) ;
876 }
877 return 0 ;*/
878 // Recompute the coverage for each block.
879 alignments.Rewind() ;
880 //printf( "Before computeDepth: %d\n", regions.exonBlocks.size() ) ;
881
882 regions.ComputeDepth( alignments ) ;
883 //printf( "After computeDepth: %d\n", regions.exonBlocks.size() ) ;
884
885 // Merge blocks that may have a hollow coverage by accident.
886 regions.MergeNearBlocks() ;
887 //printf( "After merge: %d\n", regions.exonBlocks.size() ) ;
888
889 // Put the intron informations
890 regions.AddIntronInformation( allSplitSites, alignments ) ;
891 //printf( "After add information.\n" ) ;
892
893 // Compute the average ratio against the left and right connected subexons.
894 regions.ComputeRatios() ;
895 //printf( "After compute ratios.\n" ) ;
896
897 //printf( "Finish building regions.\n" ) ;
898 if ( noStats )
899 {
900 // just output the subexons.
901 if ( realpath( argv[1], buffer ) == NULL )
902 {
903 strcpy( buffer, argv[1] ) ;
904 }
905 printf( "#%s\n", buffer ) ;
906 printf( "#fitted_ir_parameter_ratio: pi: -1 k0: -1 theta0: -1 k1: -1 theta1: -1\n" ) ;
907 printf( "#fitted_ir_parameter_cov: pi: -1 k0: -1 theta0: -1 k1: -1 theta1: -1\n" ) ;
908
909 int blockCnt = regions.exonBlocks.size() ;
910 for ( int i = 0 ; i < blockCnt ; ++i )
911 {
912 struct _block &e = regions.exonBlocks[i] ;
913 double avgDepth = (double)e.depthSum / ( e.end - e.start + 1 ) ;
914 printf( "%s %" PRId64 " %" PRId64 " %d %d %lf -1 -1 -1 -1 ", alignments.GetChromName( e.chrId ), e.start + 1, e.end + 1, e.leftType, e.rightType, avgDepth ) ;
915 int prevCnt = e.prevCnt ;
916 if ( i > 0 && e.start == regions.exonBlocks[i - 1].end + 1 &&
917 e.leftType == regions.exonBlocks[i - 1].rightType )
918 {
919 printf( "%d ", prevCnt + 1 ) ;
920 for ( j = 0 ; j < prevCnt ; ++j )
921 printf( "%" PRId64 " ", regions.exonBlocks[ e.prev[j] ].end + 1 ) ;
922 printf( "%" PRId64 " ", regions.exonBlocks[i - 1].end + 1 ) ;
923 }
924 else
925 {
926 printf( "%d ", prevCnt ) ;
927 for ( j = 0 ; j < prevCnt ; ++j )
928 printf( "%" PRId64 " ", regions.exonBlocks[ e.prev[j] ].end + 1 ) ;
929 }
930
931 int nextCnt = e.nextCnt ;
932 if ( i < blockCnt - 1 && e.end == regions.exonBlocks[i + 1].start - 1 &&
933 e.rightType == regions.exonBlocks[i + 1].leftType )
934 {
935 printf( "%d %" PRId64 " ", nextCnt + 1, regions.exonBlocks[i + 1].start + 1 ) ;
936 }
937 else
938 printf( "%d ", nextCnt ) ;
939 for ( j = 0 ; j < nextCnt ; ++j )
940 printf( "%" PRId64 " ", regions.exonBlocks[ e.next[j] ].start + 1 ) ;
941 printf( "\n" ) ;
942
943 }
944 return 0 ;
945 }
946
947 // Extract the blocks for different events.
948 int blockCnt = regions.exonBlocks.size() ;
949 std::vector<struct _block> irBlocks ; // The regions corresponds to intron retention events.
950 double *leftClassifier = new double[ blockCnt ] ;
951 double *rightClassifier = new double[ blockCnt ] ;
952 std::vector<struct _block> overhangBlocks ; //blocks like (...[ or ]...)
953 std::vector<struct _block> islandBlocks ; // blocks like (....)
954
955 for ( i = 0 ; i < blockCnt ; ++i )
956 {
957 int ltype = regions.exonBlocks[i].leftType ;
958 int rtype = regions.exonBlocks[i].rightType ;
959 leftClassifier[i] = -1 ;
960 rightClassifier[i] = -1 ;
961
962 //double avgDepth = (double)regions.exonBlocks[i].depthSum / ( regions.exonBlocks[i].end - regions.exonBlocks[i].start + 1 ) ;
963
964 if ( ltype == 2 && rtype == 1 )
965 {
966 // candidate intron retention.
967 // Note that when I compute the ratio, it is already made sure that the avgDepth>1.
968 double ratio = regions.PickLeftAndRightRatio( regions.exonBlocks[i] ) ;
969
970 //printf( "%lf %lf\n", regions.exonBlocks[i].leftRatio, regions.exonBlocks[i].rightRatio ) ;
971 if ( ratio > 0 )
972 {
973 regions.exonBlocks[i].ratio = ratio ;
974 irBlocks.push_back( regions.exonBlocks[i] ) ;
975 irBlocks[ irBlocks.size() - 1 ].contigId = i ;
976 }
977 }
978 else if ( ( ltype == 0 && rtype == 1 ) || ( ltype == 2 && rtype == 0 ) )
979 {
980 // subexons like (...[ or ]...)
981 double ratio ;
982 if ( ltype == 0 )
983 {
984 ratio = regions.exonBlocks[i].rightRatio ;
985 }
986 else if ( ltype == 2 )
987 {
988 ratio = regions.exonBlocks[i].leftRatio ;
989 }
990 if ( ratio > 0 )
991 {
992 regions.exonBlocks[i].ratio = ratio ;
993 overhangBlocks.push_back( regions.exonBlocks[i] ) ;
994 overhangBlocks[ overhangBlocks.size() - 1].contigId = i ;
995 }
996 }
997 else if ( ltype == 0 && rtype == 0 )
998 {
999 islandBlocks.push_back( regions.exonBlocks[i] ) ;
1000 islandBlocks[ islandBlocks.size() - 1].contigId = i ;
1001 }
1002 }
1003
1004 // Compute the histogram for each intron.
1005 int irBlockCnt = irBlocks.size() ;
1006 double *cov = new double[irBlockCnt] ;
// --- Intron-retention (IR) candidate scoring ---
// For each IR candidate block, compute two features: a transformed average
// coverage (TransformCov) and a left/right depth ratio. A two-component
// gamma mixture is fitted to each feature by EM (RatioAndCovEM below), and
// the mixture-assignment probabilities become the classifier scores.
1007 double *covRatio = new double[ irBlockCnt ] ;
1008 double piRatio, kRatio[2], thetaRatio[2] ;
1009 double piCov, kCov[2], thetaCov[2] ;
1010 for ( i = 0 ; i < irBlockCnt ; ++i )
1011 {
1012 double avgDepth = regions.GetAvgDepth( irBlocks[i] ) ;
1013 //cov[i] = ( avgDepth - 1 ) / ( flankingAvg - 1 ) ;
1014 cov[i] = TransformCov( avgDepth ) ;
1015
1016 covRatio[i] = regions.PickLeftAndRightRatio( irBlocks[i] ) ;
1017 //cov[i] = avgDepth / anchorAvg ;
1018 //printf( "%"PRId64" %d %d: %lf %lf\n", irBlocks[i].depthSum, irBlocks[i].start, irBlocks[i].end, avgDepth, cov[i] ) ;
1019 }
1020
// Dead debugging code kept by the author: read precomputed ratios from a file
// instead of computing them from the alignments.
1021 /*fp = fopen( "ratio.out", "r" ) ;
1022 int irBlockCnt = 0 ;
1023 double *cov = new double[10000] ;
1024 while ( 1 )
1025 {
1026 double r ;
1027 if ( fscanf( fp, "%lf", &r ) == EOF )
1028 break ;
1029 cov[ irBlockCnt ] = r ;
1030 ++irBlockCnt ;
1031 }
1032 fclose( fp ) ;*/
// EM fit: estimates mixture weight (pi) plus gamma shape (k) and scale
// (theta) for both components, separately for the ratio and coverage features.
1033 RatioAndCovEM( covRatio, cov, irBlockCnt, piRatio, kRatio, thetaRatio, piCov, kCov, thetaCov ) ;
1034
// Score every IR block: take the larger of the ratio-based and coverage-based
// assignment scores and store it as both the left and right classifier value
// for that block's contig.
1035 for ( i = 0 ; i < irBlockCnt ; ++i )
1036 {
1037 //double lf0 = -k[0] * log( theta[0] ) + ( k[0] - 1 ) * log( cov[i]) - cov[i] / theta[0] - lgamma( k[0] ) ;
1038 //double lf1 = -k[1] * log( theta[1] ) + ( k[1] - 1 ) * log( cov[i]) - cov[i] / theta[1] - lgamma( k[1] ) ;
1039
1040 double p1, p2, p ;
1041
1042 p1 = MixtureGammaAssignmentAdjust( covRatio[i], piRatio, kRatio, thetaRatio ) ;
1043 p2 = MixtureGammaAssignmentAdjust( cov[i], piCov, kCov, thetaCov ) ;
1044
1045 /*p1 = GetPValue( covRatio[i], kRatio, thetaRatio ) ; //1 - gammad( covRatio[i] / thetaRatio[0], kRatio[0], &fault ) ;
1046 if ( piRatio != 0 )
1047 p2 = GetPValue( cov[i], kCov, thetaCov ) ;//1 - gammad( cov[i] / thetaCov[0], kCov[0], &fault ) ;
1048 else
1049 p2 = p1 ;*/
1050 //printf( "%lf %lf: %lf %lf\n", covRatio[i], cov[i], p1, p2 ) ;
// Max of the two feature scores (contrast with the overhang case below,
// which combines them with a geometric mean).
1051 p = p1 > p2 ? p1 : p2 ;
1052
1053
1054 //printf( "%d %d: avg: %lf ratio: %lf p: %lf\n", irBlocks[i].start, irBlocks[i].end, irBlocks[i].depthSum / (double)( irBlocks[i].end - irBlocks[i].start + 1 ), covRatio[i],
1055 // p ) ;
1056 leftClassifier[ irBlocks[i].contigId ] = p ;
1057 rightClassifier[ irBlocks[i].contigId ] = p ;
1058 }
1059
1060 // Process the classifier for overhang subexons and the subexons to see whether we need soft boundary
// The cov/covRatio buffers are released and reallocated at the overhang
// count, then a second independent EM fit is run on the overhang blocks.
1061 int overhangBlockCnt = overhangBlocks.size() ;
1062 delete []cov ;
1063 delete []covRatio ;
1064
1065 cov = new double[ overhangBlockCnt ] ;
1066 covRatio = new double[overhangBlockCnt] ;
1067 double overhangPiRatio, overhangKRatio[2], overhangThetaRatio[2] ;
1068 double overhangPiCov, overhangKCov[2], overhangThetaCov[2] ;
1069
1070 for ( i = 0 ; i < overhangBlockCnt ; ++i )
1071 {
// Overhang blocks carry a precomputed ratio; coverage is transformed as above.
1072 covRatio[i] = overhangBlocks[i].ratio ;
1073 cov[i] = TransformCov( regions.GetAvgDepth( overhangBlocks[i] ) ) ;
1074 }
1075 RatioAndCovEM( covRatio, cov, overhangBlockCnt, overhangPiRatio, overhangKRatio, overhangThetaRatio, overhangPiCov, overhangKCov, overhangThetaCov ) ;
1076
1077 for ( i = 0 ; i < overhangBlockCnt ; ++i )
1078 {
1079 //double lf0 = -k[0] * log( theta[0] ) + ( k[0] - 1 ) * log( cov[i]) - cov[i] / theta[0] - lgamma( k[0] ) ;
1080 //double lf1 = -k[1] * log( theta[1] ) + ( k[1] - 1 ) * log( cov[i]) - cov[i] / theta[1] - lgamma( k[1] ) ;
1081
1082 double p1, p2, p ;
1083 p1 = MixtureGammaAssignmentAdjust( covRatio[i], overhangPiRatio, overhangKRatio, overhangThetaRatio ) ;
1084 p2 = MixtureGammaAssignmentAdjust( cov[i], overhangPiCov, overhangKCov, overhangThetaCov ) ;
1085
1086 /*p1 = GetPValue( covRatio[i], overhangKRatio, overhangThetaRatio ) ; //1 - gammad( covRatio[i] / thetaRatio[0], kRatio[0], &fault ) ;
1087 if ( overhangPiRatio != 0)
1088 p2 = GetPValue( cov[i], overhangKCov, overhangThetaCov ) ;//1 - gammad( cov[i] / thetaCov[0], kCov[0], &fault ) ;
1089 else
1090 p2 = p1 ;*/
1091
1092 //p = p1 < p2 ? p1 : p2 ;
// Geometric mean of the two feature scores (unlike the IR case, which
// takes the max; the commented line shows a min was also tried).
1093 p = sqrt( p1 * p2 ) ;
1094
1095 int idx = overhangBlocks[i].contigId ;
// NOTE(review): both branches of this if/else assign the identical value,
// so the rightType == 0 test has no effect. Possibly the two cases were
// meant to be handled differently (e.g. updating only one side) — confirm
// intent before simplifying.
1096 if ( regions.exonBlocks[idx].rightType == 0 )
1097 leftClassifier[ idx ] = rightClassifier[ idx ] = p ;
1098 else
1099 leftClassifier[ idx ] = rightClassifier[ idx ] = p ;
1100 }
1101
// Soft-boundary scores for every block: when a side abuts a splice boundary
// of the matching type, convert its left/right ratio to a two-sided normal
// tail probability via alnorm; a negative ratio defaults the score to 1.
1102 for ( i = 0 ; i < blockCnt ; ++i )
1103 {
1104 struct _block &e = regions.exonBlocks[i] ;
1105 //if ( ( e.leftType == 0 && e.rightType == 1 ) ||
1106 // variance-stabailizing transformation of poisson distribution. But we are more conservative here.
1107 // The multiply 2 before that is because we ignore the region below 0, so we need to somehow renormalize the distribution.
1108 if ( e.leftType == 1 )
1109 {
1110 if ( e.leftRatio >= 0 )
1111 leftClassifier[i] = 2 * alnorm( e.leftRatio * 2.0 , true ) ;
1112 else
1113 leftClassifier[i] = 1 ;
1114 }
1115 /*else if ( e.leftType == 0 )
1116 {
1117 // If this region is a start of a gene, the other sample might introduce
1118 // a new intron before it. So we want to test whether this region can still
1119 // be a start of a gene even there is an intron before it.
1120 for ( j = i + 1 ; j < blockCnt ; ++j )
1121 {
1122 if ( regions.exonBlocks[j].contigId != regions.exonBlocks[i].contigId )
1123 break ;
1124 }
1125
1126 for ( k = i ; k < j ; ++k )
1127 if ( regions.exonBlocks[j].leftType == 1 )
1128 break ;
1129 if ( k >= j )
1130 {
1131 leftClassifier[i] = alnorm( )
1132 }
1133 }*/
1134
1135 //if ( ( e.rightType == 0 && e.leftType == 2 ) ||
1136 if ( e.rightType == 2 )
1137 {
1138 if ( e.rightRatio >= 0 )
1139 rightClassifier[i] = 2 * alnorm( e.rightRatio * 2.0, true ) ;
1140 else
1141 rightClassifier[i] = 1 ;
1142 }
1143 }
1144
1145 // Process the result for subexons seems like single-exon transcript (...)
// Island blocks (isolated, single-exon-like regions) are scored by a p-value
// of their transformed coverage. Note this reuses kCov/thetaCov from the IR
// mixture fit above, not a fit on the island blocks themselves.
1146 int islandBlockCnt = islandBlocks.size() ;
1147 //std::sort( islandBlocks.begin(), islandBlocks.end(), CompBlocksByAvgDepth ) ;
1148 for ( i = 0, j = 0 ; i < islandBlockCnt ; ++i )
1149 {
// Abandoned rank-based scoring retained for reference.
1150 /*if ( regions.GetAvgDepth( islandBlocks[i] ) != regions.GetAvgDepth( islandBlocks[j] ) )
1151 j = i ;
1152 leftClassifier[ islandBlocks[i].contigId ] = 1 - (j + 1) / (double)( islandBlockCnt + 1 ) ;
1153 rightClassifier[ islandBlocks[i].contigId ] = 1 - (j + 1) / (double)( islandBlockCnt + 1 ) ;*/
1154 double p = GetPValue( TransformCov( regions.GetAvgDepth( islandBlocks[i] ) ), kCov, thetaCov ) ;
1155 leftClassifier[ islandBlocks[i].contigId ] = p ;
1156 rightClassifier[ islandBlocks[i].contigId ] = p ;
1157 }
1158
1159
1160 // Output the result.
// Header: the absolute path of the alignment file (fall back to the raw
// argv[1] string if realpath fails), then the fitted mixture parameters.
1161 if ( realpath( argv[1], buffer ) == NULL )
1162 {
1163 strcpy( buffer, argv[1] ) ;
1164 }
1165 printf( "#%s\n", buffer ) ;
1166 // TODO: higher precision.
1167 printf( "#fitted_ir_parameter_ratio: pi: %lf k0: %lf theta0: %lf k1: %lf theta1: %lf\n", piRatio, kRatio[0], thetaRatio[0], kRatio[1], thetaRatio[1] ) ;
1168 printf( "#fitted_ir_parameter_cov: pi: %lf k0: %lf theta0: %lf k1: %lf theta1: %lf\n", piCov, kCov[0], thetaCov[0], kCov[1], thetaCov[1] ) ;
1169
1170 printf( "#fitted_overhang_parameter_ratio: pi: %lf k0: %lf theta0: %lf k1: %lf theta1: %lf\n", overhangPiRatio, overhangKRatio[0], overhangThetaRatio[0], overhangKRatio[1], overhangThetaRatio[1] ) ;
1171 printf( "#fitted_overhang_parameter_cov: pi: %lf k0: %lf theta0: %lf k1: %lf theta1: %lf\n", overhangPiCov, overhangKCov[0], overhangThetaCov[0], overhangKCov[1], overhangThetaCov[1] ) ;
1172
1173
// One output line per subexon block: chrom, 1-based start/end, boundary
// types, strands, average depth, left/right ratios and classifier scores,
// then the lists of previous/next neighbor coordinates.
1174 for ( int i = 0 ; i < blockCnt ; ++i )
1175 {
1176 struct _block &e = regions.exonBlocks[i] ;
1177 double avgDepth = regions.GetAvgDepth( e ) ;
1178 printf( "%s %" PRId64 " %" PRId64 " %d %d %c %c %lf %lf %lf %lf %lf ", alignments.GetChromName( e.chrId ), e.start + 1, e.end + 1, e.leftType, e.rightType,
1179 e.leftStrand, e.rightStrand, avgDepth,
1180 e.leftRatio, e.rightRatio, leftClassifier[i], rightClassifier[i] ) ;
1181 int prevCnt = e.prevCnt ;
// A block that starts exactly one base after the previous block's end is
// treated as implicitly connected: report one extra "prev" neighbor.
1182 if ( i > 0 && e.start == regions.exonBlocks[i - 1].end + 1 )
1183 //&& e.leftType == regions.exonBlocks[i - 1].rightType )
1184 {
1185 printf( "%d ", prevCnt + 1 ) ;
1186 for ( j = 0 ; j < prevCnt ; ++j )
1187 printf( "%" PRId64 " ", regions.exonBlocks[ e.prev[j] ].end + 1 ) ;
1188 printf( "%" PRId64 " ", regions.exonBlocks[i - 1].end + 1 ) ;
1189 }
1190 else
1191 {
1192 printf( "%d ", prevCnt ) ;
1193 for ( j = 0 ; j < prevCnt ; ++j )
1194 printf( "%" PRId64 " ", regions.exonBlocks[ e.prev[j] ].end + 1 ) ;
1195 }
1196
1197 int nextCnt = e.nextCnt ;
// Symmetric handling for the "next" side: an immediately adjacent
// following block is printed inline as an extra neighbor before the
// explicit next-list.
1198 if ( i < blockCnt - 1 && e.end == regions.exonBlocks[i + 1].start - 1 )
1199 //&& e.rightType == regions.exonBlocks[i + 1].leftType )
1200 {
1201 printf( "%d %" PRId64 " ", nextCnt + 1, regions.exonBlocks[i + 1].start + 1 ) ;
1202 }
1203 else
1204 printf( "%d ", nextCnt ) ;
1205 for ( j = 0 ; j < nextCnt ; ++j )
1206 printf( "%" PRId64 " ", regions.exonBlocks[ e.next[j] ].start + 1 ) ;
1207 printf( "\n" ) ;
1208 }
1209
// Release the feature and classifier buffers allocated earlier.
1210 delete[] cov ;
1211 delete[] covRatio ;
1212 delete[] leftClassifier ;
1213 delete[] rightClassifier ;
1214 }