# HG changeset patch
# User davidvanzessen
# Date 1548752049 18000
# Node ID ba33b94637ca874848b4a9019f5ac6d968c65bbc
# Parent 43a1aa648537827ab031b667d2ade4ec8b4fc7a0
Uploaded
diff -r 43a1aa648537 -r ba33b94637ca LICENSE
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/LICENSE Tue Jan 29 03:54:09 2019 -0500
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 david
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff -r 43a1aa648537 -r ba33b94637ca README.md
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/README.md Tue Jan 29 03:54:09 2019 -0500
@@ -0,0 +1,12 @@
+# SHM CSR
+
+Somatic hypermutation and class switch recombination pipeline
+
+# Dependencies
+
+- [Python 2.7](https://www.python.org/)
+- [Change-O](https://changeo.readthedocs.io/en/version-0.4.4/)
+- [Baseline](http://selection.med.yale.edu/baseline/)
+- [R data.table](https://cran.r-project.org/web/packages/data.table/data.table.pdf)
+- [R ggplot2](https://cran.r-project.org/web/packages/ggplot2/ggplot2.pdf)
+- [R reshape2](https://cran.r-project.org/web/packages/reshape2/reshape2.pdf)
\ No newline at end of file
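
The R packages named in the README are on CRAN and can be installed from R; a minimal sketch (the mirror URL is an assumption, and Python 2.7, Change-O and Baseline are installed separately, per their own documentation):

    # Install the R dependencies listed in README.md (CRAN mirror is an assumption)
    install.packages(c("data.table", "ggplot2", "reshape2"),
                     repos = "https://cloud.r-project.org")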
diff -r 43a1aa648537 -r ba33b94637ca aa_histogram.r
--- a/aa_histogram.r Thu Dec 07 03:44:38 2017 -0500
+++ b/aa_histogram.r Tue Jan 29 03:54:09 2019 -0500
@@ -1,69 +1,69 @@
-library(ggplot2)
-
-args <- commandArgs(trailingOnly = TRUE)
-
-mutations.by.id.file = args[1]
-absent.aa.by.id.file = args[2]
-genes = strsplit(args[3], ",")[[1]]
-genes = c(genes, "")
-outdir = args[4]
-
-
-print("---------------- read input ----------------")
-
-mutations.by.id = read.table(mutations.by.id.file, sep="\t", fill=T, header=T, quote="")
-absent.aa.by.id = read.table(absent.aa.by.id.file, sep="\t", fill=T, header=T, quote="")
-
-for(gene in genes){
- graph.title = paste(gene, "AA mutation frequency")
- if(gene == ""){
- mutations.by.id.gene = mutations.by.id[!grepl("unmatched", mutations.by.id$best_match),]
- absent.aa.by.id.gene = absent.aa.by.id[!grepl("unmatched", absent.aa.by.id$best_match),]
-
- graph.title = "AA mutation frequency all"
- } else {
- mutations.by.id.gene = mutations.by.id[grepl(paste("^", gene, sep=""), mutations.by.id$best_match),]
- absent.aa.by.id.gene = absent.aa.by.id[grepl(paste("^", gene, sep=""), absent.aa.by.id$best_match),]
- }
- print(paste("nrow", gene, nrow(absent.aa.by.id.gene)))
- if(nrow(mutations.by.id.gene) == 0){
- next
- }
-
- mutations.at.position = colSums(mutations.by.id.gene[,-c(1,2)])
- aa.at.position = colSums(absent.aa.by.id.gene[,-c(1,2,3,4)])
-
- dat_freq = mutations.at.position / aa.at.position
- dat_freq[is.na(dat_freq)] = 0
- dat_dt = data.frame(i=1:length(dat_freq), freq=dat_freq)
-
-
- print("---------------- plot ----------------")
-
- m = ggplot(dat_dt, aes(x=i, y=freq)) + theme(axis.text.x = element_text(angle = 90, hjust = 1), text = element_text(size=13, colour="black"))
- m = m + geom_bar(stat="identity", colour = "black", fill = "darkgrey", alpha=0.8) + scale_x_continuous(breaks=dat_dt$i, labels=dat_dt$i)
- m = m + annotate("segment", x = 0.5, y = -0.05, xend=26.5, yend=-0.05, colour="darkgreen", size=1) + annotate("text", x = 13, y = -0.1, label="FR1")
- m = m + annotate("segment", x = 26.5, y = -0.07, xend=38.5, yend=-0.07, colour="darkblue", size=1) + annotate("text", x = 32.5, y = -0.15, label="CDR1")
- m = m + annotate("segment", x = 38.5, y = -0.05, xend=55.5, yend=-0.05, colour="darkgreen", size=1) + annotate("text", x = 47, y = -0.1, label="FR2")
- m = m + annotate("segment", x = 55.5, y = -0.07, xend=65.5, yend=-0.07, colour="darkblue", size=1) + annotate("text", x = 60.5, y = -0.15, label="CDR2")
- m = m + annotate("segment", x = 65.5, y = -0.05, xend=104.5, yend=-0.05, colour="darkgreen", size=1) + annotate("text", x = 85, y = -0.1, label="FR3")
- m = m + expand_limits(y=c(-0.1,1)) + xlab("AA position") + ylab("Frequency") + ggtitle(graph.title)
- m = m + theme(panel.background = element_rect(fill = "white", colour="black"), panel.grid.major.y = element_line(colour = "black"), panel.grid.major.x = element_blank())
- #m = m + scale_colour_manual(values=c("black"))
-
- print("---------------- write/print ----------------")
-
-
- dat.sums = data.frame(index=1:length(mutations.at.position), mutations.at.position=mutations.at.position, aa.at.position=aa.at.position)
-
- write.table(dat.sums, paste(outdir, "/aa_histogram_sum_", gene, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=T)
- write.table(mutations.by.id.gene, paste(outdir, "/aa_histogram_count_", gene, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=T)
- write.table(absent.aa.by.id.gene, paste(outdir, "/aa_histogram_absent_", gene, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=T)
- write.table(dat_dt, paste(outdir, "/aa_histogram_", gene, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=T)
-
- png(filename=paste(outdir, "/aa_histogram_", gene, ".png", sep=""), width=1280, height=720)
- print(m)
- dev.off()
-
- ggsave(paste(outdir, "/aa_histogram_", gene, ".pdf", sep=""), m, width=14, height=7)
-}
+library(ggplot2)
+
+args <- commandArgs(trailingOnly = TRUE)
+
+mutations.by.id.file = args[1]
+absent.aa.by.id.file = args[2]
+genes = strsplit(args[3], ",")[[1]]
+genes = c(genes, "")
+outdir = args[4]
+
+
+print("---------------- read input ----------------")
+
+mutations.by.id = read.table(mutations.by.id.file, sep="\t", fill=T, header=T, quote="")
+absent.aa.by.id = read.table(absent.aa.by.id.file, sep="\t", fill=T, header=T, quote="")
+
+for(gene in genes){
+ graph.title = paste(gene, "AA mutation frequency")
+ if(gene == ""){
+ mutations.by.id.gene = mutations.by.id[!grepl("unmatched", mutations.by.id$best_match),]
+ absent.aa.by.id.gene = absent.aa.by.id[!grepl("unmatched", absent.aa.by.id$best_match),]
+
+ graph.title = "AA mutation frequency all"
+ } else {
+ mutations.by.id.gene = mutations.by.id[grepl(paste("^", gene, sep=""), mutations.by.id$best_match),]
+ absent.aa.by.id.gene = absent.aa.by.id[grepl(paste("^", gene, sep=""), absent.aa.by.id$best_match),]
+ }
+ print(paste("nrow", gene, nrow(absent.aa.by.id.gene)))
+ if(nrow(mutations.by.id.gene) == 0){
+ next
+ }
+
+ mutations.at.position = colSums(mutations.by.id.gene[,-c(1,2)])
+ aa.at.position = colSums(absent.aa.by.id.gene[,-c(1,2,3,4)])
+
+ dat_freq = mutations.at.position / aa.at.position
+ dat_freq[is.na(dat_freq)] = 0
+ dat_dt = data.frame(i=1:length(dat_freq), freq=dat_freq)
+
+
+ print("---------------- plot ----------------")
+
+ m = ggplot(dat_dt, aes(x=i, y=freq)) + theme(axis.text.x = element_text(angle = 90, hjust = 1), text = element_text(size=13, colour="black"))
+ m = m + geom_bar(stat="identity", colour = "black", fill = "darkgrey", alpha=0.8) + scale_x_continuous(breaks=dat_dt$i, labels=dat_dt$i)
+ m = m + annotate("segment", x = 0.5, y = -0.05, xend=26.5, yend=-0.05, colour="darkgreen", size=1) + annotate("text", x = 13, y = -0.1, label="FR1")
+ m = m + annotate("segment", x = 26.5, y = -0.07, xend=38.5, yend=-0.07, colour="darkblue", size=1) + annotate("text", x = 32.5, y = -0.15, label="CDR1")
+ m = m + annotate("segment", x = 38.5, y = -0.05, xend=55.5, yend=-0.05, colour="darkgreen", size=1) + annotate("text", x = 47, y = -0.1, label="FR2")
+ m = m + annotate("segment", x = 55.5, y = -0.07, xend=65.5, yend=-0.07, colour="darkblue", size=1) + annotate("text", x = 60.5, y = -0.15, label="CDR2")
+ m = m + annotate("segment", x = 65.5, y = -0.05, xend=104.5, yend=-0.05, colour="darkgreen", size=1) + annotate("text", x = 85, y = -0.1, label="FR3")
+ m = m + expand_limits(y=c(-0.1,1)) + xlab("AA position") + ylab("Frequency") + ggtitle(graph.title)
+ m = m + theme(panel.background = element_rect(fill = "white", colour="black"), panel.grid.major.y = element_line(colour = "black"), panel.grid.major.x = element_blank())
+ #m = m + scale_colour_manual(values=c("black"))
+
+ print("---------------- write/print ----------------")
+
+
+ dat.sums = data.frame(index=1:length(mutations.at.position), mutations.at.position=mutations.at.position, aa.at.position=aa.at.position)
+
+ write.table(dat.sums, paste(outdir, "/aa_histogram_sum_", gene, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=T)
+ write.table(mutations.by.id.gene, paste(outdir, "/aa_histogram_count_", gene, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=T)
+ write.table(absent.aa.by.id.gene, paste(outdir, "/aa_histogram_absent_", gene, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=T)
+ write.table(dat_dt, paste(outdir, "/aa_histogram_", gene, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=T)
+
+ png(filename=paste(outdir, "/aa_histogram_", gene, ".png", sep=""), width=1280, height=720)
+ print(m)
+ dev.off()
+
+ ggsave(paste(outdir, "/aa_histogram_", gene, ".pdf", sep=""), m, width=14, height=7)
+}
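
The aa_histogram.r script above reads four positional arguments via commandArgs: the mutations-by-id table, the absent-AA-by-id table, a comma-separated list of gene prefixes matched against best_match, and an output directory. A hypothetical stand-alone invocation from R (file names and gene prefixes are placeholders, not taken from this patch):

    # Placeholder inputs; the pipeline normally supplies these paths itself
    system2("Rscript", c("aa_histogram.r",
                         "aa_mutations_by_id.txt",
                         "absent_aa_by_id.txt",
                         "IGA,IGG,IGM,IGE",
                         "/tmp/shm_csr_out"))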
diff -r 43a1aa648537 -r ba33b94637ca baseline/Baseline_Functions.r
--- a/baseline/Baseline_Functions.r Thu Dec 07 03:44:38 2017 -0500
+++ b/baseline/Baseline_Functions.r Tue Jan 29 03:54:09 2019 -0500
@@ -1,2287 +1,2287 @@
-#########################################################################################
-# License Agreement
-#
-# THIS WORK IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE
-# ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER
-# APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE
-# OR COPYRIGHT LAW IS PROHIBITED.
-#
-# BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE
-# BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE CONSIDERED
-# TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN
-# CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
-#
-# BASELIne: Bayesian Estimation of Antigen-Driven Selection in Immunoglobulin Sequences
-# Coded by: Mohamed Uduman & Gur Yaari
-# Copyright 2012 Kleinstein Lab
-# Version: 1.3 (01/23/2014)
-#########################################################################################
-
-# Global variables
-
- FILTER_BY_MUTATIONS = 1000
-
- # Nucleotides
- NUCLEOTIDES = c("A","C","G","T")
-
- # Amino Acids
- AMINO_ACIDS <- c("F", "F", "L", "L", "S", "S", "S", "S", "Y", "Y", "*", "*", "C", "C", "*", "W", "L", "L", "L", "L", "P", "P", "P", "P", "H", "H", "Q", "Q", "R", "R", "R", "R", "I", "I", "I", "M", "T", "T", "T", "T", "N", "N", "K", "K", "S", "S", "R", "R", "V", "V", "V", "V", "A", "A", "A", "A", "D", "D", "E", "E", "G", "G", "G", "G")
- names(AMINO_ACIDS) <- c("TTT", "TTC", "TTA", "TTG", "TCT", "TCC", "TCA", "TCG", "TAT", "TAC", "TAA", "TAG", "TGT", "TGC", "TGA", "TGG", "CTT", "CTC", "CTA", "CTG", "CCT", "CCC", "CCA", "CCG", "CAT", "CAC", "CAA", "CAG", "CGT", "CGC", "CGA", "CGG", "ATT", "ATC", "ATA", "ATG", "ACT", "ACC", "ACA", "ACG", "AAT", "AAC", "AAA", "AAG", "AGT", "AGC", "AGA", "AGG", "GTT", "GTC", "GTA", "GTG", "GCT", "GCC", "GCA", "GCG", "GAT", "GAC", "GAA", "GAG", "GGT", "GGC", "GGA", "GGG")
- names(AMINO_ACIDS) <- names(AMINO_ACIDS)
-
- #Amino Acid Traits
- #"*" "A" "C" "D" "E" "F" "G" "H" "I" "K" "L" "M" "N" "P" "Q" "R" "S" "T" "V" "W" "Y"
- #B = "Hydrophobic/Buried" N = "Intermediate/Neutral" S = "Hydrophilic/Surface"
- TRAITS_AMINO_ACIDS_CHOTHIA98 <- c("*","N","B","S","S","B","N","N","B","S","B","B","S","N","S","S","N","N","B","B","N")
- names(TRAITS_AMINO_ACIDS_CHOTHIA98) <- sort(unique(AMINO_ACIDS))
- TRAITS_AMINO_ACIDS <- array(NA,21)
-
- # Codon Table
- CODON_TABLE <- as.data.frame(matrix(NA,ncol=64,nrow=12))
-
- # Substitution Model: Smith DS et al. 1996
- substitution_Literature_Mouse <- matrix(c(0, 0.156222928, 0.601501588, 0.242275484, 0.172506739, 0, 0.241239892, 0.586253369, 0.54636291, 0.255795364, 0, 0.197841727, 0.290240811, 0.467680608, 0.24207858, 0),nrow=4,byrow=T,dimnames=list(NUCLEOTIDES,NUCLEOTIDES))
- substitution_Flu_Human <- matrix(c(0,0.2795596,0.5026927,0.2177477,0.1693210,0,0.3264723,0.5042067,0.4983549,0.3328321,0,0.1688130,0.2021079,0.4696077,0.3282844,0),4,4,byrow=T,dimnames=list(NUCLEOTIDES,NUCLEOTIDES))
- substitution_Flu25_Human <- matrix(c(0,0.2580641,0.5163685,0.2255674,0.1541125,0,0.3210224,0.5248651,0.5239281,0.3101292,0,0.1659427,0.1997207,0.4579444,0.3423350,0),4,4,byrow=T,dimnames=list(NUCLEOTIDES,NUCLEOTIDES))
- load("FiveS_Substitution.RData")
-
- # Mutability Models: Shapiro GS et al. 2002
- triMutability_Literature_Human <- matrix(c(0.24, 1.2, 0.96, 0.43, 2.14, 2, 1.11, 1.9, 0.85, 1.83, 2.36, 1.31, 0.82, 0.52, 0.89, 1.33, 1.4, 0.82, 1.83, 0.73, 1.83, 1.62, 1.53, 0.57, 0.92, 0.42, 0.42, 1.47, 3.44, 2.58, 1.18, 0.47, 0.39, 1.12, 1.8, 0.68, 0.47, 2.19, 2.35, 2.19, 1.05, 1.84, 1.26, 0.28, 0.98, 2.37, 0.66, 1.58, 0.67, 0.92, 1.76, 0.83, 0.97, 0.56, 0.75, 0.62, 2.26, 0.62, 0.74, 1.11, 1.16, 0.61, 0.88, 0.67, 0.37, 0.07, 1.08, 0.46, 0.31, 0.94, 0.62, 0.57, 0.29, NA, 1.44, 0.46, 0.69, 0.57, 0.24, 0.37, 1.1, 0.99, 1.39, 0.6, 2.26, 1.24, 1.36, 0.52, 0.33, 0.26, 1.25, 0.37, 0.58, 1.03, 1.2, 0.34, 0.49, 0.33, 2.62, 0.16, 0.4, 0.16, 0.35, 0.75, 1.85, 0.94, 1.61, 0.85, 2.09, 1.39, 0.3, 0.52, 1.33, 0.29, 0.51, 0.26, 0.51, 3.83, 2.01, 0.71, 0.58, 0.62, 1.07, 0.28, 1.2, 0.74, 0.25, 0.59, 1.09, 0.91, 1.36, 0.45, 2.89, 1.27, 3.7, 0.69, 0.28, 0.41, 1.17, 0.56, 0.93, 3.41, 1, 1, NA, 5.9, 0.74, 2.51, 2.24, 2.24, 1.95, 3.32, 2.34, 1.3, 2.3, 1, 0.66, 0.73, 0.93, 0.41, 0.65, 0.89, 0.65, 0.32, NA, 0.43, 0.85, 0.43, 0.31, 0.31, 0.23, 0.29, 0.57, 0.71, 0.48, 0.44, 0.76, 0.51, 1.7, 0.85, 0.74, 2.23, 2.08, 1.16, 0.51, 0.51, 1, 0.5, NA, NA, 0.71, 2.14), nrow=64,byrow=T)
- triMutability_Literature_Mouse <- matrix(c(1.31, 1.35, 1.42, 1.18, 2.02, 2.02, 1.02, 1.61, 1.99, 1.42, 2.01, 1.03, 2.02, 0.97, 0.53, 0.71, 1.19, 0.83, 0.96, 0.96, 0, 1.7, 2.22, 0.59, 1.24, 1.07, 0.51, 1.68, 3.36, 3.36, 1.14, 0.29, 0.33, 0.9, 1.11, 0.63, 1.08, 2.07, 2.27, 1.74, 0.22, 1.19, 2.37, 1.15, 1.15, 1.56, 0.81, 0.34, 0.87, 0.79, 2.13, 0.49, 0.85, 0.97, 0.36, 0.82, 0.66, 0.63, 1.15, 0.94, 0.85, 0.25, 0.93, 1.19, 0.4, 0.2, 0.44, 0.44, 0.88, 1.06, 0.77, 0.39, 0, 0, 0, 0, 0, 0, 0.43, 0.43, 0.86, 0.59, 0.59, 0, 1.18, 0.86, 2.9, 1.66, 0.4, 0.2, 1.54, 0.43, 0.69, 1.71, 0.68, 0.55, 0.91, 0.7, 1.71, 0.09, 0.27, 0.63, 0.2, 0.45, 1.01, 1.63, 0.96, 1.48, 2.18, 1.2, 1.31, 0.66, 2.13, 0.49, 0, 0, 0, 2.97, 2.8, 0.79, 0.4, 0.5, 0.4, 0.11, 1.68, 0.42, 0.13, 0.44, 0.93, 0.71, 1.11, 1.19, 2.71, 1.08, 3.43, 0.4, 0.67, 0.47, 1.02, 0.14, 1.56, 1.98, 0.53, 0.33, 0.63, 2.06, 1.77, 1.46, 3.74, 2.93, 2.1, 2.18, 0.78, 0.73, 2.93, 0.63, 0.57, 0.17, 0.85, 0.52, 0.31, 0.31, 0, 0, 0.51, 0.29, 0.83, 0.54, 0.28, 0.47, 0.9, 0.99, 1.24, 2.47, 0.73, 0.23, 1.13, 0.24, 2.12, 0.24, 0.33, 0.83, 1.41, 0.62, 0.28, 0.35, 0.77, 0.17, 0.72, 0.58, 0.45, 0.41), nrow=64,byrow=T)
- triMutability_Names <- c("AAA", "AAC", "AAG", "AAT", "ACA", "ACC", "ACG", "ACT", "AGA", "AGC", "AGG", "AGT", "ATA", "ATC", "ATG", "ATT", "CAA", "CAC", "CAG", "CAT", "CCA", "CCC", "CCG", "CCT", "CGA", "CGC", "CGG", "CGT", "CTA", "CTC", "CTG", "CTT", "GAA", "GAC", "GAG", "GAT", "GCA", "GCC", "GCG", "GCT", "GGA", "GGC", "GGG", "GGT", "GTA", "GTC", "GTG", "GTT", "TAA", "TAC", "TAG", "TAT", "TCA", "TCC", "TCG", "TCT", "TGA", "TGC", "TGG", "TGT", "TTA", "TTC", "TTG", "TTT")
- load("FiveS_Mutability.RData")
-
-# Functions
-
- # Translate codon to amino acid
- translateCodonToAminoAcid<-function(Codon){
- return(AMINO_ACIDS[Codon])
- }
-
- # Translate amino acid to trait change
- translateAminoAcidToTraitChange<-function(AminoAcid){
- return(TRAITS_AMINO_ACIDS[AminoAcid])
- }
-
- # Initialize Amino Acid Trait Changes
- initializeTraitChange <- function(traitChangeModel=1,species=1,traitChangeFileName=NULL){
- if(!is.null(traitChangeFileName)){
- tryCatch(
- traitChange <- read.delim(traitChangeFileName,sep="\t",header=T)
- , error = function(ex){
- cat("Error|Error reading trait changes. Please check file name/path and format.\n")
- q()
- }
- )
- }else{
- traitChange <- TRAITS_AMINO_ACIDS_CHOTHIA98
- }
- TRAITS_AMINO_ACIDS <<- traitChange
- }
-
- # Read in formatted nucleotide substitution matrix
- initializeSubstitutionMatrix <- function(substitutionModel,species,subsMatFileName=NULL){
- if(!is.null(subsMatFileName)){
- tryCatch(
- subsMat <- read.delim(subsMatFileName,sep="\t",header=T)
- , error = function(ex){
- cat("Error|Error reading substitution matrix. Please check file name/path and format.\n")
- q()
- }
- )
- if(sum(apply(subsMat,1,sum)==1)!=4) subsMat = t(apply(subsMat,1,function(x)x/sum(x)))
- }else{
- if(substitutionModel==1)subsMat <- substitution_Literature_Mouse
- if(substitutionModel==2)subsMat <- substitution_Flu_Human
- if(substitutionModel==3)subsMat <- substitution_Flu25_Human
-
- }
-
- if(substitutionModel==0){
- subsMat <- matrix(1,4,4)
- subsMat[,] = 1/3
- subsMat[1,1] = 0
- subsMat[2,2] = 0
- subsMat[3,3] = 0
- subsMat[4,4] = 0
- }
-
-
- NUCLEOTIDESN = c(NUCLEOTIDES,"N", "-")
- if(substitutionModel==5){
- subsMat <- FiveS_Substitution
- return(subsMat)
- }else{
- subsMat <- rbind(subsMat,rep(NA,4),rep(NA,4))
- return( matrix(data.matrix(subsMat),6,4,dimnames=list(NUCLEOTIDESN,NUCLEOTIDES) ) )
- }
- }
-
-
- # Read in formatted Mutability file
- initializeMutabilityMatrix <- function(mutabilityModel=1, species=1,mutabilityMatFileName=NULL){
- if(!is.null(mutabilityMatFileName)){
- tryCatch(
- mutabilityMat <- read.delim(mutabilityMatFileName,sep="\t",header=T)
- , error = function(ex){
- cat("Error|Error reading mutability matrix. Please check file name/path and format.\n")
- q()
- }
- )
- }else{
- mutabilityMat <- triMutability_Literature_Human
- if(species==2) mutabilityMat <- triMutability_Literature_Mouse
- }
-
- if(mutabilityModel==0){ mutabilityMat <- matrix(1,64,3)}
-
- if(mutabilityModel==5){
- mutabilityMat <- FiveS_Mutability
- return(mutabilityMat)
- }else{
- return( matrix( data.matrix(mutabilityMat), 64, 3, dimnames=list(triMutability_Names,1:3)) )
- }
- }
-
- # Read FASTA file formats
- # Modified from read.fasta from the seqinR package
- baseline.read.fasta <-
- function (file = system.file("sequences/sample.fasta", package = "seqinr"),
- seqtype = c("DNA", "AA"), as.string = FALSE, forceDNAtolower = TRUE,
- set.attributes = TRUE, legacy.mode = TRUE, seqonly = FALSE,
- strip.desc = FALSE, sizeof.longlong = .Machine$sizeof.longlong,
- endian = .Platform$endian, apply.mask = TRUE)
- {
- seqtype <- match.arg(seqtype)
-
- lines <- readLines(file)
-
- if (legacy.mode) {
- comments <- grep("^;", lines)
- if (length(comments) > 0)
- lines <- lines[-comments]
- }
-
-
- ind_groups<-which(substr(lines, 1L, 3L) == ">>>")
- lines_mod<-lines
-
- if(!length(ind_groups)){
- lines_mod<-c(">>>All sequences combined",lines)
- }
-
- ind_groups<-which(substr(lines_mod, 1L, 3L) == ">>>")
-
- lines <- array("BLA",dim=(length(ind_groups)+length(lines_mod)))
- id<-sapply(1:length(ind_groups),function(i)ind_groups[i]+i-1)+1
- lines[id] <- "THIS IS A FAKE SEQUENCE"
- lines[-id] <- lines_mod
- rm(lines_mod)
-
- ind <- which(substr(lines, 1L, 1L) == ">")
- nseq <- length(ind)
- if (nseq == 0) {
- stop("no line starting with a > character found")
- }
- start <- ind + 1
- end <- ind - 1
-
- while( any(which(ind%in%end)) ){
- ind=ind[-which(ind%in%end)]
- nseq <- length(ind)
- if (nseq == 0) {
- stop("no line starting with a > character found")
- }
- start <- ind + 1
- end <- ind - 1
- }
-
- end <- c(end[-1], length(lines))
- sequences <- lapply(seq_len(nseq), function(i) paste(lines[start[i]:end[i]], collapse = ""))
- if (seqonly)
- return(sequences)
- nomseq <- lapply(seq_len(nseq), function(i) {
-
- #firstword <- strsplit(lines[ind[i]], " ")[[1]][1]
- substr(lines[ind[i]], 2, nchar(lines[ind[i]]))
-
- })
- if (seqtype == "DNA") {
- if (forceDNAtolower) {
- sequences <- as.list(tolower(chartr(".","-",sequences)))
- }else{
- sequences <- as.list(toupper(chartr(".","-",sequences)))
- }
- }
- if (as.string == FALSE)
- sequences <- lapply(sequences, s2c)
- if (set.attributes) {
- for (i in seq_len(nseq)) {
- Annot <- lines[ind[i]]
- if (strip.desc)
- Annot <- substr(Annot, 2L, nchar(Annot))
- attributes(sequences[[i]]) <- list(name = nomseq[[i]],
- Annot = Annot, class = switch(seqtype, AA = "SeqFastaAA",
- DNA = "SeqFastadna"))
- }
- }
- names(sequences) <- nomseq
- return(sequences)
- }
-
-
- # Replaces non-FASTA characters in input files with N
- replaceNonFASTAChars <-function(inSeq="ACGTN-AApA"){
- gsub('[^ACGTNacgt[:punct:]-[:punct:].]','N',inSeq,perl=TRUE)
- }
-
- # Find the germlines in the FASTA list
- germlinesInFile <- function(seqIDs){
- firstChar = sapply(seqIDs,function(x){substr(x,1,1)})
- secondChar = sapply(seqIDs,function(x){substr(x,2,2)})
- return(firstChar==">" & secondChar!=">")
- }
-
- # Find the groups in the FASTA list
- groupsInFile <- function(seqIDs){
- sapply(seqIDs,function(x){substr(x,1,2)})==">>"
- }
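-
- # Illustrative note on the header convention implied by the two helpers above and by
- # baseline.read.fasta, which strips one leading ">" from each header: a group header
- # written ">>>GroupA" ends up as ">>GroupA" (groupsInFile TRUE), a germline written
- # ">>SomeGermline" as ">SomeGermline" (germlinesInFile TRUE), and an input sequence
- # written ">seq1" as "seq1" (both FALSE). The example IDs here are placeholders.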
-
- # In the process of finding germlines/groups, expand from the start to end of the group
- expandTillNext <- function(vecPosToID){
- IDs = names(vecPosToID)
- posOfInterests = which(vecPosToID)
-
- expandedID = rep(NA,length(IDs))
- expandedIDNames = gsub(">","",IDs[posOfInterests])
- startIndexes = c(1,posOfInterests[-1])
- stopIndexes = c(posOfInterests[-1]-1,length(IDs))
- expandedID = unlist(sapply(1:length(startIndexes),function(i){
- rep(i,stopIndexes[i]-startIndexes[i]+1)
- }))
- names(expandedID) = unlist(sapply(1:length(startIndexes),function(i){
- rep(expandedIDNames[i],stopIndexes[i]-startIndexes[i]+1)
- }))
- return(expandedID)
- }
-
- # Process FASTA (list) to return a matrix [input, germline]
- processInputAdvanced <- function(inputFASTA){
-
- seqIDs = names(inputFASTA)
- numbSeqs = length(seqIDs)
- posGermlines1 = germlinesInFile(seqIDs)
- numbGermlines = sum(posGermlines1)
- posGroups1 = groupsInFile(seqIDs)
- numbGroups = sum(posGroups1)
- consDef = NA
-
- if(numbGermlines==0){
- posGermlines = 2
- numbGermlines = 1
- }
-
- glPositionsSum = cumsum(posGermlines1)
- glPositions = table(glPositionsSum)
- #Find the position of the conservation row
- consDefPos = as.numeric(names(glPositions[names(glPositions)!=0 & glPositions==1]))+1
- if( length(consDefPos)> 0 ){
- consDefID = match(consDefPos, glPositionsSum)
-   #The conservation rows need to be pulled out and stored separately
- consDef = inputFASTA[consDefID]
- inputFASTA = inputFASTA[-consDefID]
-
- seqIDs = names(inputFASTA)
- numbSeqs = length(seqIDs)
- posGermlines1 = germlinesInFile(seqIDs)
- numbGermlines = sum(posGermlines1)
- posGroups1 = groupsInFile(seqIDs)
- numbGroups = sum(posGroups1)
- if(numbGermlines==0){
- posGermlines = 2
- numbGermlines = 1
- }
- }
-
- posGroups <- expandTillNext(posGroups1)
- posGermlines <- expandTillNext(posGermlines1)
- posGermlines[posGroups1] = 0
- names(posGermlines)[posGroups1] = names(posGroups)[posGroups1]
- posInput = rep(TRUE,numbSeqs)
- posInput[posGroups1 | posGermlines1] = FALSE
-
- matInput = matrix(NA, nrow=sum(posInput), ncol=2)
- rownames(matInput) = seqIDs[posInput]
- colnames(matInput) = c("Input","Germline")
-
- vecInputFASTA = unlist(inputFASTA)
- matInput[,1] = vecInputFASTA[posInput]
- matInput[,2] = vecInputFASTA[ which( names(inputFASTA)%in%paste(">",names(posGermlines)[posInput],sep="") )[ posGermlines[posInput]] ]
-
- germlines = posGermlines[posInput]
- groups = posGroups[posInput]
-
- return( list("matInput"=matInput, "germlines"=germlines, "groups"=groups, "conservationDefinition"=consDef ))
- }
-
-
- # Replace leading and trailing dashes in the sequence
- replaceLeadingTrailingDashes <- function(x,readEnd){
- iiGap = unlist(gregexpr("-",x[1]))
- ggGap = unlist(gregexpr("-",x[2]))
- #posToChange = intersect(iiGap,ggGap)
-
-
- seqIn = replaceLeadingTrailingDashesHelper(x[1])
- seqGL = replaceLeadingTrailingDashesHelper(x[2])
- seqTemplate = rep('N',readEnd)
- seqIn <- c(seqIn,seqTemplate[(length(seqIn)+1):readEnd])
- seqGL <- c(seqGL,seqTemplate[(length(seqGL)+1):readEnd])
-# if(posToChange!=-1){
-# seqIn[posToChange] = "-"
-# seqGL[posToChange] = "-"
-# }
-
- seqIn = c2s(seqIn[1:readEnd])
- seqGL = c2s(seqGL[1:readEnd])
-
- lenGL = nchar(seqGL)
- if(lenGL seqLen )
- trimmedSeq = substr(seqToTrim,1, ( (getCodonPos(seqLen)[1])-1 ) )
-
- return(trimmedSeq)
- }
-
- # Given a nucleotide position, returns the positions of the 3 nucleotides that make up the codon
- # e.g. nuc 86 is part of nucs 85,86,87
- getCodonPos <- function(nucPos){
- codonNum = (ceiling(nucPos/3))*3
- return( (codonNum-2):codonNum)
- }
-
- # Given a nucleotide position, returns the codon number
- # e.g. nuc 86 = codon 29
- getCodonNumb <- function(nucPos){
- return( ceiling(nucPos/3) )
- }
-
- # Given a codon number, returns all the nucleotide positions that make up that codon
- getCodonNucs <- function(codonNumb){
- getCodonPos(codonNumb*3)
- }
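-
- # Worked example of the three helpers above: getCodonPos(86) returns 85 86 87,
- # getCodonNumb(86) returns 29, and getCodonNucs(29) returns 85 86 87 again.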
-
- computeCodonTable <- function(testID=1){
-
- if(testID<=4){
-   # Pre-compute the mutation types for every codon
- intCounter = 1
- for(pOne in NUCLEOTIDES){
- for(pTwo in NUCLEOTIDES){
- for(pThree in NUCLEOTIDES){
- codon = paste(pOne,pTwo,pThree,sep="")
- colnames(CODON_TABLE)[intCounter] = codon
- intCounter = intCounter + 1
- CODON_TABLE[,codon] = mutationTypeOptimized(cbind(permutateAllCodon(codon),rep(codon,12)))
- }
- }
- }
- chars = c("N","A","C","G","T", "-")
- for(a in chars){
- for(b in chars){
- for(c in chars){
- if(a=="N" | b=="N" | c=="N"){
- #cat(paste(a,b,c),sep="","\n")
- CODON_TABLE[,paste(a,b,c,sep="")] = rep(NA,12)
- }
- }
- }
- }
-
- chars = c("-","A","C","G","T")
- for(a in chars){
- for(b in chars){
- for(c in chars){
- if(a=="-" | b=="-" | c=="-"){
- #cat(paste(a,b,c),sep="","\n")
- CODON_TABLE[,paste(a,b,c,sep="")] = rep(NA,12)
- }
- }
- }
- }
- CODON_TABLE <<- as.matrix(CODON_TABLE)
- }
- }
-
- collapseClone <- function(vecInputSeqs,glSeq,readEnd,nonTerminalOnly=0){
- #print(length(vecInputSeqs))
- vecInputSeqs = unique(vecInputSeqs)
- if(length(vecInputSeqs)==1){
- return( list( c(vecInputSeqs,glSeq), F) )
- }else{
- charInputSeqs <- sapply(vecInputSeqs, function(x){
- s2c(x)[1:readEnd]
- })
- charGLSeq <- s2c(glSeq)
- matClone <- sapply(1:readEnd, function(i){
- posNucs = unique(charInputSeqs[i,])
- posGL = charGLSeq[i]
- error = FALSE
- if(posGL=="-" & sum(!(posNucs%in%c("-","N")))==0 ){
- return(c("-",error))
- }
- if(length(posNucs)==1)
- return(c(posNucs[1],error))
- else{
- if("N"%in%posNucs){
- error=TRUE
- }
- if(sum(!posNucs[posNucs!="N"]%in%posGL)==0){
- return( c(posGL,error) )
- }else{
- #return( c(sample(posNucs[posNucs!="N"],1),error) )
- if(nonTerminalOnly==0){
- return( c(sample(charInputSeqs[i,charInputSeqs[i,]!="N" & charInputSeqs[i,]!=posGL],1),error) )
- }else{
- posNucs = charInputSeqs[i,charInputSeqs[i,]!="N" & charInputSeqs[i,]!=posGL]
- posNucsTable = table(posNucs)
- if(sum(posNucsTable>1)==0){
- return( c(posGL,error) )
- }else{
- return( c(sample( posNucs[posNucs%in%names(posNucsTable)[posNucsTable>1]],1),error) )
- }
- }
-
- }
- }
- })
-
-
- #print(length(vecInputSeqs))
- return(list(c(c2s(matClone[1,]),glSeq),"TRUE"%in%matClone[2,]))
- }
- }
-
- # Compute the expected R/S counts in CDR and FWR for each sequence-germline pair
- getExpectedIndividual <- function(matInput){
- if( any(grep("multicore",search())) ){
- facGL <- factor(matInput[,2])
- facLevels = levels(facGL)
- LisGLs_MutabilityU = mclapply(1:length(facLevels), function(x){
- computeMutabilities(facLevels[x])
- })
- facIndex = match(facGL,facLevels)
-
- LisGLs_Mutability = mclapply(1:nrow(matInput), function(x){
- cInput = rep(NA,nchar(matInput[x,1]))
- cInput[s2c(matInput[x,1])!="N"] = 1
- LisGLs_MutabilityU[[facIndex[x]]] * cInput
- })
-
- LisGLs_Targeting = mclapply(1:dim(matInput)[1], function(x){
- computeTargeting(matInput[x,2],LisGLs_Mutability[[x]])
- })
-
- LisGLs_MutationTypes = mclapply(1:length(matInput[,2]),function(x){
- #print(x)
- computeMutationTypes(matInput[x,2])
- })
-
- LisGLs_Exp = mclapply(1:dim(matInput)[1], function(x){
- computeExpected(LisGLs_Targeting[[x]],LisGLs_MutationTypes[[x]])
- })
-
- ul_LisGLs_Exp = unlist(LisGLs_Exp)
- return(matrix(ul_LisGLs_Exp,ncol=4,nrow=(length(ul_LisGLs_Exp)/4),byrow=T))
- }else{
- facGL <- factor(matInput[,2])
- facLevels = levels(facGL)
- LisGLs_MutabilityU = lapply(1:length(facLevels), function(x){
- computeMutabilities(facLevels[x])
- })
- facIndex = match(facGL,facLevels)
-
- LisGLs_Mutability = lapply(1:nrow(matInput), function(x){
- cInput = rep(NA,nchar(matInput[x,1]))
- cInput[s2c(matInput[x,1])!="N"] = 1
- LisGLs_MutabilityU[[facIndex[x]]] * cInput
- })
-
- LisGLs_Targeting = lapply(1:dim(matInput)[1], function(x){
- computeTargeting(matInput[x,2],LisGLs_Mutability[[x]])
- })
-
- LisGLs_MutationTypes = lapply(1:length(matInput[,2]),function(x){
- #print(x)
- computeMutationTypes(matInput[x,2])
- })
-
- LisGLs_Exp = lapply(1:dim(matInput)[1], function(x){
- computeExpected(LisGLs_Targeting[[x]],LisGLs_MutationTypes[[x]])
- })
-
- ul_LisGLs_Exp = unlist(LisGLs_Exp)
- return(matrix(ul_LisGLs_Exp,ncol=4,nrow=(length(ul_LisGLs_Exp)/4),byrow=T))
-
- }
- }
-
- # Compute the mutabilities of a sequence based on the tri-nucleotide model
- computeMutabilities <- function(paramSeq){
- seqLen = nchar(paramSeq)
- seqMutabilites = rep(NA,seqLen)
-
- gaplessSeq = gsub("-", "", paramSeq)
- gaplessSeqLen = nchar(gaplessSeq)
- gaplessSeqMutabilites = rep(NA,gaplessSeqLen)
-
- if(mutabilityModel!=5){
- pos<- 3:(gaplessSeqLen)
- subSeq = substr(rep(gaplessSeq,gaplessSeqLen-2),(pos-2),(pos+2))
- gaplessSeqMutabilites[pos] =
- tapply( c(
- getMutability( substr(subSeq,1,3), 3) ,
- getMutability( substr(subSeq,2,4), 2),
- getMutability( substr(subSeq,3,5), 1)
- ),rep(1:(gaplessSeqLen-2),3),mean,na.rm=TRUE
- )
- #Pos 1
- subSeq = substr(gaplessSeq,1,3)
- gaplessSeqMutabilites[1] = getMutability(subSeq , 1)
- #Pos 2
- subSeq = substr(gaplessSeq,1,4)
- gaplessSeqMutabilites[2] = mean( c(
- getMutability( substr(subSeq,1,3), 2) ,
- getMutability( substr(subSeq,2,4), 1)
- ),na.rm=T
- )
- seqMutabilites[which(s2c(paramSeq)!="-")]<- gaplessSeqMutabilites
- return(seqMutabilites)
- }else{
-
- pos<- 3:(gaplessSeqLen)
- subSeq = substr(rep(gaplessSeq,gaplessSeqLen-2),(pos-2),(pos+2))
- gaplessSeqMutabilites[pos] = sapply(subSeq,function(x){ getMutability5(x) }, simplify=T)
- seqMutabilites[which(s2c(paramSeq)!="-")]<- gaplessSeqMutabilites
- return(seqMutabilites)
- }
-
- }
-
- # Returns the mutability of a triplet at a given position
- getMutability <- function(codon, pos=1:3){
- triplets <- rownames(mutability)
- mutability[ match(codon,triplets) ,pos]
- }
-
- getMutability5 <- function(fivemer){
- return(mutability[fivemer])
- }
-
- # Returns the substitution probability
- getTransistionProb <- function(nuc){
- substitution[nuc,]
- }
-
- getTransistionProb5 <- function(fivemer){
- if(any(which(fivemer==colnames(substitution)))){
- return(substitution[,fivemer])
- }else{
- return(array(NA,4))
- }
- }
-
- # Given a nuc, returns the other 3 nucs it can mutate to
- canMutateTo <- function(nuc){
- NUCLEOTIDES[- which(NUCLEOTIDES==nuc)]
- }
-
- # Given a nucleotide, returns the probability of each of the other nucleotides it can mutate to
- canMutateToProb <- function(nuc){
- substitution[nuc,canMutateTo(nuc)]
- }
-
- # Compute targeting, based on precomputed mutability & substitution
- computeTargeting <- function(param_strSeq,param_vecMutabilities){
-
- if(substitutionModel!=5){
- vecSeq = s2c(param_strSeq)
- matTargeting = sapply( 1:length(vecSeq), function(x) { param_vecMutabilities[x] * getTransistionProb(vecSeq[x]) } )
- #matTargeting = apply( rbind(vecSeq,param_vecMutabilities),2, function(x) { as.vector(as.numeric(x[2]) * getTransistionProb(x[1])) } )
- dimnames( matTargeting ) = list(NUCLEOTIDES,1:(length(vecSeq)))
- return (matTargeting)
- }else{
-
- seqLen = nchar(param_strSeq)
- seqsubstitution = matrix(NA,ncol=seqLen,nrow=4)
- paramSeq <- param_strSeq
- gaplessSeq = gsub("-", "", paramSeq)
- gaplessSeqLen = nchar(gaplessSeq)
- gaplessSeqSubstitution = matrix(NA,ncol=gaplessSeqLen,nrow=4)
-
- pos<- 3:(gaplessSeqLen)
- subSeq = substr(rep(gaplessSeq,gaplessSeqLen-2),(pos-2),(pos+2))
- gaplessSeqSubstitution[,pos] = sapply(subSeq,function(x){ getTransistionProb5(x) }, simplify=T)
- seqsubstitution[,which(s2c(paramSeq)!="-")]<- gaplessSeqSubstitution
- #matTargeting <- param_vecMutabilities %*% seqsubstitution
- matTargeting <- sweep(seqsubstitution,2,param_vecMutabilities,`*`)
- dimnames( matTargeting ) = list(NUCLEOTIDES,1:(seqLen))
- return (matTargeting)
- }
- }
-
- # Compute the mutation types
- computeMutationTypes <- function(param_strSeq){
- #cat(param_strSeq,"\n")
- #vecSeq = trimToLastCodon(param_strSeq)
- lenSeq = nchar(param_strSeq)
- vecCodons = sapply({1:(lenSeq/3)}*3-2,function(x){substr(param_strSeq,x,x+2)})
- matMutationTypes = matrix( unlist(CODON_TABLE[,vecCodons]) ,ncol=lenSeq,nrow=4, byrow=F)
- dimnames( matMutationTypes ) = list(NUCLEOTIDES,1:(ncol(matMutationTypes)))
- return(matMutationTypes)
- }
- computeMutationTypesFast <- function(param_strSeq){
- matMutationTypes = matrix( CODON_TABLE[,param_strSeq] ,ncol=3,nrow=4, byrow=F)
- #dimnames( matMutationTypes ) = list(NUCLEOTIDES,1:(length(vecSeq)))
- return(matMutationTypes)
- }
- mutationTypeOptimized <- function( matOfCodons ){
- apply( matOfCodons,1,function(x){ mutationType(x[2],x[1]) } )
- }
-
- # Returns a vector of codons 1 mutation away from the given codon
- permutateAllCodon <- function(codon){
- cCodon = s2c(codon)
- matCodons = t(array(cCodon,dim=c(3,12)))
- matCodons[1:4,1] = NUCLEOTIDES
- matCodons[5:8,2] = NUCLEOTIDES
- matCodons[9:12,3] = NUCLEOTIDES
- apply(matCodons,1,c2s)
- }
-
- # Given two codons, tells you if the mutation is R, S or Stop (based on your definition)
- mutationType <- function(codonFrom,codonTo){
- if(testID==4){
- if( is.na(codonFrom) | is.na(codonTo) | is.na(translateCodonToAminoAcid(codonFrom)) | is.na(translateCodonToAminoAcid(codonTo)) ){
- return(NA)
- }else{
- mutationType = "S"
- if( translateAminoAcidToTraitChange(translateCodonToAminoAcid(codonFrom)) != translateAminoAcidToTraitChange(translateCodonToAminoAcid(codonTo)) ){
- mutationType = "R"
- }
- if(translateCodonToAminoAcid(codonTo)=="*" | translateCodonToAminoAcid(codonFrom)=="*"){
- mutationType = "Stop"
- }
- return(mutationType)
- }
- }else if(testID==5){
- if( is.na(codonFrom) | is.na(codonTo) | is.na(translateCodonToAminoAcid(codonFrom)) | is.na(translateCodonToAminoAcid(codonTo)) ){
- return(NA)
- }else{
- if(codonFrom==codonTo){
- mutationType = "S"
- }else{
- codonFrom = s2c(codonFrom)
- codonTo = s2c(codonTo)
- mutationType = "Stop"
- nucOfI = codonFrom[which(codonTo!=codonFrom)]
- if(nucOfI=="C"){
- mutationType = "R"
- }else if(nucOfI=="G"){
- mutationType = "S"
- }
- }
- return(mutationType)
- }
- }else{
- if( is.na(codonFrom) | is.na(codonTo) | is.na(translateCodonToAminoAcid(codonFrom)) | is.na(translateCodonToAminoAcid(codonTo)) ){
- return(NA)
- }else{
- mutationType = "S"
- if( translateCodonToAminoAcid(codonFrom) != translateCodonToAminoAcid(codonTo) ){
- mutationType = "R"
- }
- if(translateCodonToAminoAcid(codonTo)=="*" | translateCodonToAminoAcid(codonFrom)=="*"){
- mutationType = "Stop"
- }
- return(mutationType)
- }
- }
- }
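-
- # Example under the default R/S definition (testID other than 4 or 5), using the
- # AMINO_ACIDS table above: mutationType("AGC","AGT") is "S" (Ser to Ser),
- # mutationType("GAA","GTA") is "R" (Glu to Val), and mutationType("TGG","TGA") is "Stop".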
-
-
- #Given a matrix of targeting & its corresponding mutation types, returns
- #a vector of Exp_R_CDR, Exp_S_CDR, Exp_R_FWR, Exp_S_FWR
- computeExpected <- function(paramTargeting,paramMutationTypes){
- # Replacements
- RPos = which(paramMutationTypes=="R")
- #FWR
- Exp_R_FWR = sum(paramTargeting[ RPos[which(FWR_Nuc_Mat[RPos]==T)] ],na.rm=T)
- #CDR
- Exp_R_CDR = sum(paramTargeting[ RPos[which(CDR_Nuc_Mat[RPos]==T)] ],na.rm=T)
- # Silents
- SPos = which(paramMutationTypes=="S")
- #FWR
- Exp_S_FWR = sum(paramTargeting[ SPos[which(FWR_Nuc_Mat[SPos]==T)] ],na.rm=T)
- #CDR
- Exp_S_CDR = sum(paramTargeting[ SPos[which(CDR_Nuc_Mat[SPos]==T)] ],na.rm=T)
-
- return(c(Exp_R_CDR,Exp_S_CDR,Exp_R_FWR,Exp_S_FWR))
- }
-
- # Count the mutations in a sequence
- # each mutation is treated independently
- analyzeMutations2NucUri_website <- function( rev_in_matrix ){
- paramGL = rev_in_matrix[2,]
- paramSeq = rev_in_matrix[1,]
-
- #Fill seq with GL seq if gapped
- #if( any(paramSeq=="-") ){
- # gapPos_Seq = which(paramSeq=="-")
- # gapPos_Seq_ToReplace = gapPos_Seq[paramGL[gapPos_Seq] != "-"]
- # paramSeq[gapPos_Seq_ToReplace] = paramGL[gapPos_Seq_ToReplace]
- #}
-
-
- #if( any(paramSeq=="N") ){
- # gapPos_Seq = which(paramSeq=="N")
- # gapPos_Seq_ToReplace = gapPos_Seq[paramGL[gapPos_Seq] != "N"]
- # paramSeq[gapPos_Seq_ToReplace] = paramGL[gapPos_Seq_ToReplace]
- #}
-
- analyzeMutations2NucUri( matrix(c( paramGL, paramSeq ),2,length(paramGL),byrow=T) )
-
- }
-
- #1 = GL
- #2 = Seq
- analyzeMutations2NucUri <- function( in_matrix=matrix(c(c("A","A","A","C","C","C"),c("A","G","G","C","C","A")),2,6,byrow=T) ){
- paramGL = in_matrix[2,]
- paramSeq = in_matrix[1,]
- paramSeqUri = paramGL
- #mutations = apply(rbind(paramGL,paramSeq), 2, function(x){!x[1]==x[2]})
- mutations_val = paramGL != paramSeq
- if(any(mutations_val)){
- mutationPos = {1:length(mutations_val)}[mutations_val]
- mutationPos = mutationPos[sapply(mutationPos, function(x){!any(paramSeq[getCodonPos(x)]=="N")})]
- length_mutations =length(mutationPos)
- mutationInfo = rep(NA,length_mutations)
- if(any(mutationPos)){
-
- pos<- mutationPos
- pos_array<-array(sapply(pos,getCodonPos))
- codonGL = paramGL[pos_array]
-
- codonSeq = sapply(pos,function(x){
- seqP = paramGL[getCodonPos(x)]
- muCodonPos = {x-1}%%3+1
- seqP[muCodonPos] = paramSeq[x]
- return(seqP)
- })
- GLcodons = apply(matrix(codonGL,length_mutations,3,byrow=TRUE),1,c2s)
- Seqcodons = apply(codonSeq,2,c2s)
- mutationInfo = apply(rbind(GLcodons , Seqcodons),2,function(x){mutationType(c2s(x[1]),c2s(x[2]))})
- names(mutationInfo) = mutationPos
- }
- if(any(!is.na(mutationInfo))){
- return(mutationInfo[!is.na(mutationInfo)])
- }else{
- return(NA)
- }
-
-
- }else{
- return (NA)
- }
- }
-
- processNucMutations2 <- function(mu){
- if(!is.na(mu)){
- #R
- if(any(mu=="R")){
- Rs = mu[mu=="R"]
- nucNumbs = as.numeric(names(Rs))
- R_CDR = sum(as.integer(CDR_Nuc[nucNumbs]),na.rm=T)
- R_FWR = sum(as.integer(FWR_Nuc[nucNumbs]),na.rm=T)
- }else{
- R_CDR = 0
- R_FWR = 0
- }
-
- #S
- if(any(mu=="S")){
- Ss = mu[mu=="S"]
- nucNumbs = as.numeric(names(Ss))
- S_CDR = sum(as.integer(CDR_Nuc[nucNumbs]),na.rm=T)
- S_FWR = sum(as.integer(FWR_Nuc[nucNumbs]),na.rm=T)
- }else{
- S_CDR = 0
- S_FWR = 0
- }
-
-
- retVec = c(R_CDR,S_CDR,R_FWR,S_FWR)
- retVec[is.na(retVec)]=0
- return(retVec)
- }else{
- return(rep(0,4))
- }
- }
-
-
- ## Z-score Test
- computeZScore <- function(mat, test="Focused"){
- matRes <- matrix(NA,ncol=2,nrow=(nrow(mat)))
- if(test=="Focused"){
- #Z_Focused_CDR
- #P_Denom = sum( mat[1,c(5,6,8)], na.rm=T )
- P = apply(mat[,c(5,6,8)],1,function(x){(x[1]/sum(x))})
- R_mean = apply(cbind(mat[,c(1,2,4)],P),1,function(x){x[4]*(sum(x[1:3]))})
- R_sd=sqrt(R_mean*(1-P))
- matRes[,1] = (mat[,1]-R_mean)/R_sd
-
- #Z_Focused_FWR
- #P_Denom = sum( mat[1,c(7,6,8)], na.rm=T )
- P = apply(mat[,c(7,6,8)],1,function(x){(x[1]/sum(x))})
- R_mean = apply(cbind(mat[,c(3,2,4)],P),1,function(x){x[4]*(sum(x[1:3]))})
- R_sd=sqrt(R_mean*(1-P))
- matRes[,2] = (mat[,3]-R_mean)/R_sd
- }
-
- if(test=="Local"){
- #Z_Focused_CDR
- #P_Denom = sum( mat[1,c(5,6,8)], na.rm=T )
- P = apply(mat[,c(5,6)],1,function(x){(x[1]/sum(x))})
- R_mean = apply(cbind(mat[,c(1,2)],P),1,function(x){x[3]*(sum(x[1:2]))})
- R_sd=sqrt(R_mean*(1-P))
- matRes[,1] = (mat[,1]-R_mean)/R_sd
-
- #Z_Focused_FWR
- #P_Denom = sum( mat[1,c(7,6,8)], na.rm=T )
- P = apply(mat[,c(7,8)],1,function(x){(x[1]/sum(x))})
- R_mean = apply(cbind(mat[,c(3,4)],P),1,function(x){x[3]*(sum(x[1:2]))})
- R_sd=sqrt(R_mean*(1-P))
- matRes[,2] = (mat[,3]-R_mean)/R_sd
- }
-
- if(test=="Imbalanced"){
- #Z_Focused_CDR
- #P_Denom = sum( mat[1,c(5,6,8)], na.rm=T )
- P = apply(mat[,5:8],1,function(x){((x[1]+x[2])/sum(x))})
- R_mean = apply(cbind(mat[,1:4],P),1,function(x){x[5]*(sum(x[1:4]))})
- R_sd=sqrt(R_mean*(1-P))
- matRes[,1] = (mat[,1]-R_mean)/R_sd
-
- #Z_Focused_FWR
- #P_Denom = sum( mat[1,c(7,6,8)], na.rm=T )
- P = apply(mat[,5:8],1,function(x){((x[3]+x[4])/sum(x))})
- R_mean = apply(cbind(mat[,1:4],P),1,function(x){x[5]*(sum(x[1:4]))})
- R_sd=sqrt(R_mean*(1-P))
- matRes[,2] = (mat[,3]-R_mean)/R_sd
- }
-
- matRes[is.nan(matRes)] = NA
- return(matRes)
- }
-
- # Return a p-value for a z-score
- z2p <- function(z){
- p=NA
- if( !is.nan(z) && !is.na(z)){
- if(z>0){
- p = (1 - pnorm(z,0,1))
- } else if(z<0){
- p = (-1 * pnorm(z,0,1))
- } else{
- p = 0.5
- }
- }else{
- p = NA
- }
- return(p)
- }
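-
- # Example: z2p(1.96) returns 1 - pnorm(1.96), about 0.025, while z2p(-1.96) returns
- # -pnorm(-1.96), about -0.025; the sign of the returned value encodes the direction.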
-
-
- ## Bayesian Test
-
- # Fitted parameters for the Bayesian framework
-BAYESIAN_FITTED<-c(0.407277142798302, 0.554007336744485, 0.63777155771234, 0.693989162719009, 0.735450014674917, 0.767972534429806, 0.794557287143399, 0.816906816601605, 0.83606796225341, 0.852729446430296, 0.867370424541641, 0.880339760590323, 0.891900995024999, 0.902259181289864, 0.911577919359,0.919990301665853, 0.927606458124537, 0.934518806350661, 0.940805863754375, 0.946534836475715, 0.951763691199255, 0.95654428191308, 0.960920179487397, 0.964930893680829, 0.968611312149038, 0.971992459313836, 0.975102110004818, 0.977964943023096, 0.980603428208439, 0.983037660179428, 0.985285800977406, 0.987364285326685, 0.989288037855441, 0.991070478823525, 0.992723699729969, 0.994259575477392, 0.995687688867975, 0.997017365051493, 0.998257085153047, 0.999414558305388, 1.00049681357804, 1.00151036237481, 1.00246080204981, 1.00335370751909, 1.0041939329768, 1.0049859393417, 1.00573382091263, 1.00644127217376, 1.00711179729107, 1.00774845526417, 1.00835412715854, 1.00893143010366, 1.00948275846309, 1.01001030293661, 1.01051606798079, 1.01100188771288, 1.01146944044216, 1.01192026195449, 1.01235575766094, 1.01277721370986)
- CONST_i <- sort(c(((2^(seq(-39,0,length.out=201)))/2)[1:200],(c(0:11,13:99)+0.5)/100,1-(2^(seq(-39,0,length.out=201)))/2))
-
- # Given x, N & p, returns a pdf
- calculate_bayes <- function ( x=3, N=10, p=0.33,
- i=CONST_i,
- max_sigma=20,length_sigma=4001
- ){
- if(!0%in%N){
- G <- max(length(x),length(N),length(p))
- x=array(x,dim=G)
- N=array(N,dim=G)
- p=array(p,dim=G)
- sigma_s<-seq(-max_sigma,max_sigma,length.out=length_sigma)
- sigma_1<-log({i/{1-i}}/{p/{1-p}})
- index<-min(N,60)
- y<-dbeta(i,x+BAYESIAN_FITTED[index],N+BAYESIAN_FITTED[index]-x)*(1-p)*p*exp(sigma_1)/({1-p}^2+2*p*{1-p}*exp(sigma_1)+{p^2}*exp(2*sigma_1))
- if(!sum(is.na(y))){
- tmp<-approx(sigma_1,y,sigma_s)$y
- tmp/sum(tmp)/{2*max_sigma/{length_sigma-1}}
- }else{
- return(NA)
- }
- }else{
- return(NA)
- }
- }
- # Given a mat of observed & expected, return a list of CDR & FWR pdf for selection
- computeBayesianScore <- function(mat, test="Focused", max_sigma=20,length_sigma=4001){
- flagOneSeq = F
- if(nrow(mat)==1){
- mat=rbind(mat,mat)
- flagOneSeq = T
- }
- if(test=="Focused"){
- #CDR
- P = c(apply(mat[,c(5,6,8)],1,function(x){(x[1]/sum(x))}),0.5)
- N = c(apply(mat[,c(1,2,4)],1,function(x){(sum(x))}),0)
- X = c(mat[,1],0)
- bayesCDR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
- bayesCDR = bayesCDR[-length(bayesCDR)]
-
- #FWR
- P = c(apply(mat[,c(7,6,8)],1,function(x){(x[1]/sum(x))}),0.5)
- N = c(apply(mat[,c(3,2,4)],1,function(x){(sum(x))}),0)
- X = c(mat[,3],0)
- bayesFWR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
- bayesFWR = bayesFWR[-length(bayesFWR)]
- }
-
- if(test=="Local"){
- #CDR
- P = c(apply(mat[,c(5,6)],1,function(x){(x[1]/sum(x))}),0.5)
- N = c(apply(mat[,c(1,2)],1,function(x){(sum(x))}),0)
- X = c(mat[,1],0)
- bayesCDR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
- bayesCDR = bayesCDR[-length(bayesCDR)]
-
- #FWR
- P = c(apply(mat[,c(7,8)],1,function(x){(x[1]/sum(x))}),0.5)
- N = c(apply(mat[,c(3,4)],1,function(x){(sum(x))}),0)
- X = c(mat[,3],0)
- bayesFWR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
- bayesFWR = bayesFWR[-length(bayesFWR)]
- }
-
- if(test=="Imbalanced"){
- #CDR
- P = c(apply(mat[,c(5:8)],1,function(x){((x[1]+x[2])/sum(x))}),0.5)
- N = c(apply(mat[,c(1:4)],1,function(x){(sum(x))}),0)
- X = c(apply(mat[,c(1:2)],1,function(x){(sum(x))}),0)
- bayesCDR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
- bayesCDR = bayesCDR[-length(bayesCDR)]
-
- #FWR
- P = c(apply(mat[,c(5:8)],1,function(x){((x[3]+x[4])/sum(x))}),0.5)
- N = c(apply(mat[,c(1:4)],1,function(x){(sum(x))}),0)
- X = c(apply(mat[,c(3:4)],1,function(x){(sum(x))}),0)
- bayesFWR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
- bayesFWR = bayesFWR[-length(bayesFWR)]
- }
-
- if(test=="ImbalancedSilent"){
- #CDR
- P = c(apply(mat[,c(6,8)],1,function(x){((x[1])/sum(x))}),0.5)
- N = c(apply(mat[,c(2,4)],1,function(x){(sum(x))}),0)
- X = c(apply(mat[,c(2,4)],1,function(x){(x[1])}),0)
- bayesCDR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
- bayesCDR = bayesCDR[-length(bayesCDR)]
-
- #FWR
- P = c(apply(mat[,c(6,8)],1,function(x){((x[2])/sum(x))}),0.5)
- N = c(apply(mat[,c(2,4)],1,function(x){(sum(x))}),0)
- X = c(apply(mat[,c(2,4)],1,function(x){(x[2])}),0)
- bayesFWR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
- bayesFWR = bayesFWR[-length(bayesFWR)]
- }
-
- if(flagOneSeq==T){
- bayesCDR = bayesCDR[1]
- bayesFWR = bayesFWR[1]
- }
- return( list("CDR"=bayesCDR, "FWR"=bayesFWR) )
- }
-
- ## Convolution
- break2chunks<-function(G=1000){
- base<-2^round(log(sqrt(G),2),0)
- return(c(rep(base,floor(G/base)-1),base+G-(floor(G/base)*base)))
- }
-
- PowersOfTwo <- function(G=100){
- exponents <- array()
- i = 0
- while(G > 0){
- i=i+1
- exponents[i] <- floor( log2(G) )
- G <- G-2^exponents[i]
- }
- return(exponents)
- }
-
- convolutionPowersOfTwo <- function( cons, length_sigma=4001 ){
- G = ncol(cons)
- if(G>1){
- for(gen in log(G,2):1){
- ll<-seq(from=2,to=2^gen,by=2)
- sapply(ll,function(l){cons[,l/2]<<-weighted_conv(cons[,l],cons[,l-1],length_sigma=length_sigma)})
- }
- }
- return( cons[,1] )
- }
-
- convolutionPowersOfTwoByTwos <- function( cons, length_sigma=4001,G=1 ){
- if(length(ncol(cons))) G<-ncol(cons)
- groups <- PowersOfTwo(G)
- matG <- matrix(NA, ncol=length(groups), nrow=length(cons)/G )
- startIndex = 1
- for( i in 1:length(groups) ){
- stopIndex <- 2^groups[i] + startIndex - 1
- if(stopIndex!=startIndex){
- matG[,i] <- convolutionPowersOfTwo( cons[,startIndex:stopIndex], length_sigma=length_sigma )
- startIndex = stopIndex + 1
- }
- else {
- if(G>1) matG[,i] <- cons[,startIndex:stopIndex]
- else matG[,i] <- cons
- #startIndex = stopIndex + 1
- }
- }
- return( list( matG, groups ) )
- }
-
- weighted_conv<-function(x,y,w=1,m=100,length_sigma=4001){
- lx<-length(x)
- ly<-length(y)
- if({lx1){
- while( i1 & Length_Postrior<=Threshold){
- cons = matrix(unlist(listPosteriors),length(listPosteriors[[1]]),length(listPosteriors))
- listMatG <- convolutionPowersOfTwoByTwos(cons,length_sigma=length_sigma)
- y<-calculate_bayesGHelper(listMatG,length_sigma=length_sigma)
- return( y/sum(y)/(2*max_sigma/(length_sigma-1)) )
- }else if(Length_Postrior==1) return(listPosteriors[[1]])
- else if(Length_Postrior==0) return(NA)
- else {
- cons = matrix(unlist(listPosteriors),length(listPosteriors[[1]]),length(listPosteriors))
- y = fastConv(cons,max_sigma=max_sigma, length_sigma=length_sigma )
- return( y/sum(y)/(2*max_sigma/(length_sigma-1)) )
- }
- }
-
- fastConv<-function(cons, max_sigma=20, length_sigma=4001){
- chunks<-break2chunks(G=ncol(cons))
- if(ncol(cons)==3) chunks<-2:1
- index_chunks_end <- cumsum(chunks)
- index_chunks_start <- c(1,index_chunks_end[-length(index_chunks_end)]+1)
- index_chunks <- cbind(index_chunks_start,index_chunks_end)
-
- case <- sum(chunks!=chunks[1])
- if(case==1) End <- max(1,((length(index_chunks)/2)-1))
- else End <- max(1,((length(index_chunks)/2)))
-
- firsts <- sapply(1:End,function(i){
- indexes<-index_chunks[i,1]:index_chunks[i,2]
- convolutionPowersOfTwoByTwos(cons[ ,indexes])[[1]]
- })
- if(case==0){
- result<-calculate_bayesGHelper( convolutionPowersOfTwoByTwos(firsts) )
- }else if(case==1){
- last<-list(calculate_bayesGHelper(
- convolutionPowersOfTwoByTwos( cons[ ,index_chunks[length(index_chunks)/2,1]:index_chunks[length(index_chunks)/2,2]] )
- ),0)
- result_first<-calculate_bayesGHelper(convolutionPowersOfTwoByTwos(firsts))
- result<-calculate_bayesGHelper(
- list(
- cbind(
- result_first,last[[1]]),
- c(log(index_chunks_end[length(index_chunks)/2-1],2),log(index_chunks[length(index_chunks)/2,2]-index_chunks[length(index_chunks)/2,1]+1,2))
- )
- )
- }
- return(as.vector(result))
- }
-
- # Computes the 95% CI for a pdf
- calcBayesCI <- function(Pdf,low=0.025,up=0.975,max_sigma=20, length_sigma=4001){
- if(length(Pdf)!=length_sigma) return(NA)
- sigma_s=seq(-max_sigma,max_sigma,length.out=length_sigma)
- cdf = cumsum(Pdf)
- cdf = cdf/cdf[length(cdf)]
- return( c(sigma_s[findInterval(low,cdf)-1] , sigma_s[findInterval(up,cdf)]) )
- }
-
- # Computes a mean for a pdf
- calcBayesMean <- function(Pdf,max_sigma=20,length_sigma=4001){
- if(length(Pdf)!=length_sigma) return(NA)
- sigma_s=seq(-max_sigma,max_sigma,length.out=length_sigma)
- norm = {length_sigma-1}/2/max_sigma
- return( (Pdf%*%sigma_s/norm) )
- }
-
- # Returns the mean, and the 95% CI for a pdf
- calcBayesOutputInfo <- function(Pdf,low=0.025,up=0.975,max_sigma=20, length_sigma=4001){
- if(is.na(Pdf))
- return(rep(NA,3))
- bCI = calcBayesCI(Pdf=Pdf,low=low,up=up,max_sigma=max_sigma,length_sigma=length_sigma)
- bMean = calcBayesMean(Pdf=Pdf,max_sigma=max_sigma,length_sigma=length_sigma)
- return(c(bMean, bCI))
- }
-
- # Computes the p-value of a pdf
- computeSigmaP <- function(Pdf, length_sigma=4001, max_sigma=20){
- if(length(Pdf)>1){
- norm = {length_sigma-1}/2/max_sigma
- pVal = {sum(Pdf[1:{{length_sigma-1}/2}]) + Pdf[{{length_sigma+1}/2}]/2}/norm
- if(pVal>0.5){
- pVal = pVal-1
- }
- return(pVal)
- }else{
- return(NA)
- }
- }
-
- # Compute p-value of two distributions
- compareTwoDistsFaster <-function(sigma_S=seq(-20,20,length.out=4001), N=10000, dens1=runif(4001,0,1), dens2=runif(4001,0,1)){
- #print(c(length(dens1),length(dens2)))
- if(length(dens1)>1 & length(dens2)>1 ){
- dens1<-dens1/sum(dens1)
- dens2<-dens2/sum(dens2)
- cum2 <- cumsum(dens2)-dens2/2
- tmp<- sum(sapply(1:length(dens1),function(i)return(dens1[i]*cum2[i])))
- #print(tmp)
- if(tmp>0.5)tmp<-tmp-1
- return( tmp )
- }
- else {
- return(NA)
- }
- #return (sum(sapply(1:N,function(i)(sample(sigma_S,1,prob=dens1)>sample(sigma_S,1,prob=dens2))))/N)
- }
-
- # Get the number of sequences contributing to the sigma (i.e. sequences with mutations)
- numberOfSeqsWithMutations <- function(matMutations,test=1){
- if(test==4)test=2
- cdrSeqs <- 0
- fwrSeqs <- 0
- if(test==1){#focused
- cdrMutations <- apply(matMutations, 1, function(x){ sum(x[c(1,2,4)]) })
- fwrMutations <- apply(matMutations, 1, function(x){ sum(x[c(3,4,2)]) })
- if( any(which(cdrMutations>0)) ) cdrSeqs <- sum(cdrMutations>0)
- if( any(which(fwrMutations>0)) ) fwrSeqs <- sum(fwrMutations>0)
- }
- if(test==2){#local
- cdrMutations <- apply(matMutations, 1, function(x){ sum(x[c(1,2)]) })
- fwrMutations <- apply(matMutations, 1, function(x){ sum(x[c(3,4)]) })
- if( any(which(cdrMutations>0)) ) cdrSeqs <- sum(cdrMutations>0)
- if( any(which(fwrMutations>0)) ) fwrSeqs <- sum(fwrMutations>0)
- }
- return(c("CDR"=cdrSeqs, "FWR"=fwrSeqs))
-}
-
-
-
-shadeColor <- function(sigmaVal=NA,pVal=NA){
- if(is.na(sigmaVal) & is.na(pVal)) return(NA)
- if(is.na(sigmaVal) & !is.na(pVal)) sigmaVal=sign(pVal)
- if(is.na(pVal) || pVal==1 || pVal==0){
- returnColor = "#FFFFFF";
- }else{
- colVal=abs(pVal);
-
- if(sigmaVal<0){
- if(colVal>0.1)
- returnColor = "#CCFFCC";
- if(colVal<=0.1)
- returnColor = "#99FF99";
- if(colVal<=0.050)
- returnColor = "#66FF66";
- if(colVal<=0.010)
- returnColor = "#33FF33";
- if(colVal<=0.005)
- returnColor = "#00FF00";
-
- }else{
- if(colVal>0.1)
- returnColor = "#FFCCCC";
- if(colVal<=0.1)
- returnColor = "#FF9999";
- if(colVal<=0.05)
- returnColor = "#FF6666";
- if(colVal<=0.01)
- returnColor = "#FF3333";
- if(colVal<0.005)
- returnColor = "#FF0000";
- }
- }
-
- return(returnColor)
-}
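-
-# Example: shadeColor(sigmaVal=-1, pVal=0.004) falls through to "#00FF00", while
-# shadeColor(sigmaVal=1, pVal=0.02) returns "#FF6666".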
-
-
-
-plotHelp <- function(xfrac=0.05,yfrac=0.05,log=FALSE){
- if(!log){
- x = par()$usr[1]-(par()$usr[2]-par()$usr[1])*xfrac
- y = par()$usr[4]+(par()$usr[4]-par()$usr[3])*yfrac
- }else {
- if(log==2){
- x = par()$usr[1]-(par()$usr[2]-par()$usr[1])*xfrac
- y = 10^((par()$usr[4])+((par()$usr[4])-(par()$usr[3]))*yfrac)
- }
- if(log==1){
- x = 10^((par()$usr[1])-((par()$usr[2])-(par()$usr[1]))*xfrac)
- y = par()$usr[4]+(par()$usr[4]-par()$usr[3])*yfrac
- }
- if(log==3){
- x = 10^((par()$usr[1])-((par()$usr[2])-(par()$usr[1]))*xfrac)
- y = 10^((par()$usr[4])+((par()$usr[4])-(par()$usr[3]))*yfrac)
- }
- }
- return(c("x"=x,"y"=y))
-}
-
-# SHMulation
-
- # Based on targeting, introduce a single mutation & then update the targeting
- oneMutation <- function(){
-  # Pick a position + mutation
- posMutation = sample(1:(seqGermlineLen*4),1,replace=F,prob=as.vector(seqTargeting))
- posNucNumb = ceiling(posMutation/4) # Nucleotide number
- posNucKind = 4 - ( (posNucNumb*4) - posMutation ) # Nuc the position mutates to
-
- #mutate the simulation sequence
- seqSimVec <- s2c(seqSim)
- seqSimVec[posNucNumb] <- NUCLEOTIDES[posNucKind]
- seqSim <<- c2s(seqSimVec)
-
- #update Mutability, Targeting & MutationsTypes
- updateMutabilityNTargeting(posNucNumb)
-
- #return(c(posNucNumb,NUCLEOTIDES[posNucKind]))
- return(posNucNumb)
- }
-
- updateMutabilityNTargeting <- function(position){
- min_i<-max((position-2),1)
- max_i<-min((position+2),nchar(seqSim))
- min_ii<-min(min_i,3)
-
- #mutability - update locally
- seqMutability[(min_i):(max_i)] <<- computeMutabilities(substr(seqSim,position-4,position+4))[(min_ii):(max_i-min_i+min_ii)]
-
-
- #targeting - compute locally
- seqTargeting[,min_i:max_i] <<- computeTargeting(substr(seqSim,min_i,max_i),seqMutability[min_i:max_i])
- seqTargeting[is.na(seqTargeting)] <<- 0
- #mutCodonPos = getCodonPos(position)
- mutCodonPos = seq(getCodonPos(min_i)[1],getCodonPos(max_i)[3])
- #cat(mutCodonPos,"\n")
- mutTypeCodon = getCodonPos(position)
- seqMutationTypes[,mutTypeCodon] <<- computeMutationTypesFast( substr(seqSim,mutTypeCodon[1],mutTypeCodon[3]) )
- # Stop = 0
- if(any(seqMutationTypes[,mutCodonPos]=="Stop",na.rm=T )){
- seqTargeting[,mutCodonPos][seqMutationTypes[,mutCodonPos]=="Stop"] <<- 0
- }
-
-
- #Selection
- selectedPos = (min_i*4-4)+(which(seqMutationTypes[,min_i:max_i]=="R"))
- # CDR
- selectedCDR = selectedPos[which(matCDR[selectedPos]==T)]
- seqTargeting[selectedCDR] <<- seqTargeting[selectedCDR] * exp(selCDR)
- seqTargeting[selectedCDR] <<- seqTargeting[selectedCDR]/baseLineCDR_K
-
- # FWR
- selectedFWR = selectedPos[which(matFWR[selectedPos]==T)]
- seqTargeting[selectedFWR] <<- seqTargeting[selectedFWR] * exp(selFWR)
- seqTargeting[selectedFWR] <<- seqTargeting[selectedFWR]/baseLineFWR_K
-
- }
-
-
-
- # Validate the mutation: if the mutation has not been sampled before validate it, else discard it.
- validateMutation <- function(){
- if( !(mutatedPos%in%mutatedPositions) ){ # if it's a new mutation
- uniqueMutationsIntroduced <<- uniqueMutationsIntroduced + 1
- mutatedPositions[uniqueMutationsIntroduced] <<- mutatedPos
- }else{
- if(substr(seqSim,mutatedPos,mutatedPos)==substr(seqGermline,mutatedPos,mutatedPos)){ # back to germline mutation
- mutatedPositions <<- mutatedPositions[-which(mutatedPositions==mutatedPos)]
- uniqueMutationsIntroduced <<- uniqueMutationsIntroduced - 1
- }
- }
- }
-
-
-
- # Places text (labels) at normalized coordinates
- myaxis <- function(xfrac=0.05,yfrac=0.05,log=FALSE,w="text",cex=1,adj=1,thecol="black"){
- par(xpd=TRUE)
- if(!log)
- text(par()$usr[1]-(par()$usr[2]-par()$usr[1])*xfrac,par()$usr[4]+(par()$usr[4]-par()$usr[3])*yfrac,w,cex=cex,adj=adj,col=thecol)
- else {
- if(log==2)
- text(
- par()$usr[1]-(par()$usr[2]-par()$usr[1])*xfrac,
- 10^((par()$usr[4])+((par()$usr[4])-(par()$usr[3]))*yfrac),
- w,cex=cex,adj=adj,col=thecol)
- if(log==1)
- text(
- 10^((par()$usr[1])-((par()$usr[2])-(par()$usr[1]))*xfrac),
- par()$usr[4]+(par()$usr[4]-par()$usr[3])*yfrac,
- w,cex=cex,adj=adj,col=thecol)
- if(log==3)
- text(
- 10^((par()$usr[1])-((par()$usr[2])-(par()$usr[1]))*xfrac),
- 10^((par()$usr[4])+((par()$usr[4])-(par()$usr[3]))*yfrac),
- w,cex=cex,adj=adj,col=thecol)
- }
- par(xpd=FALSE)
- }
-
-
-
- # Count the mutations in a sequence
- analyzeMutations <- function( inputMatrixIndex, model = 0 , multipleMutation=0, seqWithStops=0){
-
- paramGL = s2c(matInput[inputMatrixIndex,2])
- paramSeq = s2c(matInput[inputMatrixIndex,1])
-
- #if( any(paramSeq=="N") ){
- # gapPos_Seq = which(paramSeq=="N")
- # gapPos_Seq_ToReplace = gapPos_Seq[paramGL[gapPos_Seq] != "N"]
- # paramSeq[gapPos_Seq_ToReplace] = paramGL[gapPos_Seq_ToReplace]
- #}
- mutations_val = paramGL != paramSeq
-
- if(any(mutations_val)){
- mutationPos = which(mutations_val)#{1:length(mutations_val)}[mutations_val]
- length_mutations =length(mutationPos)
- mutationInfo = rep(NA,length_mutations)
-
- pos<- mutationPos
- pos_array<-array(sapply(pos,getCodonPos))
- codonGL = paramGL[pos_array]
- codonSeqWhole = paramSeq[pos_array]
- codonSeq = sapply(pos,function(x){
- seqP = paramGL[getCodonPos(x)]
- muCodonPos = {x-1}%%3+1
- seqP[muCodonPos] = paramSeq[x]
- return(seqP)
- })
- GLcodons = apply(matrix(codonGL,length_mutations,3,byrow=TRUE),1,c2s)
- SeqcodonsWhole = apply(matrix(codonSeqWhole,length_mutations,3,byrow=TRUE),1,c2s)
- Seqcodons = apply(codonSeq,2,c2s)
-
- mutationInfo = apply(rbind(GLcodons , Seqcodons),2,function(x){mutationType(c2s(x[1]),c2s(x[2]))})
- names(mutationInfo) = mutationPos
-
- mutationInfoWhole = apply(rbind(GLcodons , SeqcodonsWhole),2,function(x){mutationType(c2s(x[1]),c2s(x[2]))})
- names(mutationInfoWhole) = mutationPos
-
- mutationInfo <- mutationInfo[!is.na(mutationInfo)]
- mutationInfoWhole <- mutationInfoWhole[!is.na(mutationInfoWhole)]
-
- if(any(!is.na(mutationInfo))){
-
- #Filter based on Stop (at the codon level)
- if(seqWithStops==1){
- nucleotidesAtStopCodons = names(mutationInfoWhole[mutationInfoWhole!="Stop"])
- mutationInfo = mutationInfo[nucleotidesAtStopCodons]
- mutationInfoWhole = mutationInfo[nucleotidesAtStopCodons]
- }else{
- countStops = sum(mutationInfoWhole=="Stop")
- if(seqWithStops==2 & countStops==0) mutationInfo = NA
- if(seqWithStops==3 & countStops>0) mutationInfo = NA
- }
-
- if(any(!is.na(mutationInfo))){
- #Filter mutations based on multipleMutation
- if(multipleMutation==1 & !is.na(mutationInfo)){
- mutationCodons = getCodonNumb(as.numeric(names(mutationInfoWhole)))
- tableMutationCodons <- table(mutationCodons)
- codonsWithMultipleMutations <- as.numeric(names(tableMutationCodons[tableMutationCodons>1]))
- if(any(codonsWithMultipleMutations)){
- #remove the nucleotide mutations in the codons with multiple mutations
- mutationInfo <- mutationInfo[!(mutationCodons %in% codonsWithMultipleMutations)]
- #replace those codons with Ns in the input sequence
- paramSeq[unlist(lapply(codonsWithMultipleMutations, getCodonNucs))] = "N"
- matInput[inputMatrixIndex,1] <<- c2s(paramSeq)
- }
- }
-
- #Filter mutations based on the model
- if(any(mutationInfo)==T | is.na(any(mutationInfo))){
-
- if(model==1 & !is.na(mutationInfo)){
- mutationInfo <- mutationInfo[mutationInfo=="S"]
- }
- if(any(mutationInfo)==T | is.na(any(mutationInfo))) return(mutationInfo)
- else return(NA)
- }else{
- return(NA)
- }
- }else{
- return(NA)
- }
-
-
- }else{
- return(NA)
- }
-
-
- }else{
- return (NA)
- }
- }
-
- analyzeMutationsFixed <- function( inputArray, model = 0 , multipleMutation=0, seqWithStops=0){
-
- paramGL = s2c(inputArray[2])
- paramSeq = s2c(inputArray[1])
- inputSeq <- inputArray[1]
- #if( any(paramSeq=="N") ){
- # gapPos_Seq = which(paramSeq=="N")
- # gapPos_Seq_ToReplace = gapPos_Seq[paramGL[gapPos_Seq] != "N"]
- # paramSeq[gapPos_Seq_ToReplace] = paramGL[gapPos_Seq_ToReplace]
- #}
- mutations_val = paramGL != paramSeq
-
- if(any(mutations_val)){
- mutationPos = which(mutations_val)#{1:length(mutations_val)}[mutations_val]
- length_mutations =length(mutationPos)
- mutationInfo = rep(NA,length_mutations)
-
- pos<- mutationPos
- pos_array<-array(sapply(pos,getCodonPos))
- codonGL = paramGL[pos_array]
- codonSeqWhole = paramSeq[pos_array]
- codonSeq = sapply(pos,function(x){
- seqP = paramGL[getCodonPos(x)]
- muCodonPos = {x-1}%%3+1
- seqP[muCodonPos] = paramSeq[x]
- return(seqP)
- })
- GLcodons = apply(matrix(codonGL,length_mutations,3,byrow=TRUE),1,c2s)
- SeqcodonsWhole = apply(matrix(codonSeqWhole,length_mutations,3,byrow=TRUE),1,c2s)
- Seqcodons = apply(codonSeq,2,c2s)
-
- mutationInfo = apply(rbind(GLcodons , Seqcodons),2,function(x){mutationType(c2s(x[1]),c2s(x[2]))})
- names(mutationInfo) = mutationPos
-
- mutationInfoWhole = apply(rbind(GLcodons , SeqcodonsWhole),2,function(x){mutationType(c2s(x[1]),c2s(x[2]))})
- names(mutationInfoWhole) = mutationPos
-
- mutationInfo <- mutationInfo[!is.na(mutationInfo)]
- mutationInfoWhole <- mutationInfoWhole[!is.na(mutationInfoWhole)]
-
- if(any(!is.na(mutationInfo))){
-
- #Filter based on Stop (at the codon level)
- if(seqWithStops==1){
- nucleotidesAtStopCodons = names(mutationInfoWhole[mutationInfoWhole!="Stop"])
- mutationInfo = mutationInfo[nucleotidesAtStopCodons]
- mutationInfoWhole = mutationInfo[nucleotidesAtStopCodons]
- }else{
- countStops = sum(mutationInfoWhole=="Stop")
- if(seqWithStops==2 & countStops==0) mutationInfo = NA
- if(seqWithStops==3 & countStops>0) mutationInfo = NA
- }
-
- if(any(!is.na(mutationInfo))){
- #Filter mutations based on multipleMutation
- if(multipleMutation==1 & !is.na(mutationInfo)){
- mutationCodons = getCodonNumb(as.numeric(names(mutationInfoWhole)))
- tableMutationCodons <- table(mutationCodons)
- codonsWithMultipleMutations <- as.numeric(names(tableMutationCodons[tableMutationCodons>1]))
- if(any(codonsWithMultipleMutations)){
- #remove the nucleotide mutations in the codons with multiple mutations
- mutationInfo <- mutationInfo[!(mutationCodons %in% codonsWithMultipleMutations)]
- #replace those codons with Ns in the input sequence
- paramSeq[unlist(lapply(codonsWithMultipleMutations, getCodonNucs))] = "N"
- #matInput[inputMatrixIndex,1] <<- c2s(paramSeq)
- inputSeq <- c2s(paramSeq)
- }
- }
-
- #Filter mutations based on the model
- if(any(mutationInfo)==T | is.na(any(mutationInfo))){
-
- if(model==1 & !is.na(mutationInfo)){
- mutationInfo <- mutationInfo[mutationInfo=="S"]
- }
- if(any(mutationInfo)==T | is.na(any(mutationInfo))) return(list(mutationInfo,inputSeq))
- else return(list(NA,inputSeq))
- }else{
- return(list(NA,inputSeq))
- }
- }else{
- return(list(NA,inputSeq))
- }
-
-
- }else{
- return(list(NA,inputSeq))
- }
-
-
- }else{
- return (list(NA,inputSeq))
- }
- }
-
- # triMutability Background Count
- buildMutabilityModel <- function( inputMatrixIndex, model=0 , multipleMutation=0, seqWithStops=0, stopMutations=0){
-
- #rowOrigMatInput = matInput[inputMatrixIndex,]
- seqGL = gsub("-", "", matInput[inputMatrixIndex,2])
- seqInput = gsub("-", "", matInput[inputMatrixIndex,1])
- #matInput[inputMatrixIndex,] <<- cbind(seqInput,seqGL)
- tempInput <- cbind(seqInput,seqGL)
- seqLength = nchar(seqGL)
- list_analyzeMutationsFixed<- analyzeMutationsFixed(tempInput, model, multipleMutation, seqWithStops)
- mutationCount <- list_analyzeMutationsFixed[[1]]
- seqInput <- list_analyzeMutationsFixed[[2]]
- BackgroundMatrix = mutabilityMatrix
- MutationMatrix = mutabilityMatrix
- MutationCountMatrix = mutabilityMatrix
- if(!is.na(mutationCount)){
-			if((stopMutations==0 & model==0) | (stopMutations==1 & (sum(mutationCount=="Stop")<length(mutationCount))) | (model==1 & (sum(mutationCount=="S")>0)) ){
-
- fivermerStartPos = 1:(seqLength-4)
- fivemerLength <- length(fivermerStartPos)
- fivemerGL <- substr(rep(seqGL,length(fivermerStartPos)),(fivermerStartPos),(fivermerStartPos+4))
- fivemerSeq <- substr(rep(seqInput,length(fivermerStartPos)),(fivermerStartPos),(fivermerStartPos+4))
-
- #Background
- for(fivemerIndex in 1:fivemerLength){
- fivemer = fivemerGL[fivemerIndex]
- if(!any(grep("N",fivemer))){
- fivemerCodonPos = fivemerCodon(fivemerIndex)
- fivemerReadingFrameCodon = substr(fivemer,fivemerCodonPos[1],fivemerCodonPos[3])
- fivemerReadingFrameCodonInputSeq = substr(fivemerSeq[fivemerIndex],fivemerCodonPos[1],fivemerCodonPos[3])
-
- # All mutations model
- #if(!any(grep("N",fivemerReadingFrameCodon))){
- if(model==0){
- if(stopMutations==0){
- if(!any(grep("N",fivemerReadingFrameCodonInputSeq)))
- BackgroundMatrix[fivemer] <- (BackgroundMatrix[fivemer] + 1)
- }else{
- if( !any(grep("N",fivemerReadingFrameCodonInputSeq)) & translateCodonToAminoAcid(fivemerReadingFrameCodon)!="*" ){
- positionWithinCodon = which(fivemerCodonPos==3)#positionsWithinCodon[(fivemerCodonPos[1]%%3)+1]
- BackgroundMatrix[fivemer] <- (BackgroundMatrix[fivemer] + probNonStopMutations[fivemerReadingFrameCodon,positionWithinCodon])
- }
- }
- }else{ # Only silent mutations
- if( !any(grep("N",fivemerReadingFrameCodonInputSeq)) & translateCodonToAminoAcid(fivemerReadingFrameCodon)!="*" & translateCodonToAminoAcid(fivemerReadingFrameCodonInputSeq)==translateCodonToAminoAcid(fivemerReadingFrameCodon)){
- positionWithinCodon = which(fivemerCodonPos==3)
- BackgroundMatrix[fivemer] <- (BackgroundMatrix[fivemer] + probSMutations[fivemerReadingFrameCodon,positionWithinCodon])
- }
- }
- #}
- }
- }
-
- #Mutations
- if(stopMutations==1) mutationCount = mutationCount[mutationCount!="Stop"]
- if(model==1) mutationCount = mutationCount[mutationCount=="S"]
- mutationPositions = as.numeric(names(mutationCount))
- mutationCount = mutationCount[mutationPositions>2 & mutationPositions<(seqLength-1)]
- mutationPositions = mutationPositions[mutationPositions>2 & mutationPositions<(seqLength-1)]
- countMutations = 0
- for(mutationPosition in mutationPositions){
- fivemerIndex = mutationPosition-2
- fivemer = fivemerSeq[fivemerIndex]
- GLfivemer = fivemerGL[fivemerIndex]
- fivemerCodonPos = fivemerCodon(fivemerIndex)
- fivemerReadingFrameCodon = substr(fivemer,fivemerCodonPos[1],fivemerCodonPos[3])
- fivemerReadingFrameCodonGL = substr(GLfivemer,fivemerCodonPos[1],fivemerCodonPos[3])
- if(!any(grep("N",fivemer)) & !any(grep("N",GLfivemer))){
- if(model==0){
- countMutations = countMutations + 1
- MutationMatrix[GLfivemer] <- (MutationMatrix[GLfivemer] + 1)
- MutationCountMatrix[GLfivemer] <- (MutationCountMatrix[GLfivemer] + 1)
- }else{
- if( translateCodonToAminoAcid(fivemerReadingFrameCodonGL)!="*" ){
- countMutations = countMutations + 1
- positionWithinCodon = which(fivemerCodonPos==3)
- glNuc = substr(fivemerReadingFrameCodonGL,positionWithinCodon,positionWithinCodon)
- inputNuc = substr(fivemerReadingFrameCodon,positionWithinCodon,positionWithinCodon)
- MutationMatrix[GLfivemer] <- (MutationMatrix[GLfivemer] + substitution[glNuc,inputNuc])
- MutationCountMatrix[GLfivemer] <- (MutationCountMatrix[GLfivemer] + 1)
- }
- }
- }
- }
-
- seqMutability = MutationMatrix/BackgroundMatrix
- seqMutability = seqMutability/sum(seqMutability,na.rm=TRUE)
- #cat(inputMatrixIndex,"\t",countMutations,"\n")
- return(list("seqMutability" = seqMutability,"numbMutations" = countMutations,"seqMutabilityCount" = MutationCountMatrix, "BackgroundMatrix"=BackgroundMatrix))
-
- }
- }
-
- }
-
- #Returns the codon position containing the middle nucleotide
- fivemerCodon <- function(fivemerIndex){
- codonPos = list(2:4,1:3,3:5)
- fivemerType = fivemerIndex%%3
- return(codonPos[[fivemerType+1]])
- }
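-	# Example: for the 5-mer starting at nucleotide i of the sequence, the middle base sits at i+2, so
-	#   fivemerCodon(1) -> 1:3,  fivemerCodon(2) -> 3:5,  fivemerCodon(3) -> 2:4
-	# i.e. the positions, within the 5-mer itself, of the codon containing its middle nucleotide.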
-
- #returns probability values for one mutation in codons resulting in R, S or Stop
- probMutations <- function(typeOfMutation){
- matMutationProb <- matrix(0,ncol=3,nrow=125,dimnames=list(words(alphabet = c(NUCLEOTIDES,"N"), length=3),c(1:3)))
- for(codon in rownames(matMutationProb)){
- if( !any(grep("N",codon)) ){
- for(muPos in 1:3){
- matCodon = matrix(rep(s2c(codon),3),nrow=3,ncol=3,byrow=T)
- glNuc = matCodon[1,muPos]
- matCodon[,muPos] = canMutateTo(glNuc)
- substitutionRate = substitution[glNuc,matCodon[,muPos]]
- typeOfMutations = apply(rbind(rep(codon,3),apply(matCodon,1,c2s)),2,function(x){mutationType(c2s(x[1]),c2s(x[2]))})
- matMutationProb[codon,muPos] <- sum(substitutionRate[typeOfMutations==typeOfMutation])
- }
- }
- }
-
- return(matMutationProb)
- }
-
-
-
-
-#Mapping Trinucleotides to fivemers
-mapTriToFivemer <- function(triMutability=triMutability_Literature_Human){
- rownames(triMutability) <- triMutability_Names
- Fivemer<-rep(NA,1024)
- names(Fivemer)<-words(alphabet=NUCLEOTIDES,length=5)
- Fivemer<-sapply(names(Fivemer),function(Word)return(sum( c(triMutability[substring(Word,3,5),1],triMutability[substring(Word,2,4),2],triMutability[substring(Word,1,3),3]),na.rm=TRUE)))
- Fivemer<-Fivemer/sum(Fivemer)
- return(Fivemer)
-}
-
-collapseFivemerToTri<-function(Fivemer,Weights=MutabilityWeights,position=1,NUC="A"){
- Indices<-substring(names(Fivemer),3,3)==NUC
- Factors<-substring(names(Fivemer[Indices]),(4-position),(6-position))
- tapply(which(Indices),Factors,function(i)weighted.mean(Fivemer[i],Weights[i],na.rm=TRUE))
-}
-
-
-
-CountFivemerToTri<-function(Fivemer,Weights=MutabilityWeights,position=1,NUC="A"){
- Indices<-substring(names(Fivemer),3,3)==NUC
- Factors<-substring(names(Fivemer[Indices]),(4-position),(6-position))
- tapply(which(Indices),Factors,function(i)sum(Weights[i],na.rm=TRUE))
-}
-
-#Uses the real counts of the mutated fivemers
-CountFivemerToTri2<-function(Fivemer,Counts=MutabilityCounts,position=1,NUC="A"){
- Indices<-substring(names(Fivemer),3,3)==NUC
- Factors<-substring(names(Fivemer[Indices]),(4-position),(6-position))
- tapply(which(Indices),Factors,function(i)sum(Counts[i],na.rm=TRUE))
-}
-
-bootstrap<-function(x=c(33,12,21),M=10000,alpha=0.05){
-N<-sum(x)
-if(N){
-p<-x/N
-k<-length(x)-1
-tmp<-rmultinom(M, size = N, prob=p)
-tmp_p<-apply(tmp,2,function(y)y/N)
-(apply(tmp_p,1,function(y)quantile(y,c(alpha/2/k,1-alpha/2/k))))
-}
-else return(matrix(0,2,length(x)))
-}
-
-
-
-
-bootstrap2<-function(x=c(33,12,21),n=10,M=10000,alpha=0.05){
-
-N<-sum(x)
-k<-length(x)
-y<-rep(1:k,x)
-tmp<-sapply(1:M,function(i)sample(y,n))
-if(n>1)tmp_p<-sapply(1:M,function(j)sapply(1:k,function(i)sum(tmp[,j]==i)))/n
-if(n==1)tmp_p<-sapply(1:M,function(j)sapply(1:k,function(i)sum(tmp[j]==i)))/n
-(apply(tmp_p,1,function(z)quantile(z,c(alpha/2/(k-1),1-alpha/2/(k-1)))))
-}
-
-
-
-p_value<-function(x=c(33,12,21),M=100000,x_obs=c(2,5,3)){
-n=sum(x_obs)
-N<-sum(x)
-k<-length(x)
-y<-rep(1:k,x)
-tmp<-sapply(1:M,function(i)sample(y,n))
-if(n>1)tmp_p<-sapply(1:M,function(j)sapply(1:k,function(i)sum(tmp[,j]==i)))
-if(n==1)tmp_p<-sapply(1:M,function(j)sapply(1:k,function(i)sum(tmp[j]==i)))
-tmp<-rbind(sapply(1:3,function(i)sum(tmp_p[i,]>=x_obs[i])/M),
-sapply(1:3,function(i)sum(tmp_p[i,]<=x_obs[i])/M))
-sapply(1:3,function(i){if(tmp[1,i]>=tmp[2,i])return(-tmp[2,i])else return(tmp[1,i])})
-}
-
-#"D:\\Sequences\\IMGT Germlines\\Human_SNPless_IGHJ.FASTA"
-# Remove SNPs from IMGT germline segment alleles
-generateUnambiguousRepertoire <- function(repertoireInFile,repertoireOutFile){
- repertoireIn <- read.fasta(repertoireInFile, seqtype="DNA",as.string=T,set.attributes=F,forceDNAtolower=F)
- alleleNames <- sapply(names(repertoireIn),function(x)strsplit(x,"|",fixed=TRUE)[[1]][2])
- SNPs <- tapply(repertoireIn,sapply(alleleNames,function(x)strsplit(x,"*",fixed=TRUE)[[1]][1]),function(x){
- Indices<-NULL
- for(i in 1:length(x)){
- firstSeq = s2c(x[[1]])
- iSeq = s2c(x[[i]])
- Indices<-c(Indices,which(firstSeq[1:320]!=iSeq[1:320] & firstSeq[1:320]!="." & iSeq[1:320]!="." ))
- }
- return(sort(unique(Indices)))
- })
- repertoireOut <- repertoireIn
- repertoireOut <- lapply(names(repertoireOut), function(repertoireName){
- alleleName <- strsplit(repertoireName,"|",fixed=TRUE)[[1]][2]
- geneSegmentName <- strsplit(alleleName,"*",fixed=TRUE)[[1]][1]
- alleleSeq <- s2c(repertoireOut[[repertoireName]])
- alleleSeq[as.numeric(unlist(SNPs[geneSegmentName]))] <- "N"
- alleleSeq <- c2s(alleleSeq)
- repertoireOut[[repertoireName]] <- alleleSeq
- })
- names(repertoireOut) <- names(repertoireIn)
- write.fasta(repertoireOut,names(repertoireOut),file.out=repertoireOutFile)
-
-}
-
-
-
-
-
-
-############
-groupBayes2 = function(indexes, param_resultMat){
-
- BayesGDist_Focused_CDR = calculate_bayesG( x=param_resultMat[indexes,1], N=apply(param_resultMat[indexes,c(1,2,4)],1,sum,na.rm=T), p=apply(param_resultMat[indexes,5:8],1,function(x){x[1]/(x[1]+x[2]+x[4])}))
- BayesGDist_Focused_FWR = calculate_bayesG( x=param_resultMat[indexes,3], N=apply(param_resultMat[indexes,c(3,2,4)],1,sum,na.rm=T), p=apply(param_resultMat[indexes,5:8],1,function(x){x[3]/(x[3]+x[2]+x[4])}))
- #BayesGDist_Local_CDR = calculate_bayesG( x=param_resultMat[indexes,1], N=apply(param_resultMat[indexes,c(1,2)],1,sum,na.rm=T), p=apply(param_resultMat[indexes,5:8],1,function(x){x[1]/(x[1]+x[2])}))
- #BayesGDist_Local_FWR = calculate_bayesG( x=param_resultMat[indexes,3], N=apply(param_resultMat[indexes,c(3,4)],1,sum,na.rm=T), p=apply(param_resultMat[indexes,5:8],1,function(x){x[3]/(x[3]+x[4])}))
- #BayesGDist_Global_CDR = calculate_bayesG( x=param_resultMat[indexes,1], N=apply(param_resultMat[indexes,c(1,2,3,4)],1,sum,na.rm=T), p=apply(param_resultMat[indexes,5:8],1,function(x){x[1]/(x[1]+x[2]+x[3]+x[4])}))
- #BayesGDist_Global_FWR = calculate_bayesG( x=param_resultMat[indexes,3], N=apply(param_resultMat[indexes,c(1,2,3,4)],1,sum,na.rm=T), p=apply(param_resultMat[indexes,5:8],1,function(x){x[3]/(x[1]+x[2]+x[3]+x[4])}))
- return ( list("BayesGDist_Focused_CDR"=BayesGDist_Focused_CDR,
- "BayesGDist_Focused_FWR"=BayesGDist_Focused_FWR) )
- #"BayesGDist_Local_CDR"=BayesGDist_Local_CDR,
- #"BayesGDist_Local_FWR" = BayesGDist_Local_FWR))
-# "BayesGDist_Global_CDR" = BayesGDist_Global_CDR,
-# "BayesGDist_Global_FWR" = BayesGDist_Global_FWR) )
-
-
-}
-
-
-calculate_bayesG <- function( x=array(), N=array(), p=array(), max_sigma=20, length_sigma=4001){
- G <- max(length(x),length(N),length(p))
- x=array(x,dim=G)
- N=array(N,dim=G)
- p=array(p,dim=G)
-
- indexOfZero = N>0 & p>0
- N = N[indexOfZero]
- x = x[indexOfZero]
- p = p[indexOfZero]
- G <- length(x)
-
- if(G){
-
- cons<-array( dim=c(length_sigma,G) )
- if(G==1) {
- return(calculate_bayes(x=x[G],N=N[G],p=p[G],max_sigma=max_sigma,length_sigma=length_sigma))
- }
- else {
- for(g in 1:G) cons[,g] <- calculate_bayes(x=x[g],N=N[g],p=p[g],max_sigma=max_sigma,length_sigma=length_sigma)
- listMatG <- convolutionPowersOfTwoByTwos(cons,length_sigma=length_sigma)
- y<-calculate_bayesGHelper(listMatG,length_sigma=length_sigma)
- return( y/sum(y)/(2*max_sigma/(length_sigma-1)) )
- }
- }else{
- return(NA)
- }
-}
-
-
-calculate_bayesGHelper <- function( listMatG,length_sigma=4001 ){
- matG <- listMatG[[1]]
- groups <- listMatG[[2]]
- i = 1
- resConv <- matG[,i]
- denom <- 2^groups[i]
- if(length(groups)>1){
- while( i0)) ){
-
-# ONEmerStartPos = 1:(seqLength)
-# ONEmerLength <- length(ONEmerStartPos)
- ONEmerGL <- s2c(seqGL)
- ONEmerSeq <- s2c(seqInput)
-
- #Background
- for(ONEmerIndex in 1:seqLength){
- ONEmer = ONEmerGL[ONEmerIndex]
- if(ONEmer!="N"){
- ONEmerCodonPos = getCodonPos(ONEmerIndex)
- ONEmerReadingFrameCodon = c2s(ONEmerGL[ONEmerCodonPos])
- ONEmerReadingFrameCodonInputSeq = c2s(ONEmerSeq[ONEmerCodonPos] )
-
- # All mutations model
- #if(!any(grep("N",ONEmerReadingFrameCodon))){
- if(model==0){
- if(stopMutations==0){
- if(!any(grep("N",ONEmerReadingFrameCodonInputSeq)))
- BackgroundMatrix[ONEmer] <- (BackgroundMatrix[ONEmer] + 1)
- }else{
- if( !any(grep("N",ONEmerReadingFrameCodonInputSeq)) & translateCodonToAminoAcid(ONEmerReadingFrameCodonInputSeq)!="*"){
- positionWithinCodon = which(ONEmerCodonPos==ONEmerIndex)#positionsWithinCodon[(ONEmerCodonPos[1]%%3)+1]
- BackgroundMatrix[ONEmer] <- (BackgroundMatrix[ONEmer] + probNonStopMutations[ONEmerReadingFrameCodon,positionWithinCodon])
- }
- }
- }else{ # Only silent mutations
- if( !any(grep("N",ONEmerReadingFrameCodonInputSeq)) & translateCodonToAminoAcid(ONEmerReadingFrameCodonInputSeq)!="*" & translateCodonToAminoAcid(ONEmerReadingFrameCodonInputSeq)==translateCodonToAminoAcid(ONEmerReadingFrameCodon) ){
- positionWithinCodon = which(ONEmerCodonPos==ONEmerIndex)
- BackgroundMatrix[ONEmer] <- (BackgroundMatrix[ONEmer] + probSMutations[ONEmerReadingFrameCodon,positionWithinCodon])
- }
- }
- }
- }
- }
-
- #Mutations
- if(stopMutations==1) mutationCount = mutationCount[mutationCount!="Stop"]
- if(model==1) mutationCount = mutationCount[mutationCount=="S"]
- mutationPositions = as.numeric(names(mutationCount))
- mutationCount = mutationCount[mutationPositions>2 & mutationPositions<(seqLength-1)]
- mutationPositions = mutationPositions[mutationPositions>2 & mutationPositions<(seqLength-1)]
- countMutations = 0
- for(mutationPosition in mutationPositions){
- ONEmerIndex = mutationPosition
- ONEmer = ONEmerSeq[ONEmerIndex]
- GLONEmer = ONEmerGL[ONEmerIndex]
- ONEmerCodonPos = getCodonPos(ONEmerIndex)
- ONEmerReadingFrameCodon = c2s(ONEmerSeq[ONEmerCodonPos])
- ONEmerReadingFrameCodonGL =c2s(ONEmerGL[ONEmerCodonPos])
- if(!any(grep("N",ONEmer)) & !any(grep("N",GLONEmer))){
- if(model==0){
- countMutations = countMutations + 1
- MutationMatrix[GLONEmer] <- (MutationMatrix[GLONEmer] + 1)
- MutationCountMatrix[GLONEmer] <- (MutationCountMatrix[GLONEmer] + 1)
- }else{
- if( translateCodonToAminoAcid(ONEmerReadingFrameCodonGL)!="*" ){
- countMutations = countMutations + 1
- positionWithinCodon = which(ONEmerCodonPos==ONEmerIndex)
- glNuc = substr(ONEmerReadingFrameCodonGL,positionWithinCodon,positionWithinCodon)
- inputNuc = substr(ONEmerReadingFrameCodon,positionWithinCodon,positionWithinCodon)
- MutationMatrix[GLONEmer] <- (MutationMatrix[GLONEmer] + substitution[glNuc,inputNuc])
- MutationCountMatrix[GLONEmer] <- (MutationCountMatrix[GLONEmer] + 1)
- }
- }
- }
- }
-
- seqMutability = MutationMatrix/BackgroundMatrix
- seqMutability = seqMutability/sum(seqMutability,na.rm=TRUE)
- #cat(inputMatrixIndex,"\t",countMutations,"\n")
- return(list("seqMutability" = seqMutability,"numbMutations" = countMutations,"seqMutabilityCount" = MutationCountMatrix, "BackgroundMatrix"=BackgroundMatrix))
-# tmp<-list("seqMutability" = seqMutability,"numbMutations" = countMutations,"seqMutabilityCount" = MutationCountMatrix)
- }
- }
-
-################
-# $Id: trim.R 989 2006-10-29 15:28:26Z ggorjan $
-
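-# trim(): S3 generic that strips leading/trailing spaces; the methods below cover character
-# vectors, factors (optionally recoding levels via reorder.factor), lists and data frames.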
-trim <- function(s, recode.factor=TRUE, ...)
- UseMethod("trim", s)
-
-trim.default <- function(s, recode.factor=TRUE, ...)
- s
-
-trim.character <- function(s, recode.factor=TRUE, ...)
-{
- s <- sub(pattern="^ +", replacement="", x=s)
- s <- sub(pattern=" +$", replacement="", x=s)
- s
-}
-
-trim.factor <- function(s, recode.factor=TRUE, ...)
-{
- levels(s) <- trim(levels(s))
- if(recode.factor) {
- dots <- list(x=s, ...)
- if(is.null(dots$sort)) dots$sort <- sort
- s <- do.call(what=reorder.factor, args=dots)
- }
- s
-}
-
-trim.list <- function(s, recode.factor=TRUE, ...)
- lapply(s, trim, recode.factor=recode.factor, ...)
-
-trim.data.frame <- function(s, recode.factor=TRUE, ...)
-{
- s[] <- trim.list(s, recode.factor=recode.factor, ...)
- s
-}
-#######################################
-# Compute the expected for each sequence-germline pair by codon
-getExpectedIndividualByCodon <- function(matInput){
-if( any(grep("multicore",search())) ){
- facGL <- factor(matInput[,2])
- facLevels = levels(facGL)
- LisGLs_MutabilityU = mclapply(1:length(facLevels), function(x){
- computeMutabilities(facLevels[x])
- })
- facIndex = match(facGL,facLevels)
-
- LisGLs_Mutability = mclapply(1:nrow(matInput), function(x){
- cInput = rep(NA,nchar(matInput[x,1]))
- cInput[s2c(matInput[x,1])!="N"] = 1
- LisGLs_MutabilityU[[facIndex[x]]] * cInput
- })
-
- LisGLs_Targeting = mclapply(1:dim(matInput)[1], function(x){
- computeTargeting(matInput[x,2],LisGLs_Mutability[[x]])
- })
-
- LisGLs_MutationTypes = mclapply(1:length(matInput[,2]),function(x){
- #print(x)
- computeMutationTypes(matInput[x,2])
- })
-
- LisGLs_R_Exp = mclapply(1:nrow(matInput), function(x){
- Exp_R <- rollapply(as.zoo(1:readEnd),width=3,by=3,
- function(codonNucs){
- RPos = which(LisGLs_MutationTypes[[x]][,codonNucs]=="R")
- sum( LisGLs_Targeting[[x]][,codonNucs][RPos], na.rm=T )
- }
- )
- })
-
- LisGLs_S_Exp = mclapply(1:nrow(matInput), function(x){
- Exp_S <- rollapply(as.zoo(1:readEnd),width=3,by=3,
- function(codonNucs){
- SPos = which(LisGLs_MutationTypes[[x]][,codonNucs]=="S")
- sum( LisGLs_Targeting[[x]][,codonNucs][SPos], na.rm=T )
- }
- )
- })
-
- Exp_R = matrix(unlist(LisGLs_R_Exp),nrow=nrow(matInput),ncol=readEnd/3,T)
- Exp_S = matrix(unlist(LisGLs_S_Exp),nrow=nrow(matInput),ncol=readEnd/3,T)
- return( list( "Expected_R"=Exp_R, "Expected_S"=Exp_S) )
- }else{
- facGL <- factor(matInput[,2])
- facLevels = levels(facGL)
- LisGLs_MutabilityU = lapply(1:length(facLevels), function(x){
- computeMutabilities(facLevels[x])
- })
- facIndex = match(facGL,facLevels)
-
- LisGLs_Mutability = lapply(1:nrow(matInput), function(x){
- cInput = rep(NA,nchar(matInput[x,1]))
- cInput[s2c(matInput[x,1])!="N"] = 1
- LisGLs_MutabilityU[[facIndex[x]]] * cInput
- })
-
- LisGLs_Targeting = lapply(1:dim(matInput)[1], function(x){
- computeTargeting(matInput[x,2],LisGLs_Mutability[[x]])
- })
-
- LisGLs_MutationTypes = lapply(1:length(matInput[,2]),function(x){
- #print(x)
- computeMutationTypes(matInput[x,2])
- })
-
- LisGLs_R_Exp = lapply(1:nrow(matInput), function(x){
- Exp_R <- rollapply(as.zoo(1:readEnd),width=3,by=3,
- function(codonNucs){
- RPos = which(LisGLs_MutationTypes[[x]][,codonNucs]=="R")
- sum( LisGLs_Targeting[[x]][,codonNucs][RPos], na.rm=T )
- }
- )
- })
-
- LisGLs_S_Exp = lapply(1:nrow(matInput), function(x){
- Exp_S <- rollapply(as.zoo(1:readEnd),width=3,by=3,
- function(codonNucs){
- SPos = which(LisGLs_MutationTypes[[x]][,codonNucs]=="S")
- sum( LisGLs_Targeting[[x]][,codonNucs][SPos], na.rm=T )
- }
- )
- })
-
- Exp_R = matrix(unlist(LisGLs_R_Exp),nrow=nrow(matInput),ncol=readEnd/3,T)
- Exp_S = matrix(unlist(LisGLs_S_Exp),nrow=nrow(matInput),ncol=readEnd/3,T)
- return( list( "Expected_R"=Exp_R, "Expected_S"=Exp_S) )
- }
-}
-
-# getObservedMutationsByCodon <- function(listMutations){
-# numbSeqs <- length(listMutations)
-# obsMu_R <- matrix(0,nrow=numbSeqs,ncol=readEnd/3,dimnames=list(c(1:numbSeqs),c(1:(readEnd/3))))
-# obsMu_S <- obsMu_R
-# temp <- mclapply(1:length(listMutations), function(i){
-# arrMutations = listMutations[[i]]
-# RPos = as.numeric(names(arrMutations)[arrMutations=="R"])
-# RPos <- sapply(RPos,getCodonNumb)
-# if(any(RPos)){
-# tabR <- table(RPos)
-# obsMu_R[i,as.numeric(names(tabR))] <<- tabR
-# }
-#
-# SPos = as.numeric(names(arrMutations)[arrMutations=="S"])
-# SPos <- sapply(SPos,getCodonNumb)
-# if(any(SPos)){
-# tabS <- table(SPos)
-# obsMu_S[i,names(tabS)] <<- tabS
-# }
-# }
-# )
-# return( list( "Observed_R"=obsMu_R, "Observed_S"=obsMu_S) )
-# }
-
-getObservedMutationsByCodon <- function(listMutations){
- numbSeqs <- length(listMutations)
- obsMu_R <- matrix(0,nrow=numbSeqs,ncol=readEnd/3,dimnames=list(c(1:numbSeqs),c(1:(readEnd/3))))
- obsMu_S <- obsMu_R
- temp <- lapply(1:length(listMutations), function(i){
- arrMutations = listMutations[[i]]
- RPos = as.numeric(names(arrMutations)[arrMutations=="R"])
- RPos <- sapply(RPos,getCodonNumb)
- if(any(RPos)){
- tabR <- table(RPos)
- obsMu_R[i,as.numeric(names(tabR))] <<- tabR
- }
-
- SPos = as.numeric(names(arrMutations)[arrMutations=="S"])
- SPos <- sapply(SPos,getCodonNumb)
- if(any(SPos)){
- tabS <- table(SPos)
- obsMu_S[i,names(tabS)] <<- tabS
- }
- }
- )
- return( list( "Observed_R"=obsMu_R, "Observed_S"=obsMu_S) )
-}
-
+#########################################################################################
+# License Agreement
+#
+# THIS WORK IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE
+# ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER
+# APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE
+# OR COPYRIGHT LAW IS PROHIBITED.
+#
+# BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE
+# BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE CONSIDERED
+# TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN
+# CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
+#
+# BASELIne: Bayesian Estimation of Antigen-Driven Selection in Immunoglobulin Sequences
+# Coded by: Mohamed Uduman & Gur Yaari
+# Copyright 2012 Kleinstein Lab
+# Version: 1.3 (01/23/2014)
+#########################################################################################
+
+# Global variables
+
+ FILTER_BY_MUTATIONS = 1000
+
+ # Nucleotides
+ NUCLEOTIDES = c("A","C","G","T")
+
+ # Amino Acids
+ AMINO_ACIDS <- c("F", "F", "L", "L", "S", "S", "S", "S", "Y", "Y", "*", "*", "C", "C", "*", "W", "L", "L", "L", "L", "P", "P", "P", "P", "H", "H", "Q", "Q", "R", "R", "R", "R", "I", "I", "I", "M", "T", "T", "T", "T", "N", "N", "K", "K", "S", "S", "R", "R", "V", "V", "V", "V", "A", "A", "A", "A", "D", "D", "E", "E", "G", "G", "G", "G")
+ names(AMINO_ACIDS) <- c("TTT", "TTC", "TTA", "TTG", "TCT", "TCC", "TCA", "TCG", "TAT", "TAC", "TAA", "TAG", "TGT", "TGC", "TGA", "TGG", "CTT", "CTC", "CTA", "CTG", "CCT", "CCC", "CCA", "CCG", "CAT", "CAC", "CAA", "CAG", "CGT", "CGC", "CGA", "CGG", "ATT", "ATC", "ATA", "ATG", "ACT", "ACC", "ACA", "ACG", "AAT", "AAC", "AAA", "AAG", "AGT", "AGC", "AGA", "AGG", "GTT", "GTC", "GTA", "GTG", "GCT", "GCC", "GCA", "GCG", "GAT", "GAC", "GAA", "GAG", "GGT", "GGC", "GGA", "GGG")
+ names(AMINO_ACIDS) <- names(AMINO_ACIDS)
+
+ #Amino Acid Traits
+ #"*" "A" "C" "D" "E" "F" "G" "H" "I" "K" "L" "M" "N" "P" "Q" "R" "S" "T" "V" "W" "Y"
+	#B = "Hydrophobic/Buried"  N = "Intermediate/Neutral"  S = "Hydrophilic/Surface"
+ TRAITS_AMINO_ACIDS_CHOTHIA98 <- c("*","N","B","S","S","B","N","N","B","S","B","B","S","N","S","S","N","N","B","B","N")
+ names(TRAITS_AMINO_ACIDS_CHOTHIA98) <- sort(unique(AMINO_ACIDS))
+ TRAITS_AMINO_ACIDS <- array(NA,21)
+
+ # Codon Table
+ CODON_TABLE <- as.data.frame(matrix(NA,ncol=64,nrow=12))
+
+ # Substitution Model: Smith DS et al. 1996
+ substitution_Literature_Mouse <- matrix(c(0, 0.156222928, 0.601501588, 0.242275484, 0.172506739, 0, 0.241239892, 0.586253369, 0.54636291, 0.255795364, 0, 0.197841727, 0.290240811, 0.467680608, 0.24207858, 0),nrow=4,byrow=T,dimnames=list(NUCLEOTIDES,NUCLEOTIDES))
+ substitution_Flu_Human <- matrix(c(0,0.2795596,0.5026927,0.2177477,0.1693210,0,0.3264723,0.5042067,0.4983549,0.3328321,0,0.1688130,0.2021079,0.4696077,0.3282844,0),4,4,byrow=T,dimnames=list(NUCLEOTIDES,NUCLEOTIDES))
+ substitution_Flu25_Human <- matrix(c(0,0.2580641,0.5163685,0.2255674,0.1541125,0,0.3210224,0.5248651,0.5239281,0.3101292,0,0.1659427,0.1997207,0.4579444,0.3423350,0),4,4,byrow=T,dimnames=list(NUCLEOTIDES,NUCLEOTIDES))
+ load("FiveS_Substitution.RData")
+
+ # Mutability Models: Shapiro GS et al. 2002
+ triMutability_Literature_Human <- matrix(c(0.24, 1.2, 0.96, 0.43, 2.14, 2, 1.11, 1.9, 0.85, 1.83, 2.36, 1.31, 0.82, 0.52, 0.89, 1.33, 1.4, 0.82, 1.83, 0.73, 1.83, 1.62, 1.53, 0.57, 0.92, 0.42, 0.42, 1.47, 3.44, 2.58, 1.18, 0.47, 0.39, 1.12, 1.8, 0.68, 0.47, 2.19, 2.35, 2.19, 1.05, 1.84, 1.26, 0.28, 0.98, 2.37, 0.66, 1.58, 0.67, 0.92, 1.76, 0.83, 0.97, 0.56, 0.75, 0.62, 2.26, 0.62, 0.74, 1.11, 1.16, 0.61, 0.88, 0.67, 0.37, 0.07, 1.08, 0.46, 0.31, 0.94, 0.62, 0.57, 0.29, NA, 1.44, 0.46, 0.69, 0.57, 0.24, 0.37, 1.1, 0.99, 1.39, 0.6, 2.26, 1.24, 1.36, 0.52, 0.33, 0.26, 1.25, 0.37, 0.58, 1.03, 1.2, 0.34, 0.49, 0.33, 2.62, 0.16, 0.4, 0.16, 0.35, 0.75, 1.85, 0.94, 1.61, 0.85, 2.09, 1.39, 0.3, 0.52, 1.33, 0.29, 0.51, 0.26, 0.51, 3.83, 2.01, 0.71, 0.58, 0.62, 1.07, 0.28, 1.2, 0.74, 0.25, 0.59, 1.09, 0.91, 1.36, 0.45, 2.89, 1.27, 3.7, 0.69, 0.28, 0.41, 1.17, 0.56, 0.93, 3.41, 1, 1, NA, 5.9, 0.74, 2.51, 2.24, 2.24, 1.95, 3.32, 2.34, 1.3, 2.3, 1, 0.66, 0.73, 0.93, 0.41, 0.65, 0.89, 0.65, 0.32, NA, 0.43, 0.85, 0.43, 0.31, 0.31, 0.23, 0.29, 0.57, 0.71, 0.48, 0.44, 0.76, 0.51, 1.7, 0.85, 0.74, 2.23, 2.08, 1.16, 0.51, 0.51, 1, 0.5, NA, NA, 0.71, 2.14), nrow=64,byrow=T)
+ triMutability_Literature_Mouse <- matrix(c(1.31, 1.35, 1.42, 1.18, 2.02, 2.02, 1.02, 1.61, 1.99, 1.42, 2.01, 1.03, 2.02, 0.97, 0.53, 0.71, 1.19, 0.83, 0.96, 0.96, 0, 1.7, 2.22, 0.59, 1.24, 1.07, 0.51, 1.68, 3.36, 3.36, 1.14, 0.29, 0.33, 0.9, 1.11, 0.63, 1.08, 2.07, 2.27, 1.74, 0.22, 1.19, 2.37, 1.15, 1.15, 1.56, 0.81, 0.34, 0.87, 0.79, 2.13, 0.49, 0.85, 0.97, 0.36, 0.82, 0.66, 0.63, 1.15, 0.94, 0.85, 0.25, 0.93, 1.19, 0.4, 0.2, 0.44, 0.44, 0.88, 1.06, 0.77, 0.39, 0, 0, 0, 0, 0, 0, 0.43, 0.43, 0.86, 0.59, 0.59, 0, 1.18, 0.86, 2.9, 1.66, 0.4, 0.2, 1.54, 0.43, 0.69, 1.71, 0.68, 0.55, 0.91, 0.7, 1.71, 0.09, 0.27, 0.63, 0.2, 0.45, 1.01, 1.63, 0.96, 1.48, 2.18, 1.2, 1.31, 0.66, 2.13, 0.49, 0, 0, 0, 2.97, 2.8, 0.79, 0.4, 0.5, 0.4, 0.11, 1.68, 0.42, 0.13, 0.44, 0.93, 0.71, 1.11, 1.19, 2.71, 1.08, 3.43, 0.4, 0.67, 0.47, 1.02, 0.14, 1.56, 1.98, 0.53, 0.33, 0.63, 2.06, 1.77, 1.46, 3.74, 2.93, 2.1, 2.18, 0.78, 0.73, 2.93, 0.63, 0.57, 0.17, 0.85, 0.52, 0.31, 0.31, 0, 0, 0.51, 0.29, 0.83, 0.54, 0.28, 0.47, 0.9, 0.99, 1.24, 2.47, 0.73, 0.23, 1.13, 0.24, 2.12, 0.24, 0.33, 0.83, 1.41, 0.62, 0.28, 0.35, 0.77, 0.17, 0.72, 0.58, 0.45, 0.41), nrow=64,byrow=T)
+ triMutability_Names <- c("AAA", "AAC", "AAG", "AAT", "ACA", "ACC", "ACG", "ACT", "AGA", "AGC", "AGG", "AGT", "ATA", "ATC", "ATG", "ATT", "CAA", "CAC", "CAG", "CAT", "CCA", "CCC", "CCG", "CCT", "CGA", "CGC", "CGG", "CGT", "CTA", "CTC", "CTG", "CTT", "GAA", "GAC", "GAG", "GAT", "GCA", "GCC", "GCG", "GCT", "GGA", "GGC", "GGG", "GGT", "GTA", "GTC", "GTG", "GTT", "TAA", "TAC", "TAG", "TAT", "TCA", "TCC", "TCG", "TCT", "TGA", "TGC", "TGG", "TGT", "TTA", "TTC", "TTG", "TTT")
+ load("FiveS_Mutability.RData")
+
+# Functions
+
+ # Translate codon to amino acid
+ translateCodonToAminoAcid<-function(Codon){
+ return(AMINO_ACIDS[Codon])
+ }
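+	# Quick usage sketch (values follow from the AMINO_ACIDS lookup above):
+	#   translateCodonToAminoAcid("TGG")  # "W"
+	#   translateCodonToAminoAcid("TGA")  # "*" (stop codon)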
+
+ # Translate amino acid to trait change
+ translateAminoAcidToTraitChange<-function(AminoAcid){
+ return(TRAITS_AMINO_ACIDS[AminoAcid])
+ }
+
+ # Initialize Amino Acid Trait Changes
+ initializeTraitChange <- function(traitChangeModel=1,species=1,traitChangeFileName=NULL){
+ if(!is.null(traitChangeFileName)){
+ tryCatch(
+ traitChange <- read.delim(traitChangeFileName,sep="\t",header=T)
+ , error = function(ex){
+ cat("Error|Error reading trait changes. Please check file name/path and format.\n")
+ q()
+ }
+ )
+ }else{
+ traitChange <- TRAITS_AMINO_ACIDS_CHOTHIA98
+ }
+ TRAITS_AMINO_ACIDS <<- traitChange
+ }
+
+ # Read in formatted nucleotide substitution matrix
+ initializeSubstitutionMatrix <- function(substitutionModel,species,subsMatFileName=NULL){
+ if(!is.null(subsMatFileName)){
+ tryCatch(
+ subsMat <- read.delim(subsMatFileName,sep="\t",header=T)
+ , error = function(ex){
+ cat("Error|Error reading substitution matrix. Please check file name/path and format.\n")
+ q()
+ }
+ )
+ if(sum(apply(subsMat,1,sum)==1)!=4) subsMat = t(apply(subsMat,1,function(x)x/sum(x)))
+ }else{
+ if(substitutionModel==1)subsMat <- substitution_Literature_Mouse
+ if(substitutionModel==2)subsMat <- substitution_Flu_Human
+ if(substitutionModel==3)subsMat <- substitution_Flu25_Human
+
+ }
+
+ if(substitutionModel==0){
+ subsMat <- matrix(1,4,4)
+ subsMat[,] = 1/3
+ subsMat[1,1] = 0
+ subsMat[2,2] = 0
+ subsMat[3,3] = 0
+ subsMat[4,4] = 0
+ }
+
+
+ NUCLEOTIDESN = c(NUCLEOTIDES,"N", "-")
+ if(substitutionModel==5){
+ subsMat <- FiveS_Substitution
+ return(subsMat)
+ }else{
+ subsMat <- rbind(subsMat,rep(NA,4),rep(NA,4))
+ return( matrix(data.matrix(subsMat),6,4,dimnames=list(NUCLEOTIDESN,NUCLEOTIDES) ) )
+ }
+ }
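+	# Usage sketch: initializeSubstitutionMatrix(0,1) yields the uniform model, a 6x4 matrix over
+	# A/C/G/T/N/- rows with 1/3 off the diagonal, 0 on it, and NA rows for N and "-";
+	# substitutionModel==5 instead returns the 5-mer FiveS_Substitution object loaded above.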
+
+
+ # Read in formatted Mutability file
+ initializeMutabilityMatrix <- function(mutabilityModel=1, species=1,mutabilityMatFileName=NULL){
+ if(!is.null(mutabilityMatFileName)){
+ tryCatch(
+ mutabilityMat <- read.delim(mutabilityMatFileName,sep="\t",header=T)
+ , error = function(ex){
+ cat("Error|Error reading mutability matrix. Please check file name/path and format.\n")
+ q()
+ }
+ )
+ }else{
+ mutabilityMat <- triMutability_Literature_Human
+ if(species==2) mutabilityMat <- triMutability_Literature_Mouse
+ }
+
+ if(mutabilityModel==0){ mutabilityMat <- matrix(1,64,3)}
+
+ if(mutabilityModel==5){
+ mutabilityMat <- FiveS_Mutability
+ return(mutabilityMat)
+ }else{
+ return( matrix( data.matrix(mutabilityMat), 64, 3, dimnames=list(triMutability_Names,1:3)) )
+ }
+ }
+
+ # Read FASTA file formats
+ # Modified from read.fasta from the seqinR package
+ baseline.read.fasta <-
+ function (file = system.file("sequences/sample.fasta", package = "seqinr"),
+ seqtype = c("DNA", "AA"), as.string = FALSE, forceDNAtolower = TRUE,
+ set.attributes = TRUE, legacy.mode = TRUE, seqonly = FALSE,
+ strip.desc = FALSE, sizeof.longlong = .Machine$sizeof.longlong,
+ endian = .Platform$endian, apply.mask = TRUE)
+ {
+ seqtype <- match.arg(seqtype)
+
+ lines <- readLines(file)
+
+ if (legacy.mode) {
+ comments <- grep("^;", lines)
+ if (length(comments) > 0)
+ lines <- lines[-comments]
+ }
+
+
+ ind_groups<-which(substr(lines, 1L, 3L) == ">>>")
+ lines_mod<-lines
+
+ if(!length(ind_groups)){
+ lines_mod<-c(">>>All sequences combined",lines)
+ }
+
+ ind_groups<-which(substr(lines_mod, 1L, 3L) == ">>>")
+
+ lines <- array("BLA",dim=(length(ind_groups)+length(lines_mod)))
+ id<-sapply(1:length(ind_groups),function(i)ind_groups[i]+i-1)+1
+ lines[id] <- "THIS IS A FAKE SEQUENCE"
+ lines[-id] <- lines_mod
+ rm(lines_mod)
+
+ ind <- which(substr(lines, 1L, 1L) == ">")
+ nseq <- length(ind)
+ if (nseq == 0) {
+ stop("no line starting with a > character found")
+ }
+ start <- ind + 1
+ end <- ind - 1
+
+ while( any(which(ind%in%end)) ){
+ ind=ind[-which(ind%in%end)]
+ nseq <- length(ind)
+ if (nseq == 0) {
+ stop("no line starting with a > character found")
+ }
+ start <- ind + 1
+ end <- ind - 1
+ }
+
+ end <- c(end[-1], length(lines))
+ sequences <- lapply(seq_len(nseq), function(i) paste(lines[start[i]:end[i]], collapse = ""))
+ if (seqonly)
+ return(sequences)
+ nomseq <- lapply(seq_len(nseq), function(i) {
+
+ #firstword <- strsplit(lines[ind[i]], " ")[[1]][1]
+ substr(lines[ind[i]], 2, nchar(lines[ind[i]]))
+
+ })
+ if (seqtype == "DNA") {
+ if (forceDNAtolower) {
+ sequences <- as.list(tolower(chartr(".","-",sequences)))
+ }else{
+ sequences <- as.list(toupper(chartr(".","-",sequences)))
+ }
+ }
+ if (as.string == FALSE)
+ sequences <- lapply(sequences, s2c)
+ if (set.attributes) {
+ for (i in seq_len(nseq)) {
+ Annot <- lines[ind[i]]
+ if (strip.desc)
+ Annot <- substr(Annot, 2L, nchar(Annot))
+ attributes(sequences[[i]]) <- list(name = nomseq[[i]],
+ Annot = Annot, class = switch(seqtype, AA = "SeqFastaAA",
+ DNA = "SeqFastadna"))
+ }
+ }
+ names(sequences) <- nomseq
+ return(sequences)
+ }
+
+
+	# Replaces non-FASTA characters in input files with N
+ replaceNonFASTAChars <-function(inSeq="ACGTN-AApA"){
+ gsub('[^ACGTNacgt[:punct:]-[:punct:].]','N',inSeq,perl=TRUE)
+ }
+
+ # Find the germlines in the FASTA list
+ germlinesInFile <- function(seqIDs){
+ firstChar = sapply(seqIDs,function(x){substr(x,1,1)})
+ secondChar = sapply(seqIDs,function(x){substr(x,2,2)})
+ return(firstChar==">" & secondChar!=">")
+ }
+
+ # Find the groups in the FASTA list
+ groupsInFile <- function(seqIDs){
+ sapply(seqIDs,function(x){substr(x,1,2)})==">>"
+ }
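+	# Header convention assumed by the two helpers above: baseline.read.fasta() strips one leading
+	# ">" from each header, so an original ">>>Group" header survives as ">>Group" (caught by
+	# groupsInFile), an original ">>Germline" header as ">Germline" (caught by germlinesInFile),
+	# and plain ">sequence" headers keep no leading ">" at all.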
+
+ # In the process of finding germlines/groups, expand from the start to end of the group
+ expandTillNext <- function(vecPosToID){
+ IDs = names(vecPosToID)
+ posOfInterests = which(vecPosToID)
+
+ expandedID = rep(NA,length(IDs))
+ expandedIDNames = gsub(">","",IDs[posOfInterests])
+ startIndexes = c(1,posOfInterests[-1])
+ stopIndexes = c(posOfInterests[-1]-1,length(IDs))
+ expandedID = unlist(sapply(1:length(startIndexes),function(i){
+ rep(i,stopIndexes[i]-startIndexes[i]+1)
+ }))
+ names(expandedID) = unlist(sapply(1:length(startIndexes),function(i){
+ rep(expandedIDNames[i],stopIndexes[i]-startIndexes[i]+1)
+ }))
+ return(expandedID)
+ }
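+	# Worked example (hypothetical IDs), for a logical vector marking where each group starts:
+	#   v <- c(TRUE,FALSE,FALSE,TRUE,FALSE)
+	#   names(v) <- c(">>G1","s1","s2",">>G2","s3")
+	#   expandTillNext(v)  # c(1,1,1,2,2), named "G1","G1","G1","G2","G2"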
+
+	# Process FASTA (list) to return a matrix [input, germline]
+ processInputAdvanced <- function(inputFASTA){
+
+ seqIDs = names(inputFASTA)
+ numbSeqs = length(seqIDs)
+ posGermlines1 = germlinesInFile(seqIDs)
+ numbGermlines = sum(posGermlines1)
+ posGroups1 = groupsInFile(seqIDs)
+ numbGroups = sum(posGroups1)
+ consDef = NA
+
+ if(numbGermlines==0){
+ posGermlines = 2
+ numbGermlines = 1
+ }
+
+ glPositionsSum = cumsum(posGermlines1)
+ glPositions = table(glPositionsSum)
+ #Find the position of the conservation row
+ consDefPos = as.numeric(names(glPositions[names(glPositions)!=0 & glPositions==1]))+1
+ if( length(consDefPos)> 0 ){
+ consDefID = match(consDefPos, glPositionsSum)
+			#The conservation rows need to be pulled out and stored separately
+ consDef = inputFASTA[consDefID]
+ inputFASTA = inputFASTA[-consDefID]
+
+ seqIDs = names(inputFASTA)
+ numbSeqs = length(seqIDs)
+ posGermlines1 = germlinesInFile(seqIDs)
+ numbGermlines = sum(posGermlines1)
+ posGroups1 = groupsInFile(seqIDs)
+ numbGroups = sum(posGroups1)
+ if(numbGermlines==0){
+ posGermlines = 2
+ numbGermlines = 1
+ }
+ }
+
+ posGroups <- expandTillNext(posGroups1)
+ posGermlines <- expandTillNext(posGermlines1)
+ posGermlines[posGroups1] = 0
+ names(posGermlines)[posGroups1] = names(posGroups)[posGroups1]
+ posInput = rep(TRUE,numbSeqs)
+ posInput[posGroups1 | posGermlines1] = FALSE
+
+ matInput = matrix(NA, nrow=sum(posInput), ncol=2)
+ rownames(matInput) = seqIDs[posInput]
+ colnames(matInput) = c("Input","Germline")
+
+ vecInputFASTA = unlist(inputFASTA)
+ matInput[,1] = vecInputFASTA[posInput]
+ matInput[,2] = vecInputFASTA[ which( names(inputFASTA)%in%paste(">",names(posGermlines)[posInput],sep="") )[ posGermlines[posInput]] ]
+
+ germlines = posGermlines[posInput]
+ groups = posGroups[posInput]
+
+ return( list("matInput"=matInput, "germlines"=germlines, "groups"=groups, "conservationDefinition"=consDef ))
+ }
+
+
+ # Replace leading and trailing dashes in the sequence
+ replaceLeadingTrailingDashes <- function(x,readEnd){
+ iiGap = unlist(gregexpr("-",x[1]))
+ ggGap = unlist(gregexpr("-",x[2]))
+ #posToChange = intersect(iiGap,ggGap)
+
+
+ seqIn = replaceLeadingTrailingDashesHelper(x[1])
+ seqGL = replaceLeadingTrailingDashesHelper(x[2])
+ seqTemplate = rep('N',readEnd)
+ seqIn <- c(seqIn,seqTemplate[(length(seqIn)+1):readEnd])
+ seqGL <- c(seqGL,seqTemplate[(length(seqGL)+1):readEnd])
+# if(posToChange!=-1){
+# seqIn[posToChange] = "-"
+# seqGL[posToChange] = "-"
+# }
+
+ seqIn = c2s(seqIn[1:readEnd])
+ seqGL = c2s(seqGL[1:readEnd])
+
+ lenGL = nchar(seqGL)
+ if(lenGL seqLen )
+ trimmedSeq = substr(seqToTrim,1, ( (getCodonPos(seqLen)[1])-1 ) )
+
+ return(trimmedSeq)
+ }
+
+	# Given a nucleotide position, returns the positions of the 3 nucleotides that make up the codon
+ # e.g. nuc 86 is part of nucs 85,86,87
+ getCodonPos <- function(nucPos){
+ codonNum = (ceiling(nucPos/3))*3
+ return( (codonNum-2):codonNum)
+ }
+
+	# Given a nucleotide position, returns the codon number
+ # e.g. nuc 86 = codon 29
+ getCodonNumb <- function(nucPos){
+ return( ceiling(nucPos/3) )
+ }
+
+ # Given a codon, returns all the nuc positions that make the codon
+ getCodonNucs <- function(codonNumb){
+ getCodonPos(codonNumb*3)
+ }
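+	# Small worked example tying the three helpers together (positions are 1-based):
+	#   getCodonPos(86)   # 85 86 87
+	#   getCodonNumb(86)  # 29
+	#   getCodonNucs(29)  # 85 86 87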
+
+ computeCodonTable <- function(testID=1){
+
+ if(testID<=4){
+			# Pre-compute the mutation types for every codon
+ intCounter = 1
+ for(pOne in NUCLEOTIDES){
+ for(pTwo in NUCLEOTIDES){
+ for(pThree in NUCLEOTIDES){
+ codon = paste(pOne,pTwo,pThree,sep="")
+ colnames(CODON_TABLE)[intCounter] = codon
+ intCounter = intCounter + 1
+ CODON_TABLE[,codon] = mutationTypeOptimized(cbind(permutateAllCodon(codon),rep(codon,12)))
+ }
+ }
+ }
+ chars = c("N","A","C","G","T", "-")
+ for(a in chars){
+ for(b in chars){
+ for(c in chars){
+ if(a=="N" | b=="N" | c=="N"){
+ #cat(paste(a,b,c),sep="","\n")
+ CODON_TABLE[,paste(a,b,c,sep="")] = rep(NA,12)
+ }
+ }
+ }
+ }
+
+ chars = c("-","A","C","G","T")
+ for(a in chars){
+ for(b in chars){
+ for(c in chars){
+ if(a=="-" | b=="-" | c=="-"){
+ #cat(paste(a,b,c),sep="","\n")
+ CODON_TABLE[,paste(a,b,c,sep="")] = rep(NA,12)
+ }
+ }
+ }
+ }
+ CODON_TABLE <<- as.matrix(CODON_TABLE)
+ }
+ }
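+	# After computeCodonTable() has run, CODON_TABLE is a character matrix with one column per codon
+	# and 12 rows, one per single-position substitution generated by permutateAllCodon(); each entry
+	# is the resulting mutation class ("R", "S" or "Stop"), and columns for codons containing "N" or
+	# "-" are all NA.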
+
+ collapseClone <- function(vecInputSeqs,glSeq,readEnd,nonTerminalOnly=0){
+ #print(length(vecInputSeqs))
+ vecInputSeqs = unique(vecInputSeqs)
+ if(length(vecInputSeqs)==1){
+ return( list( c(vecInputSeqs,glSeq), F) )
+ }else{
+ charInputSeqs <- sapply(vecInputSeqs, function(x){
+ s2c(x)[1:readEnd]
+ })
+ charGLSeq <- s2c(glSeq)
+ matClone <- sapply(1:readEnd, function(i){
+ posNucs = unique(charInputSeqs[i,])
+ posGL = charGLSeq[i]
+ error = FALSE
+ if(posGL=="-" & sum(!(posNucs%in%c("-","N")))==0 ){
+ return(c("-",error))
+ }
+ if(length(posNucs)==1)
+ return(c(posNucs[1],error))
+ else{
+ if("N"%in%posNucs){
+ error=TRUE
+ }
+ if(sum(!posNucs[posNucs!="N"]%in%posGL)==0){
+ return( c(posGL,error) )
+ }else{
+ #return( c(sample(posNucs[posNucs!="N"],1),error) )
+ if(nonTerminalOnly==0){
+ return( c(sample(charInputSeqs[i,charInputSeqs[i,]!="N" & charInputSeqs[i,]!=posGL],1),error) )
+ }else{
+ posNucs = charInputSeqs[i,charInputSeqs[i,]!="N" & charInputSeqs[i,]!=posGL]
+ posNucsTable = table(posNucs)
+ if(sum(posNucsTable>1)==0){
+ return( c(posGL,error) )
+ }else{
+ return( c(sample( posNucs[posNucs%in%names(posNucsTable)[posNucsTable>1]],1),error) )
+ }
+ }
+
+ }
+ }
+ })
+
+
+ #print(length(vecInputSeqs))
+ return(list(c(c2s(matClone[1,]),glSeq),"TRUE"%in%matClone[2,]))
+ }
+ }
+
+ # Compute the expected for each sequence-germline pair
+ getExpectedIndividual <- function(matInput){
+ if( any(grep("multicore",search())) ){
+ facGL <- factor(matInput[,2])
+ facLevels = levels(facGL)
+ LisGLs_MutabilityU = mclapply(1:length(facLevels), function(x){
+ computeMutabilities(facLevels[x])
+ })
+ facIndex = match(facGL,facLevels)
+
+ LisGLs_Mutability = mclapply(1:nrow(matInput), function(x){
+ cInput = rep(NA,nchar(matInput[x,1]))
+ cInput[s2c(matInput[x,1])!="N"] = 1
+ LisGLs_MutabilityU[[facIndex[x]]] * cInput
+ })
+
+ LisGLs_Targeting = mclapply(1:dim(matInput)[1], function(x){
+ computeTargeting(matInput[x,2],LisGLs_Mutability[[x]])
+ })
+
+ LisGLs_MutationTypes = mclapply(1:length(matInput[,2]),function(x){
+ #print(x)
+ computeMutationTypes(matInput[x,2])
+ })
+
+ LisGLs_Exp = mclapply(1:dim(matInput)[1], function(x){
+ computeExpected(LisGLs_Targeting[[x]],LisGLs_MutationTypes[[x]])
+ })
+
+ ul_LisGLs_Exp = unlist(LisGLs_Exp)
+ return(matrix(ul_LisGLs_Exp,ncol=4,nrow=(length(ul_LisGLs_Exp)/4),byrow=T))
+ }else{
+ facGL <- factor(matInput[,2])
+ facLevels = levels(facGL)
+ LisGLs_MutabilityU = lapply(1:length(facLevels), function(x){
+ computeMutabilities(facLevels[x])
+ })
+ facIndex = match(facGL,facLevels)
+
+ LisGLs_Mutability = lapply(1:nrow(matInput), function(x){
+ cInput = rep(NA,nchar(matInput[x,1]))
+ cInput[s2c(matInput[x,1])!="N"] = 1
+ LisGLs_MutabilityU[[facIndex[x]]] * cInput
+ })
+
+ LisGLs_Targeting = lapply(1:dim(matInput)[1], function(x){
+ computeTargeting(matInput[x,2],LisGLs_Mutability[[x]])
+ })
+
+ LisGLs_MutationTypes = lapply(1:length(matInput[,2]),function(x){
+ #print(x)
+ computeMutationTypes(matInput[x,2])
+ })
+
+ LisGLs_Exp = lapply(1:dim(matInput)[1], function(x){
+ computeExpected(LisGLs_Targeting[[x]],LisGLs_MutationTypes[[x]])
+ })
+
+ ul_LisGLs_Exp = unlist(LisGLs_Exp)
+ return(matrix(ul_LisGLs_Exp,ncol=4,nrow=(length(ul_LisGLs_Exp)/4),byrow=T))
+
+ }
+ }
+
+ # Compute mutabilities of sequence based on the tri-nucleotide model
+ computeMutabilities <- function(paramSeq){
+ seqLen = nchar(paramSeq)
+ seqMutabilites = rep(NA,seqLen)
+
+ gaplessSeq = gsub("-", "", paramSeq)
+ gaplessSeqLen = nchar(gaplessSeq)
+ gaplessSeqMutabilites = rep(NA,gaplessSeqLen)
+
+ if(mutabilityModel!=5){
+ pos<- 3:(gaplessSeqLen)
+ subSeq = substr(rep(gaplessSeq,gaplessSeqLen-2),(pos-2),(pos+2))
+ gaplessSeqMutabilites[pos] =
+ tapply( c(
+ getMutability( substr(subSeq,1,3), 3) ,
+ getMutability( substr(subSeq,2,4), 2),
+ getMutability( substr(subSeq,3,5), 1)
+ ),rep(1:(gaplessSeqLen-2),3),mean,na.rm=TRUE
+ )
+ #Pos 1
+ subSeq = substr(gaplessSeq,1,3)
+ gaplessSeqMutabilites[1] = getMutability(subSeq , 1)
+ #Pos 2
+ subSeq = substr(gaplessSeq,1,4)
+ gaplessSeqMutabilites[2] = mean( c(
+ getMutability( substr(subSeq,1,3), 2) ,
+ getMutability( substr(subSeq,2,4), 1)
+ ),na.rm=T
+ )
+ seqMutabilites[which(s2c(paramSeq)!="-")]<- gaplessSeqMutabilites
+ return(seqMutabilites)
+ }else{
+
+ pos<- 3:(gaplessSeqLen)
+ subSeq = substr(rep(gaplessSeq,gaplessSeqLen-2),(pos-2),(pos+2))
+ gaplessSeqMutabilites[pos] = sapply(subSeq,function(x){ getMutability5(x) }, simplify=T)
+ seqMutabilites[which(s2c(paramSeq)!="-")]<- gaplessSeqMutabilites
+ return(seqMutabilites)
+ }
+
+ }
+
+ # Returns the mutability of a triplet at a given position
+ getMutability <- function(codon, pos=1:3){
+ triplets <- rownames(mutability)
+ mutability[ match(codon,triplets) ,pos]
+ }
+
+ getMutability5 <- function(fivemer){
+ return(mutability[fivemer])
+ }
+
+	# Returns the substitution probability
+ getTransistionProb <- function(nuc){
+ substitution[nuc,]
+ }
+
+ getTransistionProb5 <- function(fivemer){
+ if(any(which(fivemer==colnames(substitution)))){
+ return(substitution[,fivemer])
+ }else{
+ return(array(NA,4))
+ }
+ }
+
+ # Given a nuc, returns the other 3 nucs it can mutate to
+ canMutateTo <- function(nuc){
+ NUCLEOTIDES[- which(NUCLEOTIDES==nuc)]
+ }
+
+	# Given a nucleotide, returns the probability of each of the other nucleotides it can mutate to
+ canMutateToProb <- function(nuc){
+ substitution[nuc,canMutateTo(nuc)]
+ }
+
+	# Compute targeting, based on precomputed mutability & substitution
+ computeTargeting <- function(param_strSeq,param_vecMutabilities){
+
+ if(substitutionModel!=5){
+ vecSeq = s2c(param_strSeq)
+ matTargeting = sapply( 1:length(vecSeq), function(x) { param_vecMutabilities[x] * getTransistionProb(vecSeq[x]) } )
+ #matTargeting = apply( rbind(vecSeq,param_vecMutabilities),2, function(x) { as.vector(as.numeric(x[2]) * getTransistionProb(x[1])) } )
+ dimnames( matTargeting ) = list(NUCLEOTIDES,1:(length(vecSeq)))
+ return (matTargeting)
+ }else{
+
+ seqLen = nchar(param_strSeq)
+ seqsubstitution = matrix(NA,ncol=seqLen,nrow=4)
+ paramSeq <- param_strSeq
+ gaplessSeq = gsub("-", "", paramSeq)
+ gaplessSeqLen = nchar(gaplessSeq)
+ gaplessSeqSubstitution = matrix(NA,ncol=gaplessSeqLen,nrow=4)
+
+ pos<- 3:(gaplessSeqLen)
+ subSeq = substr(rep(gaplessSeq,gaplessSeqLen-2),(pos-2),(pos+2))
+ gaplessSeqSubstitution[,pos] = sapply(subSeq,function(x){ getTransistionProb5(x) }, simplify=T)
+ seqsubstitution[,which(s2c(paramSeq)!="-")]<- gaplessSeqSubstitution
+ #matTargeting <- param_vecMutabilities %*% seqsubstitution
+ matTargeting <- sweep(seqsubstitution,2,param_vecMutabilities,`*`)
+ dimnames( matTargeting ) = list(NUCLEOTIDES,1:(seqLen))
+ return (matTargeting)
+ }
+ }
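+	# The result is a 4 x nchar(sequence) matrix (rows = NUCLEOTIDES): entry [n,i] is the mutability
+	# of position i multiplied by the probability of substituting position i to nucleotide n, i.e.
+	# the relative likelihood of that particular point mutation.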
+
+	# Compute the mutation types
+ computeMutationTypes <- function(param_strSeq){
+ #cat(param_strSeq,"\n")
+ #vecSeq = trimToLastCodon(param_strSeq)
+ lenSeq = nchar(param_strSeq)
+ vecCodons = sapply({1:(lenSeq/3)}*3-2,function(x){substr(param_strSeq,x,x+2)})
+ matMutationTypes = matrix( unlist(CODON_TABLE[,vecCodons]) ,ncol=lenSeq,nrow=4, byrow=F)
+ dimnames( matMutationTypes ) = list(NUCLEOTIDES,1:(ncol(matMutationTypes)))
+ return(matMutationTypes)
+ }
+ computeMutationTypesFast <- function(param_strSeq){
+ matMutationTypes = matrix( CODON_TABLE[,param_strSeq] ,ncol=3,nrow=4, byrow=F)
+ #dimnames( matMutationTypes ) = list(NUCLEOTIDES,1:(length(vecSeq)))
+ return(matMutationTypes)
+ }
+ mutationTypeOptimized <- function( matOfCodons ){
+ apply( matOfCodons,1,function(x){ mutationType(x[2],x[1]) } )
+ }
+
+ # Returns a vector of codons 1 mutation away from the given codon
+ permutateAllCodon <- function(codon){
+ cCodon = s2c(codon)
+ matCodons = t(array(cCodon,dim=c(3,12)))
+ matCodons[1:4,1] = NUCLEOTIDES
+ matCodons[5:8,2] = NUCLEOTIDES
+ matCodons[9:12,3] = NUCLEOTIDES
+ apply(matCodons,1,c2s)
+ }
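+	# Example: permutateAllCodon("ACG") returns the 12 codons obtained by substituting each of the 3
+	# positions with A/C/G/T in turn: "ACG","CCG","GCG","TCG","AAG","ACG","AGG","ATG","ACA","ACC",
+	# "ACG","ACT" (the identity codon itself appears once per position).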
+
+ # Given two codons, tells you if the mutation is R or S (based on your definition)
+ mutationType <- function(codonFrom,codonTo){
+ if(testID==4){
+ if( is.na(codonFrom) | is.na(codonTo) | is.na(translateCodonToAminoAcid(codonFrom)) | is.na(translateCodonToAminoAcid(codonTo)) ){
+ return(NA)
+ }else{
+ mutationType = "S"
+ if( translateAminoAcidToTraitChange(translateCodonToAminoAcid(codonFrom)) != translateAminoAcidToTraitChange(translateCodonToAminoAcid(codonTo)) ){
+ mutationType = "R"
+ }
+ if(translateCodonToAminoAcid(codonTo)=="*" | translateCodonToAminoAcid(codonFrom)=="*"){
+ mutationType = "Stop"
+ }
+ return(mutationType)
+ }
+ }else if(testID==5){
+ if( is.na(codonFrom) | is.na(codonTo) | is.na(translateCodonToAminoAcid(codonFrom)) | is.na(translateCodonToAminoAcid(codonTo)) ){
+ return(NA)
+ }else{
+ if(codonFrom==codonTo){
+ mutationType = "S"
+ }else{
+ codonFrom = s2c(codonFrom)
+ codonTo = s2c(codonTo)
+ mutationType = "Stop"
+ nucOfI = codonFrom[which(codonTo!=codonFrom)]
+ if(nucOfI=="C"){
+ mutationType = "R"
+ }else if(nucOfI=="G"){
+ mutationType = "S"
+ }
+ }
+ return(mutationType)
+ }
+ }else{
+ if( is.na(codonFrom) | is.na(codonTo) | is.na(translateCodonToAminoAcid(codonFrom)) | is.na(translateCodonToAminoAcid(codonTo)) ){
+ return(NA)
+ }else{
+ mutationType = "S"
+ if( translateCodonToAminoAcid(codonFrom) != translateCodonToAminoAcid(codonTo) ){
+ mutationType = "R"
+ }
+ if(translateCodonToAminoAcid(codonTo)=="*" | translateCodonToAminoAcid(codonFrom)=="*"){
+ mutationType = "Stop"
+ }
+ return(mutationType)
+ }
+ }
+ }
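+	# A few sanity examples under the default amino-acid definition (testID set to neither 4 nor 5):
+	#   mutationType("TCA","TCG")  # "S"    (Ser -> Ser)
+	#   mutationType("TCA","TTA")  # "R"    (Ser -> Leu)
+	#   mutationType("TCA","TGA")  # "Stop" (TGA is a stop codon)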
+
+
+	# Given a matrix of targeting & its corresponding mutation types, returns
+	# a vector of Exp_RCDR, Exp_SCDR, Exp_RFWR, Exp_SFWR
+ computeExpected <- function(paramTargeting,paramMutationTypes){
+ # Replacements
+ RPos = which(paramMutationTypes=="R")
+ #FWR
+ Exp_R_FWR = sum(paramTargeting[ RPos[which(FWR_Nuc_Mat[RPos]==T)] ],na.rm=T)
+ #CDR
+ Exp_R_CDR = sum(paramTargeting[ RPos[which(CDR_Nuc_Mat[RPos]==T)] ],na.rm=T)
+ # Silents
+ SPos = which(paramMutationTypes=="S")
+ #FWR
+ Exp_S_FWR = sum(paramTargeting[ SPos[which(FWR_Nuc_Mat[SPos]==T)] ],na.rm=T)
+ #CDR
+ Exp_S_CDR = sum(paramTargeting[ SPos[which(CDR_Nuc_Mat[SPos]==T)] ],na.rm=T)
+
+ return(c(Exp_R_CDR,Exp_S_CDR,Exp_R_FWR,Exp_S_FWR))
+ }
+
+ # Count the mutations in a sequence
+ # each mutation is treated independently
+ analyzeMutations2NucUri_website <- function( rev_in_matrix ){
+ paramGL = rev_in_matrix[2,]
+ paramSeq = rev_in_matrix[1,]
+
+ #Fill seq with GL seq if gapped
+ #if( any(paramSeq=="-") ){
+ # gapPos_Seq = which(paramSeq=="-")
+ # gapPos_Seq_ToReplace = gapPos_Seq[paramGL[gapPos_Seq] != "-"]
+ # paramSeq[gapPos_Seq_ToReplace] = paramGL[gapPos_Seq_ToReplace]
+ #}
+
+
+ #if( any(paramSeq=="N") ){
+ # gapPos_Seq = which(paramSeq=="N")
+ # gapPos_Seq_ToReplace = gapPos_Seq[paramGL[gapPos_Seq] != "N"]
+ # paramSeq[gapPos_Seq_ToReplace] = paramGL[gapPos_Seq_ToReplace]
+ #}
+
+ analyzeMutations2NucUri( matrix(c( paramGL, paramSeq ),2,length(paramGL),byrow=T) )
+
+ }
+
+ #1 = GL
+ #2 = Seq
+ analyzeMutations2NucUri <- function( in_matrix=matrix(c(c("A","A","A","C","C","C"),c("A","G","G","C","C","A")),2,6,byrow=T) ){
+ paramGL = in_matrix[2,]
+ paramSeq = in_matrix[1,]
+ paramSeqUri = paramGL
+ #mutations = apply(rbind(paramGL,paramSeq), 2, function(x){!x[1]==x[2]})
+ mutations_val = paramGL != paramSeq
+ if(any(mutations_val)){
+ mutationPos = {1:length(mutations_val)}[mutations_val]
+ mutationPos = mutationPos[sapply(mutationPos, function(x){!any(paramSeq[getCodonPos(x)]=="N")})]
+ length_mutations =length(mutationPos)
+ mutationInfo = rep(NA,length_mutations)
+ if(any(mutationPos)){
+
+ pos<- mutationPos
+ pos_array<-array(sapply(pos,getCodonPos))
+ codonGL = paramGL[pos_array]
+
+ codonSeq = sapply(pos,function(x){
+ seqP = paramGL[getCodonPos(x)]
+ muCodonPos = {x-1}%%3+1
+ seqP[muCodonPos] = paramSeq[x]
+ return(seqP)
+ })
+ GLcodons = apply(matrix(codonGL,length_mutations,3,byrow=TRUE),1,c2s)
+ Seqcodons = apply(codonSeq,2,c2s)
+ mutationInfo = apply(rbind(GLcodons , Seqcodons),2,function(x){mutationType(c2s(x[1]),c2s(x[2]))})
+ names(mutationInfo) = mutationPos
+ }
+ if(any(!is.na(mutationInfo))){
+ return(mutationInfo[!is.na(mutationInfo)])
+ }else{
+ return(NA)
+ }
+
+
+ }else{
+ return (NA)
+ }
+ }
+
+ processNucMutations2 <- function(mu){
+ if(!is.na(mu)){
+ #R
+ if(any(mu=="R")){
+ Rs = mu[mu=="R"]
+ nucNumbs = as.numeric(names(Rs))
+ R_CDR = sum(as.integer(CDR_Nuc[nucNumbs]),na.rm=T)
+ R_FWR = sum(as.integer(FWR_Nuc[nucNumbs]),na.rm=T)
+ }else{
+ R_CDR = 0
+ R_FWR = 0
+ }
+
+ #S
+ if(any(mu=="S")){
+ Ss = mu[mu=="S"]
+ nucNumbs = as.numeric(names(Ss))
+ S_CDR = sum(as.integer(CDR_Nuc[nucNumbs]),na.rm=T)
+ S_FWR = sum(as.integer(FWR_Nuc[nucNumbs]),na.rm=T)
+ }else{
+ S_CDR = 0
+ S_FWR = 0
+ }
+
+
+ retVec = c(R_CDR,S_CDR,R_FWR,S_FWR)
+ retVec[is.na(retVec)]=0
+ return(retVec)
+ }else{
+ return(rep(0,4))
+ }
+ }
+
+
+ ## Z-score Test
+ computeZScore <- function(mat, test="Focused"){
+ matRes <- matrix(NA,ncol=2,nrow=(nrow(mat)))
+ if(test=="Focused"){
+ #Z_Focused_CDR
+ #P_Denom = sum( mat[1,c(5,6,8)], na.rm=T )
+ P = apply(mat[,c(5,6,8)],1,function(x){(x[1]/sum(x))})
+ R_mean = apply(cbind(mat[,c(1,2,4)],P),1,function(x){x[4]*(sum(x[1:3]))})
+ R_sd=sqrt(R_mean*(1-P))
+ matRes[,1] = (mat[,1]-R_mean)/R_sd
+
+ #Z_Focused_FWR
+ #P_Denom = sum( mat[1,c(7,6,8)], na.rm=T )
+ P = apply(mat[,c(7,6,8)],1,function(x){(x[1]/sum(x))})
+ R_mean = apply(cbind(mat[,c(3,2,4)],P),1,function(x){x[4]*(sum(x[1:3]))})
+ R_sd=sqrt(R_mean*(1-P))
+ matRes[,2] = (mat[,3]-R_mean)/R_sd
+ }
+
+ if(test=="Local"){
+			#Z_Local_CDR
+ #P_Denom = sum( mat[1,c(5,6,8)], na.rm=T )
+ P = apply(mat[,c(5,6)],1,function(x){(x[1]/sum(x))})
+ R_mean = apply(cbind(mat[,c(1,2)],P),1,function(x){x[3]*(sum(x[1:2]))})
+ R_sd=sqrt(R_mean*(1-P))
+ matRes[,1] = (mat[,1]-R_mean)/R_sd
+
+			#Z_Local_FWR
+ #P_Denom = sum( mat[1,c(7,6,8)], na.rm=T )
+ P = apply(mat[,c(7,8)],1,function(x){(x[1]/sum(x))})
+ R_mean = apply(cbind(mat[,c(3,4)],P),1,function(x){x[3]*(sum(x[1:2]))})
+ R_sd=sqrt(R_mean*(1-P))
+ matRes[,2] = (mat[,3]-R_mean)/R_sd
+ }
+
+ if(test=="Imbalanced"){
+			#Z_Imbalanced_CDR
+ #P_Denom = sum( mat[1,c(5,6,8)], na.rm=T )
+ P = apply(mat[,5:8],1,function(x){((x[1]+x[2])/sum(x))})
+ R_mean = apply(cbind(mat[,1:4],P),1,function(x){x[5]*(sum(x[1:4]))})
+ R_sd=sqrt(R_mean*(1-P))
+ matRes[,1] = (mat[,1]-R_mean)/R_sd
+
+			#Z_Imbalanced_FWR
+ #P_Denom = sum( mat[1,c(7,6,8)], na.rm=T )
+ P = apply(mat[,5:8],1,function(x){((x[3]+x[4])/sum(x))})
+ R_mean = apply(cbind(mat[,1:4],P),1,function(x){x[5]*(sum(x[1:4]))})
+ R_sd=sqrt(R_mean*(1-P))
+ matRes[,2] = (mat[,3]-R_mean)/R_sd
+ }
+
+ matRes[is.nan(matRes)] = NA
+ return(matRes)
+ }
+
+ # Return a p-value for a z-score
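+	# One-sided: a positive z returns the upper-tail probability, a negative z returns minus the
+	# lower-tail probability, so the sign encodes direction (e.g. z2p(1.96) ~ 0.025, z2p(-1.96) ~ -0.025).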
+ z2p <- function(z){
+ p=NA
+ if( !is.nan(z) && !is.na(z)){
+ if(z>0){
+ p = (1 - pnorm(z,0,1))
+ } else if(z<0){
+ p = (-1 * pnorm(z,0,1))
+ } else{
+ p = 0.5
+ }
+ }else{
+ p = NA
+ }
+ return(p)
+ }
+
+
+ ## Bayesian Test
+
+ # Fitted parameter for the bayesian framework
+BAYESIAN_FITTED<-c(0.407277142798302, 0.554007336744485, 0.63777155771234, 0.693989162719009, 0.735450014674917, 0.767972534429806, 0.794557287143399, 0.816906816601605, 0.83606796225341, 0.852729446430296, 0.867370424541641, 0.880339760590323, 0.891900995024999, 0.902259181289864, 0.911577919359,0.919990301665853, 0.927606458124537, 0.934518806350661, 0.940805863754375, 0.946534836475715, 0.951763691199255, 0.95654428191308, 0.960920179487397, 0.964930893680829, 0.968611312149038, 0.971992459313836, 0.975102110004818, 0.977964943023096, 0.980603428208439, 0.983037660179428, 0.985285800977406, 0.987364285326685, 0.989288037855441, 0.991070478823525, 0.992723699729969, 0.994259575477392, 0.995687688867975, 0.997017365051493, 0.998257085153047, 0.999414558305388, 1.00049681357804, 1.00151036237481, 1.00246080204981, 1.00335370751909, 1.0041939329768, 1.0049859393417, 1.00573382091263, 1.00644127217376, 1.00711179729107, 1.00774845526417, 1.00835412715854, 1.00893143010366, 1.00948275846309, 1.01001030293661, 1.01051606798079, 1.01100188771288, 1.01146944044216, 1.01192026195449, 1.01235575766094, 1.01277721370986)
+ CONST_i <- sort(c(((2^(seq(-39,0,length.out=201)))/2)[1:200],(c(0:11,13:99)+0.5)/100,1-(2^(seq(-39,0,length.out=201)))/2))
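+	# BAYESIAN_FITTED[n] is the fitted prior value added to both beta shape parameters for n
+	# observed/total mutations (indexed by min(N,60) in calculate_bayes); CONST_i is the grid over
+	# (0,1), densified near 0 and 1, on which the posterior is evaluated.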
+
+	# Given x, N & p, returns a pdf
+ calculate_bayes <- function ( x=3, N=10, p=0.33,
+ i=CONST_i,
+ max_sigma=20,length_sigma=4001
+ ){
+ if(!0%in%N){
+ G <- max(length(x),length(N),length(p))
+ x=array(x,dim=G)
+ N=array(N,dim=G)
+ p=array(p,dim=G)
+ sigma_s<-seq(-max_sigma,max_sigma,length.out=length_sigma)
+ sigma_1<-log({i/{1-i}}/{p/{1-p}})
+ index<-min(N,60)
+ y<-dbeta(i,x+BAYESIAN_FITTED[index],N+BAYESIAN_FITTED[index]-x)*(1-p)*p*exp(sigma_1)/({1-p}^2+2*p*{1-p}*exp(sigma_1)+{p^2}*exp(2*sigma_1))
+ if(!sum(is.na(y))){
+ tmp<-approx(sigma_1,y,sigma_s)$y
+ tmp/sum(tmp)/{2*max_sigma/{length_sigma-1}}
+ }else{
+ return(NA)
+ }
+ }else{
+ return(NA)
+ }
+ }
+ # Given a mat of observed & expected, return a list of CDR & FWR pdf for selection
+ computeBayesianScore <- function(mat, test="Focused", max_sigma=20,length_sigma=4001){
+ flagOneSeq = F
+ if(nrow(mat)==1){
+ mat=rbind(mat,mat)
+ flagOneSeq = T
+ }
+ if(test=="Focused"){
+ #CDR
+ P = c(apply(mat[,c(5,6,8)],1,function(x){(x[1]/sum(x))}),0.5)
+ N = c(apply(mat[,c(1,2,4)],1,function(x){(sum(x))}),0)
+ X = c(mat[,1],0)
+ bayesCDR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
+ bayesCDR = bayesCDR[-length(bayesCDR)]
+
+ #FWR
+ P = c(apply(mat[,c(7,6,8)],1,function(x){(x[1]/sum(x))}),0.5)
+ N = c(apply(mat[,c(3,2,4)],1,function(x){(sum(x))}),0)
+ X = c(mat[,3],0)
+ bayesFWR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
+ bayesFWR = bayesFWR[-length(bayesFWR)]
+ }
+
+ if(test=="Local"){
+ #CDR
+ P = c(apply(mat[,c(5,6)],1,function(x){(x[1]/sum(x))}),0.5)
+ N = c(apply(mat[,c(1,2)],1,function(x){(sum(x))}),0)
+ X = c(mat[,1],0)
+ bayesCDR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
+ bayesCDR = bayesCDR[-length(bayesCDR)]
+
+ #FWR
+ P = c(apply(mat[,c(7,8)],1,function(x){(x[1]/sum(x))}),0.5)
+ N = c(apply(mat[,c(3,4)],1,function(x){(sum(x))}),0)
+ X = c(mat[,3],0)
+ bayesFWR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
+ bayesFWR = bayesFWR[-length(bayesFWR)]
+ }
+
+ if(test=="Imbalanced"){
+ #CDR
+ P = c(apply(mat[,c(5:8)],1,function(x){((x[1]+x[2])/sum(x))}),0.5)
+ N = c(apply(mat[,c(1:4)],1,function(x){(sum(x))}),0)
+ X = c(apply(mat[,c(1:2)],1,function(x){(sum(x))}),0)
+ bayesCDR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
+ bayesCDR = bayesCDR[-length(bayesCDR)]
+
+ #FWR
+ P = c(apply(mat[,c(5:8)],1,function(x){((x[3]+x[4])/sum(x))}),0.5)
+ N = c(apply(mat[,c(1:4)],1,function(x){(sum(x))}),0)
+ X = c(apply(mat[,c(3:4)],1,function(x){(sum(x))}),0)
+ bayesFWR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
+ bayesFWR = bayesFWR[-length(bayesFWR)]
+ }
+
+ if(test=="ImbalancedSilent"){
+ #CDR
+ P = c(apply(mat[,c(6,8)],1,function(x){((x[1])/sum(x))}),0.5)
+ N = c(apply(mat[,c(2,4)],1,function(x){(sum(x))}),0)
+ X = c(apply(mat[,c(2,4)],1,function(x){(x[1])}),0)
+ bayesCDR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
+ bayesCDR = bayesCDR[-length(bayesCDR)]
+
+ #FWR
+ P = c(apply(mat[,c(6,8)],1,function(x){((x[2])/sum(x))}),0.5)
+ N = c(apply(mat[,c(2,4)],1,function(x){(sum(x))}),0)
+ X = c(apply(mat[,c(2,4)],1,function(x){(x[2])}),0)
+ bayesFWR = apply(cbind(X,N,P),1,function(x){calculate_bayes(x=x[1],N=x[2],p=x[3],max_sigma=max_sigma,length_sigma=length_sigma)})
+ bayesFWR = bayesFWR[-length(bayesFWR)]
+ }
+
+ if(flagOneSeq==T){
+ bayesCDR = bayesCDR[1]
+ bayesFWR = bayesFWR[1]
+ }
+ return( list("CDR"=bayesCDR, "FWR"=bayesFWR) )
+ }
+
+	## Convolution
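+	# break2chunks: split G columns into chunks of roughly sqrt(G) (rounded to a power of two);
+	# any remainder is folded into the last chunk.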
+ break2chunks<-function(G=1000){
+ base<-2^round(log(sqrt(G),2),0)
+ return(c(rep(base,floor(G/base)-1),base+G-(floor(G/base)*base)))
+ }
+
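+	# Decompose G into powers of two, returning the exponents largest-first,
+	# e.g. PowersOfTwo(13) gives c(3, 2, 0).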
+ PowersOfTwo <- function(G=100){
+ exponents <- array()
+ i = 0
+ while(G > 0){
+ i=i+1
+ exponents[i] <- floor( log2(G) )
+ G <- G-2^exponents[i]
+ }
+ return(exponents)
+ }
+
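+	# Convolve 2^k equally weighted posterior columns pairwise until a single column remains.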
+ convolutionPowersOfTwo <- function( cons, length_sigma=4001 ){
+ G = ncol(cons)
+ if(G>1){
+ for(gen in log(G,2):1){
+ ll<-seq(from=2,to=2^gen,by=2)
+ sapply(ll,function(l){cons[,l/2]<<-weighted_conv(cons[,l],cons[,l-1],length_sigma=length_sigma)})
+ }
+ }
+ return( cons[,1] )
+ }
+
+ convolutionPowersOfTwoByTwos <- function( cons, length_sigma=4001,G=1 ){
+ if(length(ncol(cons))) G<-ncol(cons)
+ groups <- PowersOfTwo(G)
+ matG <- matrix(NA, ncol=length(groups), nrow=length(cons)/G )
+ startIndex = 1
+ for( i in 1:length(groups) ){
+ stopIndex <- 2^groups[i] + startIndex - 1
+ if(stopIndex!=startIndex){
+ matG[,i] <- convolutionPowersOfTwo( cons[,startIndex:stopIndex], length_sigma=length_sigma )
+ startIndex = stopIndex + 1
+ }
+ else {
+ if(G>1) matG[,i] <- cons[,startIndex:stopIndex]
+ else matG[,i] <- cons
+ #startIndex = stopIndex + 1
+ }
+ }
+ return( list( matG, groups ) )
+ }
+
+ weighted_conv<-function(x,y,w=1,m=100,length_sigma=4001){
+ lx<-length(x)
+ ly<-length(y)
+		# re-sample the two densities so that the number of grid points of y relative to x equals
+		# the weight w, then convolve and map the result back onto the length_sigma grid
+		if(w<1){
+			x1<-approx(1:lx,x,seq(1,lx,length.out=round(m/w)))$y
+			y1<-approx(1:ly,y,seq(1,ly,length.out=m))$y
+		}else{
+			x1<-approx(1:lx,x,seq(1,lx,length.out=m))$y
+			y1<-approx(1:ly,y,seq(1,ly,length.out=round(m*w)))$y
+		}
+		lx<-length(x1)
+		ly<-length(y1)
+		tmp<-approx(x=1:(lx+ly-1),y=convolve(x1,rev(y1),type="open"),xout=seq(1,lx+ly-1,length.out=length_sigma))$y
+		tmp[tmp<=0]=0
+		return(tmp/sum(tmp))
+	}
+
+	# Given a list of PDFs, returns a convoluted PDF
+	groupPosteriors <- function( listPosteriors, max_sigma=20, length_sigma=4001, Threshold=2 ){
+		listPosteriors = listPosteriors[ !is.na(listPosteriors) ]
+		Length_Postrior<-length(listPosteriors)
+		if(Length_Postrior>1 & Length_Postrior<=Threshold){
+			cons = matrix(unlist(listPosteriors),length(listPosteriors[[1]]),length(listPosteriors))
+			listMatG <- convolutionPowersOfTwoByTwos(cons,length_sigma=length_sigma)
+			y<-calculate_bayesGHelper(listMatG,length_sigma=length_sigma)
+			return( y/sum(y)/(2*max_sigma/(length_sigma-1)) )
+		}else if(Length_Postrior==1) return(listPosteriors[[1]])
+		else if(Length_Postrior==0) return(NA)
+		else {
+			cons = matrix(unlist(listPosteriors),length(listPosteriors[[1]]),length(listPosteriors))
+			y = fastConv(cons,max_sigma=max_sigma, length_sigma=length_sigma )
+			return( y/sum(y)/(2*max_sigma/(length_sigma-1)) )
+		}
+	}
+
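+	# Convolve many posterior columns: split them into chunks (break2chunks), reduce each chunk by
+	# powers-of-two convolution, then combine the chunk results with calculate_bayesGHelper.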
+ fastConv<-function(cons, max_sigma=20, length_sigma=4001){
+ chunks<-break2chunks(G=ncol(cons))
+ if(ncol(cons)==3) chunks<-2:1
+ index_chunks_end <- cumsum(chunks)
+ index_chunks_start <- c(1,index_chunks_end[-length(index_chunks_end)]+1)
+ index_chunks <- cbind(index_chunks_start,index_chunks_end)
+
+ case <- sum(chunks!=chunks[1])
+ if(case==1) End <- max(1,((length(index_chunks)/2)-1))
+ else End <- max(1,((length(index_chunks)/2)))
+
+ firsts <- sapply(1:End,function(i){
+ indexes<-index_chunks[i,1]:index_chunks[i,2]
+ convolutionPowersOfTwoByTwos(cons[ ,indexes])[[1]]
+ })
+ if(case==0){
+ result<-calculate_bayesGHelper( convolutionPowersOfTwoByTwos(firsts) )
+ }else if(case==1){
+ last<-list(calculate_bayesGHelper(
+ convolutionPowersOfTwoByTwos( cons[ ,index_chunks[length(index_chunks)/2,1]:index_chunks[length(index_chunks)/2,2]] )
+ ),0)
+ result_first<-calculate_bayesGHelper(convolutionPowersOfTwoByTwos(firsts))
+ result<-calculate_bayesGHelper(
+ list(
+ cbind(
+ result_first,last[[1]]),
+ c(log(index_chunks_end[length(index_chunks)/2-1],2),log(index_chunks[length(index_chunks)/2,2]-index_chunks[length(index_chunks)/2,1]+1,2))
+ )
+ )
+ }
+ return(as.vector(result))
+ }
+
+ # Computes the 95% CI for a pdf
+ calcBayesCI <- function(Pdf,low=0.025,up=0.975,max_sigma=20, length_sigma=4001){
+ if(length(Pdf)!=length_sigma) return(NA)
+ sigma_s=seq(-max_sigma,max_sigma,length.out=length_sigma)
+ cdf = cumsum(Pdf)
+ cdf = cdf/cdf[length(cdf)]
+ return( c(sigma_s[findInterval(low,cdf)-1] , sigma_s[findInterval(up,cdf)]) )
+ }
+
+ # Computes a mean for a pdf
+ calcBayesMean <- function(Pdf,max_sigma=20,length_sigma=4001){
+ if(length(Pdf)!=length_sigma) return(NA)
+ sigma_s=seq(-max_sigma,max_sigma,length.out=length_sigma)
+ norm = {length_sigma-1}/2/max_sigma
+ return( (Pdf%*%sigma_s/norm) )
+ }
+
+ # Returns the mean, and the 95% CI for a pdf
+ calcBayesOutputInfo <- function(Pdf,low=0.025,up=0.975,max_sigma=20, length_sigma=4001){
+ if(is.na(Pdf))
+ return(rep(NA,3))
+ bCI = calcBayesCI(Pdf=Pdf,low=low,up=up,max_sigma=max_sigma,length_sigma=length_sigma)
+ bMean = calcBayesMean(Pdf=Pdf,max_sigma=max_sigma,length_sigma=length_sigma)
+ return(c(bMean, bCI))
+ }
+
+ # Computes the p-value of a pdf
+ computeSigmaP <- function(Pdf, length_sigma=4001, max_sigma=20){
+ if(length(Pdf)>1){
+ norm = {length_sigma-1}/2/max_sigma
+ pVal = {sum(Pdf[1:{{length_sigma-1}/2}]) + Pdf[{{length_sigma+1}/2}]/2}/norm
+ if(pVal>0.5){
+ pVal = pVal-1
+ }
+ return(pVal)
+ }else{
+ return(NA)
+ }
+ }
+
+ # Compute p-value of two distributions
+ compareTwoDistsFaster <-function(sigma_S=seq(-20,20,length.out=4001), N=10000, dens1=runif(4001,0,1), dens2=runif(4001,0,1)){
+ #print(c(length(dens1),length(dens2)))
+ if(length(dens1)>1 & length(dens2)>1 ){
+ dens1<-dens1/sum(dens1)
+ dens2<-dens2/sum(dens2)
+ cum2 <- cumsum(dens2)-dens2/2
+ tmp<- sum(sapply(1:length(dens1),function(i)return(dens1[i]*cum2[i])))
+ #print(tmp)
+ if(tmp>0.5)tmp<-tmp-1
+ return( tmp )
+ }
+ else {
+ return(NA)
+ }
+ #return (sum(sapply(1:N,function(i)(sample(sigma_S,1,prob=dens1)>sample(sigma_S,1,prob=dens2))))/N)
+ }
+
+	# get number of sequences contributing to the sigma (i.e. sequences with mutations)
+ numberOfSeqsWithMutations <- function(matMutations,test=1){
+ if(test==4)test=2
+ cdrSeqs <- 0
+ fwrSeqs <- 0
+ if(test==1){#focused
+ cdrMutations <- apply(matMutations, 1, function(x){ sum(x[c(1,2,4)]) })
+ fwrMutations <- apply(matMutations, 1, function(x){ sum(x[c(3,4,2)]) })
+ if( any(which(cdrMutations>0)) ) cdrSeqs <- sum(cdrMutations>0)
+ if( any(which(fwrMutations>0)) ) fwrSeqs <- sum(fwrMutations>0)
+ }
+ if(test==2){#local
+ cdrMutations <- apply(matMutations, 1, function(x){ sum(x[c(1,2)]) })
+ fwrMutations <- apply(matMutations, 1, function(x){ sum(x[c(3,4)]) })
+ if( any(which(cdrMutations>0)) ) cdrSeqs <- sum(cdrMutations>0)
+ if( any(which(fwrMutations>0)) ) fwrSeqs <- sum(fwrMutations>0)
+ }
+ return(c("CDR"=cdrSeqs, "FWR"=fwrSeqs))
+}
+
+
+
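+# Map the sign of sigma and a p-value to a fill colour: green shades for negative selection,
+# red shades for positive, darker shades for smaller p; white when p is NA, 0 or 1.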
+shadeColor <- function(sigmaVal=NA,pVal=NA){
+ if(is.na(sigmaVal) & is.na(pVal)) return(NA)
+ if(is.na(sigmaVal) & !is.na(pVal)) sigmaVal=sign(pVal)
+ if(is.na(pVal) || pVal==1 || pVal==0){
+ returnColor = "#FFFFFF";
+ }else{
+ colVal=abs(pVal);
+
+ if(sigmaVal<0){
+ if(colVal>0.1)
+ returnColor = "#CCFFCC";
+ if(colVal<=0.1)
+ returnColor = "#99FF99";
+ if(colVal<=0.050)
+ returnColor = "#66FF66";
+ if(colVal<=0.010)
+ returnColor = "#33FF33";
+ if(colVal<=0.005)
+ returnColor = "#00FF00";
+
+ }else{
+ if(colVal>0.1)
+ returnColor = "#FFCCCC";
+ if(colVal<=0.1)
+ returnColor = "#FF9999";
+ if(colVal<=0.05)
+ returnColor = "#FF6666";
+ if(colVal<=0.01)
+ returnColor = "#FF3333";
+ if(colVal<0.005)
+ returnColor = "#FF0000";
+ }
+ }
+
+ return(returnColor)
+}
+
+
+
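+# Coordinates just outside the top-left corner of the current plot region, for placing labels;
+# log = 1, 2 or 3 marks the x axis, y axis or both as logarithmic.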
+plotHelp <- function(xfrac=0.05,yfrac=0.05,log=FALSE){
+ if(!log){
+ x = par()$usr[1]-(par()$usr[2]-par()$usr[1])*xfrac
+ y = par()$usr[4]+(par()$usr[4]-par()$usr[3])*yfrac
+ }else {
+ if(log==2){
+ x = par()$usr[1]-(par()$usr[2]-par()$usr[1])*xfrac
+ y = 10^((par()$usr[4])+((par()$usr[4])-(par()$usr[3]))*yfrac)
+ }
+ if(log==1){
+ x = 10^((par()$usr[1])-((par()$usr[2])-(par()$usr[1]))*xfrac)
+ y = par()$usr[4]+(par()$usr[4]-par()$usr[3])*yfrac
+ }
+ if(log==3){
+ x = 10^((par()$usr[1])-((par()$usr[2])-(par()$usr[1]))*xfrac)
+ y = 10^((par()$usr[4])+((par()$usr[4])-(par()$usr[3]))*yfrac)
+ }
+ }
+ return(c("x"=x,"y"=y))
+}
+
+# SHMulation
+
+ # Based on targeting, introduce a single mutation & then update the targeting
+ oneMutation <- function(){
+		# Pick a position + mutation
+ posMutation = sample(1:(seqGermlineLen*4),1,replace=F,prob=as.vector(seqTargeting))
+ posNucNumb = ceiling(posMutation/4) # Nucleotide number
+ posNucKind = 4 - ( (posNucNumb*4) - posMutation ) # Nuc the position mutates to
+
+ #mutate the simulation sequence
+ seqSimVec <- s2c(seqSim)
+ seqSimVec[posNucNumb] <- NUCLEOTIDES[posNucKind]
+ seqSim <<- c2s(seqSimVec)
+
+ #update Mutability, Targeting & MutationsTypes
+ updateMutabilityNTargeting(posNucNumb)
+
+ #return(c(posNucNumb,NUCLEOTIDES[posNucKind]))
+ return(posNucNumb)
+ }
+
+ updateMutabilityNTargeting <- function(position){
+ min_i<-max((position-2),1)
+ max_i<-min((position+2),nchar(seqSim))
+ min_ii<-min(min_i,3)
+
+ #mutability - update locally
+ seqMutability[(min_i):(max_i)] <<- computeMutabilities(substr(seqSim,position-4,position+4))[(min_ii):(max_i-min_i+min_ii)]
+
+
+ #targeting - compute locally
+ seqTargeting[,min_i:max_i] <<- computeTargeting(substr(seqSim,min_i,max_i),seqMutability[min_i:max_i])
+ seqTargeting[is.na(seqTargeting)] <<- 0
+ #mutCodonPos = getCodonPos(position)
+ mutCodonPos = seq(getCodonPos(min_i)[1],getCodonPos(max_i)[3])
+ #cat(mutCodonPos,"\n")
+ mutTypeCodon = getCodonPos(position)
+ seqMutationTypes[,mutTypeCodon] <<- computeMutationTypesFast( substr(seqSim,mutTypeCodon[1],mutTypeCodon[3]) )
+ # Stop = 0
+ if(any(seqMutationTypes[,mutCodonPos]=="Stop",na.rm=T )){
+ seqTargeting[,mutCodonPos][seqMutationTypes[,mutCodonPos]=="Stop"] <<- 0
+ }
+
+
+ #Selection
+ selectedPos = (min_i*4-4)+(which(seqMutationTypes[,min_i:max_i]=="R"))
+ # CDR
+ selectedCDR = selectedPos[which(matCDR[selectedPos]==T)]
+ seqTargeting[selectedCDR] <<- seqTargeting[selectedCDR] * exp(selCDR)
+ seqTargeting[selectedCDR] <<- seqTargeting[selectedCDR]/baseLineCDR_K
+
+ # FWR
+ selectedFWR = selectedPos[which(matFWR[selectedPos]==T)]
+ seqTargeting[selectedFWR] <<- seqTargeting[selectedFWR] * exp(selFWR)
+ seqTargeting[selectedFWR] <<- seqTargeting[selectedFWR]/baseLineFWR_K
+
+ }
+
+
+
+	# Validate the mutation: record a newly mutated position; if a previously mutated position reverted to the germline base, remove it from the list.
+ validateMutation <- function(){
+ if( !(mutatedPos%in%mutatedPositions) ){ # if it's a new mutation
+ uniqueMutationsIntroduced <<- uniqueMutationsIntroduced + 1
+ mutatedPositions[uniqueMutationsIntroduced] <<- mutatedPos
+ }else{
+ if(substr(seqSim,mutatedPos,mutatedPos)==substr(seqGermline,mutatedPos,mutatedPos)){ # back to germline mutation
+ mutatedPositions <<- mutatedPositions[-which(mutatedPositions==mutatedPos)]
+ uniqueMutationsIntroduced <<- uniqueMutationsIntroduced - 1
+ }
+ }
+ }
+
+
+
+ # Places text (labels) at normalized coordinates
+ myaxis <- function(xfrac=0.05,yfrac=0.05,log=FALSE,w="text",cex=1,adj=1,thecol="black"){
+ par(xpd=TRUE)
+ if(!log)
+ text(par()$usr[1]-(par()$usr[2]-par()$usr[1])*xfrac,par()$usr[4]+(par()$usr[4]-par()$usr[3])*yfrac,w,cex=cex,adj=adj,col=thecol)
+ else {
+ if(log==2)
+ text(
+ par()$usr[1]-(par()$usr[2]-par()$usr[1])*xfrac,
+ 10^((par()$usr[4])+((par()$usr[4])-(par()$usr[3]))*yfrac),
+ w,cex=cex,adj=adj,col=thecol)
+ if(log==1)
+ text(
+ 10^((par()$usr[1])-((par()$usr[2])-(par()$usr[1]))*xfrac),
+ par()$usr[4]+(par()$usr[4]-par()$usr[3])*yfrac,
+ w,cex=cex,adj=adj,col=thecol)
+ if(log==3)
+ text(
+ 10^((par()$usr[1])-((par()$usr[2])-(par()$usr[1]))*xfrac),
+ 10^((par()$usr[4])+((par()$usr[4])-(par()$usr[3]))*yfrac),
+ w,cex=cex,adj=adj,col=thecol)
+ }
+ par(xpd=FALSE)
+ }
+
+
+
+ # Count the mutations in a sequence
+ analyzeMutations <- function( inputMatrixIndex, model = 0 , multipleMutation=0, seqWithStops=0){
+
+ paramGL = s2c(matInput[inputMatrixIndex,2])
+ paramSeq = s2c(matInput[inputMatrixIndex,1])
+
+ #if( any(paramSeq=="N") ){
+ # gapPos_Seq = which(paramSeq=="N")
+ # gapPos_Seq_ToReplace = gapPos_Seq[paramGL[gapPos_Seq] != "N"]
+ # paramSeq[gapPos_Seq_ToReplace] = paramGL[gapPos_Seq_ToReplace]
+ #}
+ mutations_val = paramGL != paramSeq
+
+ if(any(mutations_val)){
+ mutationPos = which(mutations_val)#{1:length(mutations_val)}[mutations_val]
+ length_mutations =length(mutationPos)
+ mutationInfo = rep(NA,length_mutations)
+
+ pos<- mutationPos
+ pos_array<-array(sapply(pos,getCodonPos))
+ codonGL = paramGL[pos_array]
+ codonSeqWhole = paramSeq[pos_array]
+ codonSeq = sapply(pos,function(x){
+ seqP = paramGL[getCodonPos(x)]
+ muCodonPos = {x-1}%%3+1
+ seqP[muCodonPos] = paramSeq[x]
+ return(seqP)
+ })
+ GLcodons = apply(matrix(codonGL,length_mutations,3,byrow=TRUE),1,c2s)
+ SeqcodonsWhole = apply(matrix(codonSeqWhole,length_mutations,3,byrow=TRUE),1,c2s)
+ Seqcodons = apply(codonSeq,2,c2s)
+
+ mutationInfo = apply(rbind(GLcodons , Seqcodons),2,function(x){mutationType(c2s(x[1]),c2s(x[2]))})
+ names(mutationInfo) = mutationPos
+
+ mutationInfoWhole = apply(rbind(GLcodons , SeqcodonsWhole),2,function(x){mutationType(c2s(x[1]),c2s(x[2]))})
+ names(mutationInfoWhole) = mutationPos
+
+ mutationInfo <- mutationInfo[!is.na(mutationInfo)]
+ mutationInfoWhole <- mutationInfoWhole[!is.na(mutationInfoWhole)]
+
+ if(any(!is.na(mutationInfo))){
+
+ #Filter based on Stop (at the codon level)
+ if(seqWithStops==1){
+ nucleotidesAtStopCodons = names(mutationInfoWhole[mutationInfoWhole!="Stop"])
+ mutationInfo = mutationInfo[nucleotidesAtStopCodons]
+ mutationInfoWhole = mutationInfo[nucleotidesAtStopCodons]
+ }else{
+ countStops = sum(mutationInfoWhole=="Stop")
+ if(seqWithStops==2 & countStops==0) mutationInfo = NA
+ if(seqWithStops==3 & countStops>0) mutationInfo = NA
+ }
+
+ if(any(!is.na(mutationInfo))){
+ #Filter mutations based on multipleMutation
+ if(multipleMutation==1 & !is.na(mutationInfo)){
+ mutationCodons = getCodonNumb(as.numeric(names(mutationInfoWhole)))
+ tableMutationCodons <- table(mutationCodons)
+ codonsWithMultipleMutations <- as.numeric(names(tableMutationCodons[tableMutationCodons>1]))
+ if(any(codonsWithMultipleMutations)){
+ #remove the nucleotide mutations in the codons with multiple mutations
+ mutationInfo <- mutationInfo[!(mutationCodons %in% codonsWithMultipleMutations)]
+ #replace those codons with Ns in the input sequence
+ paramSeq[unlist(lapply(codonsWithMultipleMutations, getCodonNucs))] = "N"
+ matInput[inputMatrixIndex,1] <<- c2s(paramSeq)
+ }
+ }
+
+ #Filter mutations based on the model
+ if(any(mutationInfo)==T | is.na(any(mutationInfo))){
+
+ if(model==1 & !is.na(mutationInfo)){
+ mutationInfo <- mutationInfo[mutationInfo=="S"]
+ }
+ if(any(mutationInfo)==T | is.na(any(mutationInfo))) return(mutationInfo)
+ else return(NA)
+ }else{
+ return(NA)
+ }
+ }else{
+ return(NA)
+ }
+
+
+ }else{
+ return(NA)
+ }
+
+
+ }else{
+ return (NA)
+ }
+ }
+
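+	# Same as analyzeMutations(), but works on a c(input, germline) pair passed in directly and
+	# returns list(mutations, input sequence) instead of updating matInput in place.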
+ analyzeMutationsFixed <- function( inputArray, model = 0 , multipleMutation=0, seqWithStops=0){
+
+ paramGL = s2c(inputArray[2])
+ paramSeq = s2c(inputArray[1])
+ inputSeq <- inputArray[1]
+ #if( any(paramSeq=="N") ){
+ # gapPos_Seq = which(paramSeq=="N")
+ # gapPos_Seq_ToReplace = gapPos_Seq[paramGL[gapPos_Seq] != "N"]
+ # paramSeq[gapPos_Seq_ToReplace] = paramGL[gapPos_Seq_ToReplace]
+ #}
+ mutations_val = paramGL != paramSeq
+
+ if(any(mutations_val)){
+ mutationPos = which(mutations_val)#{1:length(mutations_val)}[mutations_val]
+ length_mutations =length(mutationPos)
+ mutationInfo = rep(NA,length_mutations)
+
+ pos<- mutationPos
+ pos_array<-array(sapply(pos,getCodonPos))
+ codonGL = paramGL[pos_array]
+ codonSeqWhole = paramSeq[pos_array]
+ codonSeq = sapply(pos,function(x){
+ seqP = paramGL[getCodonPos(x)]
+ muCodonPos = {x-1}%%3+1
+ seqP[muCodonPos] = paramSeq[x]
+ return(seqP)
+ })
+ GLcodons = apply(matrix(codonGL,length_mutations,3,byrow=TRUE),1,c2s)
+ SeqcodonsWhole = apply(matrix(codonSeqWhole,length_mutations,3,byrow=TRUE),1,c2s)
+ Seqcodons = apply(codonSeq,2,c2s)
+
+ mutationInfo = apply(rbind(GLcodons , Seqcodons),2,function(x){mutationType(c2s(x[1]),c2s(x[2]))})
+ names(mutationInfo) = mutationPos
+
+ mutationInfoWhole = apply(rbind(GLcodons , SeqcodonsWhole),2,function(x){mutationType(c2s(x[1]),c2s(x[2]))})
+ names(mutationInfoWhole) = mutationPos
+
+ mutationInfo <- mutationInfo[!is.na(mutationInfo)]
+ mutationInfoWhole <- mutationInfoWhole[!is.na(mutationInfoWhole)]
+
+ if(any(!is.na(mutationInfo))){
+
+ #Filter based on Stop (at the codon level)
+ if(seqWithStops==1){
+ nucleotidesAtStopCodons = names(mutationInfoWhole[mutationInfoWhole!="Stop"])
+ mutationInfo = mutationInfo[nucleotidesAtStopCodons]
+ mutationInfoWhole = mutationInfo[nucleotidesAtStopCodons]
+ }else{
+ countStops = sum(mutationInfoWhole=="Stop")
+ if(seqWithStops==2 & countStops==0) mutationInfo = NA
+ if(seqWithStops==3 & countStops>0) mutationInfo = NA
+ }
+
+ if(any(!is.na(mutationInfo))){
+ #Filter mutations based on multipleMutation
+ if(multipleMutation==1 & !is.na(mutationInfo)){
+ mutationCodons = getCodonNumb(as.numeric(names(mutationInfoWhole)))
+ tableMutationCodons <- table(mutationCodons)
+ codonsWithMultipleMutations <- as.numeric(names(tableMutationCodons[tableMutationCodons>1]))
+ if(any(codonsWithMultipleMutations)){
+ #remove the nucleotide mutations in the codons with multiple mutations
+ mutationInfo <- mutationInfo[!(mutationCodons %in% codonsWithMultipleMutations)]
+ #replace those codons with Ns in the input sequence
+ paramSeq[unlist(lapply(codonsWithMultipleMutations, getCodonNucs))] = "N"
+ #matInput[inputMatrixIndex,1] <<- c2s(paramSeq)
+ inputSeq <- c2s(paramSeq)
+ }
+ }
+
+ #Filter mutations based on the model
+ if(any(mutationInfo)==T | is.na(any(mutationInfo))){
+
+ if(model==1 & !is.na(mutationInfo)){
+ mutationInfo <- mutationInfo[mutationInfo=="S"]
+ }
+ if(any(mutationInfo)==T | is.na(any(mutationInfo))) return(list(mutationInfo,inputSeq))
+ else return(list(NA,inputSeq))
+ }else{
+ return(list(NA,inputSeq))
+ }
+ }else{
+ return(list(NA,inputSeq))
+ }
+
+
+ }else{
+ return(list(NA,inputSeq))
+ }
+
+
+ }else{
+ return (list(NA,inputSeq))
+ }
+ }
+
+ # triMutability Background Count
+ buildMutabilityModel <- function( inputMatrixIndex, model=0 , multipleMutation=0, seqWithStops=0, stopMutations=0){
+
+ #rowOrigMatInput = matInput[inputMatrixIndex,]
+ seqGL = gsub("-", "", matInput[inputMatrixIndex,2])
+ seqInput = gsub("-", "", matInput[inputMatrixIndex,1])
+ #matInput[inputMatrixIndex,] <<- cbind(seqInput,seqGL)
+ tempInput <- cbind(seqInput,seqGL)
+ seqLength = nchar(seqGL)
+ list_analyzeMutationsFixed<- analyzeMutationsFixed(tempInput, model, multipleMutation, seqWithStops)
+ mutationCount <- list_analyzeMutationsFixed[[1]]
+ seqInput <- list_analyzeMutationsFixed[[2]]
+ BackgroundMatrix = mutabilityMatrix
+ MutationMatrix = mutabilityMatrix
+ MutationCountMatrix = mutabilityMatrix
+ if(!is.na(mutationCount)){
+		if((stopMutations==0 & model==0) | (stopMutations==1 & (sum(mutationCount=="Stop")<length(mutationCount))) | (model==1 & (sum(mutationCount=="S")>0)) ){
+
+ fivermerStartPos = 1:(seqLength-4)
+ fivemerLength <- length(fivermerStartPos)
+ fivemerGL <- substr(rep(seqGL,length(fivermerStartPos)),(fivermerStartPos),(fivermerStartPos+4))
+ fivemerSeq <- substr(rep(seqInput,length(fivermerStartPos)),(fivermerStartPos),(fivermerStartPos+4))
+
+ #Background
+ for(fivemerIndex in 1:fivemerLength){
+ fivemer = fivemerGL[fivemerIndex]
+ if(!any(grep("N",fivemer))){
+ fivemerCodonPos = fivemerCodon(fivemerIndex)
+ fivemerReadingFrameCodon = substr(fivemer,fivemerCodonPos[1],fivemerCodonPos[3])
+ fivemerReadingFrameCodonInputSeq = substr(fivemerSeq[fivemerIndex],fivemerCodonPos[1],fivemerCodonPos[3])
+
+ # All mutations model
+ #if(!any(grep("N",fivemerReadingFrameCodon))){
+ if(model==0){
+ if(stopMutations==0){
+ if(!any(grep("N",fivemerReadingFrameCodonInputSeq)))
+ BackgroundMatrix[fivemer] <- (BackgroundMatrix[fivemer] + 1)
+ }else{
+ if( !any(grep("N",fivemerReadingFrameCodonInputSeq)) & translateCodonToAminoAcid(fivemerReadingFrameCodon)!="*" ){
+ positionWithinCodon = which(fivemerCodonPos==3)#positionsWithinCodon[(fivemerCodonPos[1]%%3)+1]
+ BackgroundMatrix[fivemer] <- (BackgroundMatrix[fivemer] + probNonStopMutations[fivemerReadingFrameCodon,positionWithinCodon])
+ }
+ }
+ }else{ # Only silent mutations
+ if( !any(grep("N",fivemerReadingFrameCodonInputSeq)) & translateCodonToAminoAcid(fivemerReadingFrameCodon)!="*" & translateCodonToAminoAcid(fivemerReadingFrameCodonInputSeq)==translateCodonToAminoAcid(fivemerReadingFrameCodon)){
+ positionWithinCodon = which(fivemerCodonPos==3)
+ BackgroundMatrix[fivemer] <- (BackgroundMatrix[fivemer] + probSMutations[fivemerReadingFrameCodon,positionWithinCodon])
+ }
+ }
+ #}
+ }
+ }
+
+ #Mutations
+ if(stopMutations==1) mutationCount = mutationCount[mutationCount!="Stop"]
+ if(model==1) mutationCount = mutationCount[mutationCount=="S"]
+ mutationPositions = as.numeric(names(mutationCount))
+ mutationCount = mutationCount[mutationPositions>2 & mutationPositions<(seqLength-1)]
+ mutationPositions = mutationPositions[mutationPositions>2 & mutationPositions<(seqLength-1)]
+ countMutations = 0
+ for(mutationPosition in mutationPositions){
+ fivemerIndex = mutationPosition-2
+ fivemer = fivemerSeq[fivemerIndex]
+ GLfivemer = fivemerGL[fivemerIndex]
+ fivemerCodonPos = fivemerCodon(fivemerIndex)
+ fivemerReadingFrameCodon = substr(fivemer,fivemerCodonPos[1],fivemerCodonPos[3])
+ fivemerReadingFrameCodonGL = substr(GLfivemer,fivemerCodonPos[1],fivemerCodonPos[3])
+ if(!any(grep("N",fivemer)) & !any(grep("N",GLfivemer))){
+ if(model==0){
+ countMutations = countMutations + 1
+ MutationMatrix[GLfivemer] <- (MutationMatrix[GLfivemer] + 1)
+ MutationCountMatrix[GLfivemer] <- (MutationCountMatrix[GLfivemer] + 1)
+ }else{
+ if( translateCodonToAminoAcid(fivemerReadingFrameCodonGL)!="*" ){
+ countMutations = countMutations + 1
+ positionWithinCodon = which(fivemerCodonPos==3)
+ glNuc = substr(fivemerReadingFrameCodonGL,positionWithinCodon,positionWithinCodon)
+ inputNuc = substr(fivemerReadingFrameCodon,positionWithinCodon,positionWithinCodon)
+ MutationMatrix[GLfivemer] <- (MutationMatrix[GLfivemer] + substitution[glNuc,inputNuc])
+ MutationCountMatrix[GLfivemer] <- (MutationCountMatrix[GLfivemer] + 1)
+ }
+ }
+ }
+ }
+
+ seqMutability = MutationMatrix/BackgroundMatrix
+ seqMutability = seqMutability/sum(seqMutability,na.rm=TRUE)
+ #cat(inputMatrixIndex,"\t",countMutations,"\n")
+ return(list("seqMutability" = seqMutability,"numbMutations" = countMutations,"seqMutabilityCount" = MutationCountMatrix, "BackgroundMatrix"=BackgroundMatrix))
+
+ }
+ }
+
+ }
+
+ #Returns the codon position containing the middle nucleotide
+ fivemerCodon <- function(fivemerIndex){
+ codonPos = list(2:4,1:3,3:5)
+ fivemerType = fivemerIndex%%3
+ return(codonPos[[fivemerType+1]])
+ }
+
+ #returns probability values for one mutation in codons resulting in R, S or Stop
+ probMutations <- function(typeOfMutation){
+ matMutationProb <- matrix(0,ncol=3,nrow=125,dimnames=list(words(alphabet = c(NUCLEOTIDES,"N"), length=3),c(1:3)))
+ for(codon in rownames(matMutationProb)){
+ if( !any(grep("N",codon)) ){
+ for(muPos in 1:3){
+ matCodon = matrix(rep(s2c(codon),3),nrow=3,ncol=3,byrow=T)
+ glNuc = matCodon[1,muPos]
+ matCodon[,muPos] = canMutateTo(glNuc)
+ substitutionRate = substitution[glNuc,matCodon[,muPos]]
+ typeOfMutations = apply(rbind(rep(codon,3),apply(matCodon,1,c2s)),2,function(x){mutationType(c2s(x[1]),c2s(x[2]))})
+ matMutationProb[codon,muPos] <- sum(substitutionRate[typeOfMutations==typeOfMutation])
+ }
+ }
+ }
+
+ return(matMutationProb)
+ }
+
+
+
+
+#Mapping Trinucleotides to fivemers
+mapTriToFivemer <- function(triMutability=triMutability_Literature_Human){
+ rownames(triMutability) <- triMutability_Names
+ Fivemer<-rep(NA,1024)
+ names(Fivemer)<-words(alphabet=NUCLEOTIDES,length=5)
+ Fivemer<-sapply(names(Fivemer),function(Word)return(sum( c(triMutability[substring(Word,3,5),1],triMutability[substring(Word,2,4),2],triMutability[substring(Word,1,3),3]),na.rm=TRUE)))
+ Fivemer<-Fivemer/sum(Fivemer)
+ return(Fivemer)
+}
+
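+# Collapse fivemer mutability to trinucleotides centred on NUC: fivemers with NUC in the middle
+# are grouped by the trinucleotide window selected by position and averaged, weighted by Weights.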
+collapseFivemerToTri<-function(Fivemer,Weights=MutabilityWeights,position=1,NUC="A"){
+ Indices<-substring(names(Fivemer),3,3)==NUC
+ Factors<-substring(names(Fivemer[Indices]),(4-position),(6-position))
+ tapply(which(Indices),Factors,function(i)weighted.mean(Fivemer[i],Weights[i],na.rm=TRUE))
+}
+
+
+
+CountFivemerToTri<-function(Fivemer,Weights=MutabilityWeights,position=1,NUC="A"){
+ Indices<-substring(names(Fivemer),3,3)==NUC
+ Factors<-substring(names(Fivemer[Indices]),(4-position),(6-position))
+ tapply(which(Indices),Factors,function(i)sum(Weights[i],na.rm=TRUE))
+}
+
+#Uses the real counts of the mutated fivemers
+CountFivemerToTri2<-function(Fivemer,Counts=MutabilityCounts,position=1,NUC="A"){
+ Indices<-substring(names(Fivemer),3,3)==NUC
+ Factors<-substring(names(Fivemer[Indices]),(4-position),(6-position))
+ tapply(which(Indices),Factors,function(i)sum(Counts[i],na.rm=TRUE))
+}
+
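+# Multinomial bootstrap of count vector x: draw M resamples of size N and return
+# Bonferroni-adjusted lower/upper quantiles of each category proportion.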
+bootstrap<-function(x=c(33,12,21),M=10000,alpha=0.05){
+N<-sum(x)
+if(N){
+p<-x/N
+k<-length(x)-1
+tmp<-rmultinom(M, size = N, prob=p)
+tmp_p<-apply(tmp,2,function(y)y/N)
+(apply(tmp_p,1,function(y)quantile(y,c(alpha/2/k,1-alpha/2/k))))
+}
+else return(matrix(0,2,length(x)))
+}
+
+
+
+
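+# Like bootstrap(), but draws M subsamples of size n without replacement from the pooled
+# observations and returns the per-category proportion quantiles.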
+bootstrap2<-function(x=c(33,12,21),n=10,M=10000,alpha=0.05){
+
+N<-sum(x)
+k<-length(x)
+y<-rep(1:k,x)
+tmp<-sapply(1:M,function(i)sample(y,n))
+if(n>1)tmp_p<-sapply(1:M,function(j)sapply(1:k,function(i)sum(tmp[,j]==i)))/n
+if(n==1)tmp_p<-sapply(1:M,function(j)sapply(1:k,function(i)sum(tmp[j]==i)))/n
+(apply(tmp_p,1,function(z)quantile(z,c(alpha/2/(k-1),1-alpha/2/(k-1)))))
+}
+
+
+
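+# Monte-Carlo p-value: compare observed counts x_obs (total n) with subsamples of size n drawn
+# from reference counts x; the smaller tail probability is returned, negated for the lower tail.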
+p_value<-function(x=c(33,12,21),M=100000,x_obs=c(2,5,3)){
+n=sum(x_obs)
+N<-sum(x)
+k<-length(x)
+y<-rep(1:k,x)
+tmp<-sapply(1:M,function(i)sample(y,n))
+if(n>1)tmp_p<-sapply(1:M,function(j)sapply(1:k,function(i)sum(tmp[,j]==i)))
+if(n==1)tmp_p<-sapply(1:M,function(j)sapply(1:k,function(i)sum(tmp[j]==i)))
+tmp<-rbind(sapply(1:3,function(i)sum(tmp_p[i,]>=x_obs[i])/M),
+sapply(1:3,function(i)sum(tmp_p[i,]<=x_obs[i])/M))
+sapply(1:3,function(i){if(tmp[1,i]>=tmp[2,i])return(-tmp[2,i])else return(tmp[1,i])})
+}
+
+#"D:\\Sequences\\IMGT Germlines\\Human_SNPless_IGHJ.FASTA"
+# Remove SNPs from IMGT germline segment alleles
+generateUnambiguousRepertoire <- function(repertoireInFile,repertoireOutFile){
+ repertoireIn <- read.fasta(repertoireInFile, seqtype="DNA",as.string=T,set.attributes=F,forceDNAtolower=F)
+ alleleNames <- sapply(names(repertoireIn),function(x)strsplit(x,"|",fixed=TRUE)[[1]][2])
+ SNPs <- tapply(repertoireIn,sapply(alleleNames,function(x)strsplit(x,"*",fixed=TRUE)[[1]][1]),function(x){
+ Indices<-NULL
+ for(i in 1:length(x)){
+ firstSeq = s2c(x[[1]])
+ iSeq = s2c(x[[i]])
+ Indices<-c(Indices,which(firstSeq[1:320]!=iSeq[1:320] & firstSeq[1:320]!="." & iSeq[1:320]!="." ))
+ }
+ return(sort(unique(Indices)))
+ })
+ repertoireOut <- repertoireIn
+ repertoireOut <- lapply(names(repertoireOut), function(repertoireName){
+ alleleName <- strsplit(repertoireName,"|",fixed=TRUE)[[1]][2]
+ geneSegmentName <- strsplit(alleleName,"*",fixed=TRUE)[[1]][1]
+ alleleSeq <- s2c(repertoireOut[[repertoireName]])
+ alleleSeq[as.numeric(unlist(SNPs[geneSegmentName]))] <- "N"
+ alleleSeq <- c2s(alleleSeq)
+ repertoireOut[[repertoireName]] <- alleleSeq
+ })
+ names(repertoireOut) <- names(repertoireIn)
+ write.fasta(repertoireOut,names(repertoireOut),file.out=repertoireOutFile)
+
+}
+
+
+
+
+
+
+############
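+# Convolve the per-sequence Focused posteriors of the sequences in 'indexes' into
+# group-level CDR and FWR posteriors via calculate_bayesG.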
+groupBayes2 = function(indexes, param_resultMat){
+
+ BayesGDist_Focused_CDR = calculate_bayesG( x=param_resultMat[indexes,1], N=apply(param_resultMat[indexes,c(1,2,4)],1,sum,na.rm=T), p=apply(param_resultMat[indexes,5:8],1,function(x){x[1]/(x[1]+x[2]+x[4])}))
+ BayesGDist_Focused_FWR = calculate_bayesG( x=param_resultMat[indexes,3], N=apply(param_resultMat[indexes,c(3,2,4)],1,sum,na.rm=T), p=apply(param_resultMat[indexes,5:8],1,function(x){x[3]/(x[3]+x[2]+x[4])}))
+ #BayesGDist_Local_CDR = calculate_bayesG( x=param_resultMat[indexes,1], N=apply(param_resultMat[indexes,c(1,2)],1,sum,na.rm=T), p=apply(param_resultMat[indexes,5:8],1,function(x){x[1]/(x[1]+x[2])}))
+ #BayesGDist_Local_FWR = calculate_bayesG( x=param_resultMat[indexes,3], N=apply(param_resultMat[indexes,c(3,4)],1,sum,na.rm=T), p=apply(param_resultMat[indexes,5:8],1,function(x){x[3]/(x[3]+x[4])}))
+ #BayesGDist_Global_CDR = calculate_bayesG( x=param_resultMat[indexes,1], N=apply(param_resultMat[indexes,c(1,2,3,4)],1,sum,na.rm=T), p=apply(param_resultMat[indexes,5:8],1,function(x){x[1]/(x[1]+x[2]+x[3]+x[4])}))
+ #BayesGDist_Global_FWR = calculate_bayesG( x=param_resultMat[indexes,3], N=apply(param_resultMat[indexes,c(1,2,3,4)],1,sum,na.rm=T), p=apply(param_resultMat[indexes,5:8],1,function(x){x[3]/(x[1]+x[2]+x[3]+x[4])}))
+ return ( list("BayesGDist_Focused_CDR"=BayesGDist_Focused_CDR,
+ "BayesGDist_Focused_FWR"=BayesGDist_Focused_FWR) )
+ #"BayesGDist_Local_CDR"=BayesGDist_Local_CDR,
+ #"BayesGDist_Local_FWR" = BayesGDist_Local_FWR))
+# "BayesGDist_Global_CDR" = BayesGDist_Global_CDR,
+# "BayesGDist_Global_FWR" = BayesGDist_Global_FWR) )
+
+
+}
+
+
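+# Group-level posterior: per-sequence posteriors from calculate_bayes are convolved through
+# powers-of-two grouping and renormalised onto the sigma grid.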
+calculate_bayesG <- function( x=array(), N=array(), p=array(), max_sigma=20, length_sigma=4001){
+ G <- max(length(x),length(N),length(p))
+ x=array(x,dim=G)
+ N=array(N,dim=G)
+ p=array(p,dim=G)
+
+ indexOfZero = N>0 & p>0
+ N = N[indexOfZero]
+ x = x[indexOfZero]
+ p = p[indexOfZero]
+ G <- length(x)
+
+ if(G){
+
+ cons<-array( dim=c(length_sigma,G) )
+ if(G==1) {
+ return(calculate_bayes(x=x[G],N=N[G],p=p[G],max_sigma=max_sigma,length_sigma=length_sigma))
+ }
+ else {
+ for(g in 1:G) cons[,g] <- calculate_bayes(x=x[g],N=N[g],p=p[g],max_sigma=max_sigma,length_sigma=length_sigma)
+ listMatG <- convolutionPowersOfTwoByTwos(cons,length_sigma=length_sigma)
+ y<-calculate_bayesGHelper(listMatG,length_sigma=length_sigma)
+ return( y/sum(y)/(2*max_sigma/(length_sigma-1)) )
+ }
+ }else{
+ return(NA)
+ }
+}
+
+
+calculate_bayesGHelper <- function( listMatG,length_sigma=4001 ){
+ matG <- listMatG[[1]]
+ groups <- listMatG[[2]]
+ i = 1
+ resConv <- matG[,i]
+ denom <- 2^groups[i]
+ if(length(groups)>1){
+		while( i<length(groups) ){
+			i = i + 1
+			resConv <- weighted_conv(resConv, matG[,i], w={{2^groups[i]}/denom}, length_sigma=length_sigma)
+			denom <- denom + 2^groups[i]
+		}
+	}
+	return(resConv)
+}
+
+# ONEmerStartPos = 1:(seqLength)
+# ONEmerLength <- length(ONEmerStartPos)
+ ONEmerGL <- s2c(seqGL)
+ ONEmerSeq <- s2c(seqInput)
+
+ #Background
+ for(ONEmerIndex in 1:seqLength){
+ ONEmer = ONEmerGL[ONEmerIndex]
+ if(ONEmer!="N"){
+ ONEmerCodonPos = getCodonPos(ONEmerIndex)
+ ONEmerReadingFrameCodon = c2s(ONEmerGL[ONEmerCodonPos])
+ ONEmerReadingFrameCodonInputSeq = c2s(ONEmerSeq[ONEmerCodonPos] )
+
+ # All mutations model
+ #if(!any(grep("N",ONEmerReadingFrameCodon))){
+ if(model==0){
+ if(stopMutations==0){
+ if(!any(grep("N",ONEmerReadingFrameCodonInputSeq)))
+ BackgroundMatrix[ONEmer] <- (BackgroundMatrix[ONEmer] + 1)
+ }else{
+ if( !any(grep("N",ONEmerReadingFrameCodonInputSeq)) & translateCodonToAminoAcid(ONEmerReadingFrameCodonInputSeq)!="*"){
+ positionWithinCodon = which(ONEmerCodonPos==ONEmerIndex)#positionsWithinCodon[(ONEmerCodonPos[1]%%3)+1]
+ BackgroundMatrix[ONEmer] <- (BackgroundMatrix[ONEmer] + probNonStopMutations[ONEmerReadingFrameCodon,positionWithinCodon])
+ }
+ }
+ }else{ # Only silent mutations
+ if( !any(grep("N",ONEmerReadingFrameCodonInputSeq)) & translateCodonToAminoAcid(ONEmerReadingFrameCodonInputSeq)!="*" & translateCodonToAminoAcid(ONEmerReadingFrameCodonInputSeq)==translateCodonToAminoAcid(ONEmerReadingFrameCodon) ){
+ positionWithinCodon = which(ONEmerCodonPos==ONEmerIndex)
+ BackgroundMatrix[ONEmer] <- (BackgroundMatrix[ONEmer] + probSMutations[ONEmerReadingFrameCodon,positionWithinCodon])
+ }
+ }
+ }
+ }
+ }
+
+ #Mutations
+ if(stopMutations==1) mutationCount = mutationCount[mutationCount!="Stop"]
+ if(model==1) mutationCount = mutationCount[mutationCount=="S"]
+ mutationPositions = as.numeric(names(mutationCount))
+ mutationCount = mutationCount[mutationPositions>2 & mutationPositions<(seqLength-1)]
+ mutationPositions = mutationPositions[mutationPositions>2 & mutationPositions<(seqLength-1)]
+ countMutations = 0
+ for(mutationPosition in mutationPositions){
+ ONEmerIndex = mutationPosition
+ ONEmer = ONEmerSeq[ONEmerIndex]
+ GLONEmer = ONEmerGL[ONEmerIndex]
+ ONEmerCodonPos = getCodonPos(ONEmerIndex)
+ ONEmerReadingFrameCodon = c2s(ONEmerSeq[ONEmerCodonPos])
+ ONEmerReadingFrameCodonGL =c2s(ONEmerGL[ONEmerCodonPos])
+ if(!any(grep("N",ONEmer)) & !any(grep("N",GLONEmer))){
+ if(model==0){
+ countMutations = countMutations + 1
+ MutationMatrix[GLONEmer] <- (MutationMatrix[GLONEmer] + 1)
+ MutationCountMatrix[GLONEmer] <- (MutationCountMatrix[GLONEmer] + 1)
+ }else{
+ if( translateCodonToAminoAcid(ONEmerReadingFrameCodonGL)!="*" ){
+ countMutations = countMutations + 1
+ positionWithinCodon = which(ONEmerCodonPos==ONEmerIndex)
+ glNuc = substr(ONEmerReadingFrameCodonGL,positionWithinCodon,positionWithinCodon)
+ inputNuc = substr(ONEmerReadingFrameCodon,positionWithinCodon,positionWithinCodon)
+ MutationMatrix[GLONEmer] <- (MutationMatrix[GLONEmer] + substitution[glNuc,inputNuc])
+ MutationCountMatrix[GLONEmer] <- (MutationCountMatrix[GLONEmer] + 1)
+ }
+ }
+ }
+ }
+
+ seqMutability = MutationMatrix/BackgroundMatrix
+ seqMutability = seqMutability/sum(seqMutability,na.rm=TRUE)
+ #cat(inputMatrixIndex,"\t",countMutations,"\n")
+ return(list("seqMutability" = seqMutability,"numbMutations" = countMutations,"seqMutabilityCount" = MutationCountMatrix, "BackgroundMatrix"=BackgroundMatrix))
+# tmp<-list("seqMutability" = seqMutability,"numbMutations" = countMutations,"seqMutabilityCount" = MutationCountMatrix)
+ }
+ }
+
+################
+# $Id: trim.R 989 2006-10-29 15:28:26Z ggorjan $
+
+trim <- function(s, recode.factor=TRUE, ...)
+ UseMethod("trim", s)
+
+trim.default <- function(s, recode.factor=TRUE, ...)
+ s
+
+trim.character <- function(s, recode.factor=TRUE, ...)
+{
+ s <- sub(pattern="^ +", replacement="", x=s)
+ s <- sub(pattern=" +$", replacement="", x=s)
+ s
+}
+
+trim.factor <- function(s, recode.factor=TRUE, ...)
+{
+ levels(s) <- trim(levels(s))
+ if(recode.factor) {
+ dots <- list(x=s, ...)
+ if(is.null(dots$sort)) dots$sort <- sort
+ s <- do.call(what=reorder.factor, args=dots)
+ }
+ s
+}
+
+trim.list <- function(s, recode.factor=TRUE, ...)
+ lapply(s, trim, recode.factor=recode.factor, ...)
+
+trim.data.frame <- function(s, recode.factor=TRUE, ...)
+{
+ s[] <- trim.list(s, recode.factor=recode.factor, ...)
+ s
+}
+#######################################
+# Compute the expected for each sequence-germline pair by codon
+getExpectedIndividualByCodon <- function(matInput){
+if( any(grep("multicore",search())) ){
+ facGL <- factor(matInput[,2])
+ facLevels = levels(facGL)
+ LisGLs_MutabilityU = mclapply(1:length(facLevels), function(x){
+ computeMutabilities(facLevels[x])
+ })
+ facIndex = match(facGL,facLevels)
+
+ LisGLs_Mutability = mclapply(1:nrow(matInput), function(x){
+ cInput = rep(NA,nchar(matInput[x,1]))
+ cInput[s2c(matInput[x,1])!="N"] = 1
+ LisGLs_MutabilityU[[facIndex[x]]] * cInput
+ })
+
+ LisGLs_Targeting = mclapply(1:dim(matInput)[1], function(x){
+ computeTargeting(matInput[x,2],LisGLs_Mutability[[x]])
+ })
+
+ LisGLs_MutationTypes = mclapply(1:length(matInput[,2]),function(x){
+ #print(x)
+ computeMutationTypes(matInput[x,2])
+ })
+
+ LisGLs_R_Exp = mclapply(1:nrow(matInput), function(x){
+ Exp_R <- rollapply(as.zoo(1:readEnd),width=3,by=3,
+ function(codonNucs){
+ RPos = which(LisGLs_MutationTypes[[x]][,codonNucs]=="R")
+ sum( LisGLs_Targeting[[x]][,codonNucs][RPos], na.rm=T )
+ }
+ )
+ })
+
+ LisGLs_S_Exp = mclapply(1:nrow(matInput), function(x){
+ Exp_S <- rollapply(as.zoo(1:readEnd),width=3,by=3,
+ function(codonNucs){
+ SPos = which(LisGLs_MutationTypes[[x]][,codonNucs]=="S")
+ sum( LisGLs_Targeting[[x]][,codonNucs][SPos], na.rm=T )
+ }
+ )
+ })
+
+ Exp_R = matrix(unlist(LisGLs_R_Exp),nrow=nrow(matInput),ncol=readEnd/3,T)
+ Exp_S = matrix(unlist(LisGLs_S_Exp),nrow=nrow(matInput),ncol=readEnd/3,T)
+ return( list( "Expected_R"=Exp_R, "Expected_S"=Exp_S) )
+ }else{
+ facGL <- factor(matInput[,2])
+ facLevels = levels(facGL)
+ LisGLs_MutabilityU = lapply(1:length(facLevels), function(x){
+ computeMutabilities(facLevels[x])
+ })
+ facIndex = match(facGL,facLevels)
+
+ LisGLs_Mutability = lapply(1:nrow(matInput), function(x){
+ cInput = rep(NA,nchar(matInput[x,1]))
+ cInput[s2c(matInput[x,1])!="N"] = 1
+ LisGLs_MutabilityU[[facIndex[x]]] * cInput
+ })
+
+ LisGLs_Targeting = lapply(1:dim(matInput)[1], function(x){
+ computeTargeting(matInput[x,2],LisGLs_Mutability[[x]])
+ })
+
+ LisGLs_MutationTypes = lapply(1:length(matInput[,2]),function(x){
+ #print(x)
+ computeMutationTypes(matInput[x,2])
+ })
+
+ LisGLs_R_Exp = lapply(1:nrow(matInput), function(x){
+ Exp_R <- rollapply(as.zoo(1:readEnd),width=3,by=3,
+ function(codonNucs){
+ RPos = which(LisGLs_MutationTypes[[x]][,codonNucs]=="R")
+ sum( LisGLs_Targeting[[x]][,codonNucs][RPos], na.rm=T )
+ }
+ )
+ })
+
+ LisGLs_S_Exp = lapply(1:nrow(matInput), function(x){
+ Exp_S <- rollapply(as.zoo(1:readEnd),width=3,by=3,
+ function(codonNucs){
+ SPos = which(LisGLs_MutationTypes[[x]][,codonNucs]=="S")
+ sum( LisGLs_Targeting[[x]][,codonNucs][SPos], na.rm=T )
+ }
+ )
+ })
+
+ Exp_R = matrix(unlist(LisGLs_R_Exp),nrow=nrow(matInput),ncol=readEnd/3,T)
+ Exp_S = matrix(unlist(LisGLs_S_Exp),nrow=nrow(matInput),ncol=readEnd/3,T)
+ return( list( "Expected_R"=Exp_R, "Expected_S"=Exp_S) )
+ }
+}
+
+# getObservedMutationsByCodon <- function(listMutations){
+# numbSeqs <- length(listMutations)
+# obsMu_R <- matrix(0,nrow=numbSeqs,ncol=readEnd/3,dimnames=list(c(1:numbSeqs),c(1:(readEnd/3))))
+# obsMu_S <- obsMu_R
+# temp <- mclapply(1:length(listMutations), function(i){
+# arrMutations = listMutations[[i]]
+# RPos = as.numeric(names(arrMutations)[arrMutations=="R"])
+# RPos <- sapply(RPos,getCodonNumb)
+# if(any(RPos)){
+# tabR <- table(RPos)
+# obsMu_R[i,as.numeric(names(tabR))] <<- tabR
+# }
+#
+# SPos = as.numeric(names(arrMutations)[arrMutations=="S"])
+# SPos <- sapply(SPos,getCodonNumb)
+# if(any(SPos)){
+# tabS <- table(SPos)
+# obsMu_S[i,names(tabS)] <<- tabS
+# }
+# }
+# )
+# return( list( "Observed_R"=obsMu_R, "Observed_S"=obsMu_S) )
+# }
+
+getObservedMutationsByCodon <- function(listMutations){
+ numbSeqs <- length(listMutations)
+ obsMu_R <- matrix(0,nrow=numbSeqs,ncol=readEnd/3,dimnames=list(c(1:numbSeqs),c(1:(readEnd/3))))
+ obsMu_S <- obsMu_R
+ temp <- lapply(1:length(listMutations), function(i){
+ arrMutations = listMutations[[i]]
+ RPos = as.numeric(names(arrMutations)[arrMutations=="R"])
+ RPos <- sapply(RPos,getCodonNumb)
+ if(any(RPos)){
+ tabR <- table(RPos)
+ obsMu_R[i,as.numeric(names(tabR))] <<- tabR
+ }
+
+ SPos = as.numeric(names(arrMutations)[arrMutations=="S"])
+ SPos <- sapply(SPos,getCodonNumb)
+ if(any(SPos)){
+ tabS <- table(SPos)
+ obsMu_S[i,names(tabS)] <<- tabS
+ }
+ }
+ )
+ return( list( "Observed_R"=obsMu_R, "Observed_S"=obsMu_S) )
+}
+
diff -r 43a1aa648537 -r ba33b94637ca baseline/Baseline_Main.r
--- a/baseline/Baseline_Main.r Thu Dec 07 03:44:38 2017 -0500
+++ b/baseline/Baseline_Main.r Tue Jan 29 03:54:09 2019 -0500
@@ -1,388 +1,388 @@
-#########################################################################################
-# License Agreement
-#
-# THIS WORK IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE
-# ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER
-# APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE
-# OR COPYRIGHT LAW IS PROHIBITED.
-#
-# BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE
-# BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE CONSIDERED
-# TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN
-# CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
-#
-# BASELIne: Bayesian Estimation of Antigen-Driven Selection in Immunoglobulin Sequences
-# Coded by: Mohamed Uduman & Gur Yaari
-# Copyright 2012 Kleinstein Lab
-# Version: 1.3 (01/23/2014)
-#########################################################################################
-
-op <- options();
-options(showWarnCalls=FALSE, showErrorCalls=FALSE, warn=-1)
-library('seqinr')
-if( F & Sys.info()[1]=="Linux"){
- library("multicore")
-}
-
-# Load functions and initialize global variables
-source("Baseline_Functions.r")
-
-# Initialize parameters with user provided arguments
- arg <- commandArgs(TRUE)
- #arg = c(2,1,5,5,0,1,"1:26:38:55:65:104:116", "test.fasta","","sample")
- #arg = c(1,1,5,5,0,1,"1:38:55:65:104:116:200", "test.fasta","","sample")
- #arg = c(1,1,5,5,1,1,"1:26:38:55:65:104:116", "/home/mu37/Wu/Wu_Cloned_gapped_sequences_D-masked.fasta","/home/mu37/Wu/","Wu")
- testID <- as.numeric(arg[1]) # 1 = Focused, 2 = Local
- species <- as.numeric(arg[2]) # 1 = Human. 2 = Mouse
- substitutionModel <- as.numeric(arg[3]) # 0 = Uniform substitution, 1 = Smith DS et al. 1996, 5 = FiveS
- mutabilityModel <- as.numeric(arg[4]) # 0 = Uniform mutablity, 1 = Tri-nucleotide (Shapiro GS et al. 2002) , 5 = FiveS
- clonal <- as.numeric(arg[5]) # 0 = Independent sequences, 1 = Clonally related, 2 = Clonally related & only non-terminal mutations
- fixIndels <- as.numeric(arg[6]) # 0 = Do nothing, 1 = Try and fix Indels
- region <- as.numeric(strsplit(arg[7],":")[[1]]) # StartPos:LastNucleotideF1:C1:F2:C2:F3:C3
- inputFilePath <- arg[8] # Full path to input file
- outputPath <- arg[9] # Full path to location of output files
- outputID <- arg[10] # ID for session output
-
-
- if(testID==5){
- traitChangeModel <- 1
- if( !is.na(any(arg[11])) ) traitChangeModel <- as.numeric(arg[11]) # 1 <- Chothia 1998
- initializeTraitChange(traitChangeModel)
- }
-
-# Initialize other parameters/variables
-
- # Initialzie the codon table ( definitions of R/S )
- computeCodonTable(testID)
-
- # Initialize
- # Test Name
- testName<-"Focused"
- if(testID==2) testName<-"Local"
- if(testID==3) testName<-"Imbalanced"
- if(testID==4) testName<-"ImbalancedSilent"
-
- # Indel placeholders initialization
- indelPos <- NULL
- delPos <- NULL
- insPos <- NULL
-
- # Initialize in Tranistion & Mutability matrixes
- substitution <- initializeSubstitutionMatrix(substitutionModel,species)
- mutability <- initializeMutabilityMatrix(mutabilityModel,species)
-
- # FWR/CDR boundaries
- flagTrim <- F
- if( is.na(region[7])){
- flagTrim <- T
- region[7]<-region[6]
- }
- readStart = min(region,na.rm=T)
- readEnd = max(region,na.rm=T)
- if(readStart>1){
- region = region - (readStart - 1)
- }
- region_Nuc = c( (region[1]*3-2) , (region[2:7]*3) )
- region_Cod = region
-
- readStart = (readStart*3)-2
- readEnd = (readEnd*3)
-
- FWR_Nuc <- c( rep(TRUE,(region_Nuc[2])),
- rep(FALSE,(region_Nuc[3]-region_Nuc[2])),
- rep(TRUE,(region_Nuc[4]-region_Nuc[3])),
- rep(FALSE,(region_Nuc[5]-region_Nuc[4])),
- rep(TRUE,(region_Nuc[6]-region_Nuc[5])),
- rep(FALSE,(region_Nuc[7]-region_Nuc[6]))
- )
- CDR_Nuc <- (1-FWR_Nuc)
- CDR_Nuc <- as.logical(CDR_Nuc)
- FWR_Nuc_Mat <- matrix( rep(FWR_Nuc,4), ncol=length(FWR_Nuc), nrow=4, byrow=T)
- CDR_Nuc_Mat <- matrix( rep(CDR_Nuc,4), ncol=length(CDR_Nuc), nrow=4, byrow=T)
-
- FWR_Codon <- c( rep(TRUE,(region[2])),
- rep(FALSE,(region[3]-region[2])),
- rep(TRUE,(region[4]-region[3])),
- rep(FALSE,(region[5]-region[4])),
- rep(TRUE,(region[6]-region[5])),
- rep(FALSE,(region[7]-region[6]))
- )
- CDR_Codon <- (1-FWR_Codon)
- CDR_Codon <- as.logical(CDR_Codon)
-
-
-# Read input FASTA file
- tryCatch(
- inputFASTA <- baseline.read.fasta(inputFilePath, seqtype="DNA",as.string=T,set.attributes=F,forceDNAtolower=F)
- , error = function(ex){
- cat("Error|Error reading input. Please enter or upload a valid FASTA file.\n")
- q()
- }
- )
-
- if (length(inputFASTA)==1) {
- cat("Error|Error reading input. Please enter or upload a valid FASTA file.\n")
- q()
- }
-
- # Process sequence IDs/names
- names(inputFASTA) <- sapply(names(inputFASTA),function(x){trim(x)})
-
- # Convert non nucleotide characters to N
- inputFASTA[length(inputFASTA)] = gsub("\t","",inputFASTA[length(inputFASTA)])
- inputFASTA <- lapply(inputFASTA,replaceNonFASTAChars)
-
- # Process the FASTA file and conver to Matrix[inputSequence, germlineSequence]
- processedInput <- processInputAdvanced(inputFASTA)
- matInput <- processedInput[[1]]
- germlines <- processedInput[[2]]
- lenGermlines = length(unique(germlines))
- groups <- processedInput[[3]]
- lenGroups = length(unique(groups))
- rm(processedInput)
- rm(inputFASTA)
-
-# # remove clones with less than 2 seqeunces
-# tableGL <- table(germlines)
-# singletons <- which(tableGL<8)
-# rowsToRemove <- match(singletons,germlines)
-# if(any(rowsToRemove)){
-# matInput <- matInput[-rowsToRemove,]
-# germlines <- germlines[-rowsToRemove]
-# groups <- groups[-rowsToRemove]
-# }
-#
-# # remove unproductive seqs
-# nonFuctionalSeqs <- sapply(rownames(matInput),function(x){any(grep("unproductive",x))})
-# if(any(nonFuctionalSeqs)){
-# if(sum(nonFuctionalSeqs)==length(germlines)){
-# write.table("Unproductive",file=paste(outputPath,outputID,".txt",sep=""),quote=F,sep="\t",row.names=F,col.names=T)
-# q()
-# }
-# matInput <- matInput[-which(nonFuctionalSeqs),]
-# germlines <- germlines[-which(nonFuctionalSeqs)]
-# germlines[1:length(germlines)] <- 1:length(germlines)
-# groups <- groups[-which(nonFuctionalSeqs)]
-# }
-#
-# if(class(matInput)=="character"){
-# write.table("All unproductive seqs",file=paste(outputPath,outputID,".txt",sep=""),quote=F,sep="\t",row.names=F,col.names=T)
-# q()
-# }
-#
-# if(nrow(matInput)<10 | is.null(nrow(matInput))){
-# write.table(paste(nrow(matInput), "seqs only",sep=""),file=paste(outputPath,outputID,".txt",sep=""),quote=F,sep="\t",row.names=F,col.names=T)
-# q()
-# }
-
-# replace leading & trailing "-" with "N:
- matInput <- t(apply(matInput,1,replaceLeadingTrailingDashes,readEnd))
-
- # Trim (nucleotide) input sequences to the last codon
- #matInput[,1] <- apply(matrix(matInput[,1]),1,trimToLastCodon)
-
-# # Check for Indels
-# if(fixIndels){
-# delPos <- fixDeletions(matInput)
-# insPos <- fixInsertions(matInput)
-# }else{
-# # Check for indels
-# indelPos <- checkForInDels(matInput)
-# indelPos <- apply(cbind(indelPos[[1]],indelPos[[2]]),1,function(x){(x[1]==T & x[2]==T)})
-# }
-
- # If indels are present, remove mutations in the seqeunce & throw warning at end
- #matInput[indelPos,] <- apply(matrix(matInput[indelPos,],nrow=sum(indelPos),ncol=2),1,function(x){x[1]=x[2]; return(x) })
-
- colnames(matInput)=c("Input","Germline")
-
- # If seqeunces are clonal, create effective sequence for each clone & modify germline/group definitions
- germlinesOriginal = NULL
- if(clonal){
- germlinesOriginal <- germlines
- collapseCloneResults <- tapply(1:nrow(matInput),germlines,function(i){
- collapseClone(matInput[i,1],matInput[i[1],2],readEnd,nonTerminalOnly=(clonal-1))
- })
- matInput = t(sapply(collapseCloneResults,function(x){return(x[[1]])}))
- names_groups = tapply(groups,germlines,function(x){names(x[1])})
- groups = tapply(groups,germlines,function(x){array(x[1],dimnames=names(x[1]))})
- names(groups) = names_groups
-
- names_germlines = tapply(germlines,germlines,function(x){names(x[1])})
- germlines = tapply( germlines,germlines,function(x){array(x[1],dimnames=names(x[1]))} )
- names(germlines) = names_germlines
- matInputErrors = sapply(collapseCloneResults,function(x){return(x[[2]])})
- }
-
-
-# Selection Analysis
-
-
-# if (length(germlines)>sequenceLimit) {
-# # Code to parallelize processing goes here
-# stop( paste("Error: Cannot process more than ", Upper_limit," sequences",sep="") )
-# }
-
-# if (length(germlines)1){
- groups <- c(groups,lenGroups+1)
- names(groups)[length(groups)] = "All sequences combined"
- bayesPDF_groups_cdr[[lenGroups+1]] = groupPosteriors(bayesPDF_groups_cdr,length_sigma=4001)
- bayesPDF_groups_fwr[[lenGroups+1]] = groupPosteriors(bayesPDF_groups_fwr,length_sigma=4001)
- }
-
- #Bayesian Outputs
- bayes_cdr = t(sapply(bayesPDF_cdr,calcBayesOutputInfo))
- bayes_fwr = t(sapply(bayesPDF_fwr,calcBayesOutputInfo))
- bayes_germlines_cdr = t(sapply(bayesPDF_germlines_cdr,calcBayesOutputInfo))
- bayes_germlines_fwr = t(sapply(bayesPDF_germlines_fwr,calcBayesOutputInfo))
- bayes_groups_cdr = t(sapply(bayesPDF_groups_cdr,calcBayesOutputInfo))
- bayes_groups_fwr = t(sapply(bayesPDF_groups_fwr,calcBayesOutputInfo))
-
- #P-values
- simgaP_cdr = sapply(bayesPDF_cdr,computeSigmaP)
- simgaP_fwr = sapply(bayesPDF_fwr,computeSigmaP)
-
- simgaP_germlines_cdr = sapply(bayesPDF_germlines_cdr,computeSigmaP)
- simgaP_germlines_fwr = sapply(bayesPDF_germlines_fwr,computeSigmaP)
-
- simgaP_groups_cdr = sapply(bayesPDF_groups_cdr,computeSigmaP)
- simgaP_groups_fwr = sapply(bayesPDF_groups_fwr,computeSigmaP)
-
-
- #Format output
-
- # Round expected mutation frequencies to 3 decimal places
- matMutationInfo[germlinesOriginal[indelPos],] = NA
- if(nrow(matMutationInfo)==1){
- matMutationInfo[5:8] = round(matMutationInfo[,5:8]/sum(matMutationInfo[,5:8],na.rm=T),3)
- }else{
- matMutationInfo[,5:8] = t(round(apply(matMutationInfo[,5:8],1,function(x){ return(x/sum(x,na.rm=T)) }),3))
- }
-
- listPDFs = list()
- nRows = length(unique(groups)) + length(unique(germlines)) + length(groups)
-
- matOutput = matrix(NA,ncol=18,nrow=nRows)
- rowNumb = 1
- for(G in unique(groups)){
- #print(G)
- matOutput[rowNumb,c(1,2,11:18)] = c("Group",names(groups)[groups==G][1],bayes_groups_cdr[G,],bayes_groups_fwr[G,],simgaP_groups_cdr[G],simgaP_groups_fwr[G])
- listPDFs[[rowNumb]] = list("CDR"=bayesPDF_groups_cdr[[G]],"FWR"=bayesPDF_groups_fwr[[G]])
- names(listPDFs)[rowNumb] = names(groups[groups==paste(G)])[1]
- #if(names(groups)[which(groups==G)[1]]!="All sequences combined"){
- gs = unique(germlines[groups==G])
- rowNumb = rowNumb+1
- if( !is.na(gs) ){
- for( g in gs ){
- matOutput[rowNumb,c(1,2,11:18)] = c("Germline",names(germlines)[germlines==g][1],bayes_germlines_cdr[g,],bayes_germlines_fwr[g,],simgaP_germlines_cdr[g],simgaP_germlines_fwr[g])
- listPDFs[[rowNumb]] = list("CDR"=bayesPDF_germlines_cdr[[g]],"FWR"=bayesPDF_germlines_fwr[[g]])
- names(listPDFs)[rowNumb] = names(germlines[germlines==paste(g)])[1]
- rowNumb = rowNumb+1
- indexesOfInterest = which(germlines==g)
- numbSeqsOfInterest = length(indexesOfInterest)
- rowNumb = seq(rowNumb,rowNumb+(numbSeqsOfInterest-1))
- matOutput[rowNumb,] = matrix( c( rep("Sequence",numbSeqsOfInterest),
- rownames(matInput)[indexesOfInterest],
- c(matMutationInfo[indexesOfInterest,1:4]),
- c(matMutationInfo[indexesOfInterest,5:8]),
- c(bayes_cdr[indexesOfInterest,]),
- c(bayes_fwr[indexesOfInterest,]),
- c(simgaP_cdr[indexesOfInterest]),
- c(simgaP_fwr[indexesOfInterest])
- ), ncol=18, nrow=numbSeqsOfInterest,byrow=F)
- increment=0
- for( ioi in indexesOfInterest){
- listPDFs[[min(rowNumb)+increment]] = list("CDR"=bayesPDF_cdr[[ioi]] , "FWR"=bayesPDF_fwr[[ioi]])
- names(listPDFs)[min(rowNumb)+increment] = rownames(matInput)[ioi]
- increment = increment + 1
- }
- rowNumb=max(rowNumb)+1
-
- }
- }
- }
- colsToFormat = 11:18
- matOutput[,colsToFormat] = formatC( matrix(as.numeric(matOutput[,colsToFormat]), nrow=nrow(matOutput), ncol=length(colsToFormat)) , digits=3)
- matOutput[matOutput== " NaN"] = NA
-
-
-
- colnames(matOutput) = c("Type", "ID", "Observed_CDR_R", "Observed_CDR_S", "Observed_FWR_R", "Observed_FWR_S",
- "Expected_CDR_R", "Expected_CDR_S", "Expected_FWR_R", "Expected_FWR_S",
- paste( rep(testName,6), rep(c("Sigma","CIlower","CIupper"),2),rep(c("CDR","FWR"),each=3), sep="_"),
- paste( rep(testName,2), rep("P",2),c("CDR","FWR"), sep="_")
- )
- fileName = paste(outputPath,outputID,".txt",sep="")
- write.table(matOutput,file=fileName,quote=F,sep="\t",row.names=T,col.names=NA)
- fileName = paste(outputPath,outputID,".RData",sep="")
- save(listPDFs,file=fileName)
-
-indelWarning = FALSE
-if(sum(indelPos)>0){
- indelWarning = "Warning: The following sequences have either gaps and/or deletions, and have been ommited from the analysis.";
- indelWarning = paste( indelWarning , "
", sep="" )
- for(indels in names(indelPos)[indelPos]){
- indelWarning = paste( indelWarning , "- ", indels, "
", sep="" )
- }
- indelWarning = paste( indelWarning , "
", sep="" )
-}
-
-cloneWarning = FALSE
-if(clonal==1){
- if(sum(matInputErrors)>0){
- cloneWarning = "Warning: The following clones have sequences of unequal length.";
- cloneWarning = paste( cloneWarning , "
", sep="" )
- for(clone in names(matInputErrors)[matInputErrors]){
- cloneWarning = paste( cloneWarning , "- ", names(germlines)[as.numeric(clone)], "
", sep="" )
- }
- cloneWarning = paste( cloneWarning , "
", sep="" )
- }
-}
-cat(paste("Success",outputID,indelWarning,cloneWarning,sep="|"))
+#########################################################################################
+# License Agreement
+#
+# THIS WORK IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE
+# ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER
+# APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE
+# OR COPYRIGHT LAW IS PROHIBITED.
+#
+# BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE
+# BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE CONSIDERED
+# TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN
+# CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
+#
+# BASELIne: Bayesian Estimation of Antigen-Driven Selection in Immunoglobulin Sequences
+# Coded by: Mohamed Uduman & Gur Yaari
+# Copyright 2012 Kleinstein Lab
+# Version: 1.3 (01/23/2014)
+#########################################################################################
+
+op <- options();
+options(showWarnCalls=FALSE, showErrorCalls=FALSE, warn=-1)
+library('seqinr')
+if( F & Sys.info()[1]=="Linux"){
+ library("multicore")
+}
+
+# Load functions and initialize global variables
+source("Baseline_Functions.r")
+
+# Initialize parameters with user provided arguments
+ arg <- commandArgs(TRUE)
+ #arg = c(2,1,5,5,0,1,"1:26:38:55:65:104:116", "test.fasta","","sample")
+ #arg = c(1,1,5,5,0,1,"1:38:55:65:104:116:200", "test.fasta","","sample")
+ #arg = c(1,1,5,5,1,1,"1:26:38:55:65:104:116", "/home/mu37/Wu/Wu_Cloned_gapped_sequences_D-masked.fasta","/home/mu37/Wu/","Wu")
+ testID <- as.numeric(arg[1]) # 1 = Focused, 2 = Local
+ species <- as.numeric(arg[2]) # 1 = Human. 2 = Mouse
+ substitutionModel <- as.numeric(arg[3]) # 0 = Uniform substitution, 1 = Smith DS et al. 1996, 5 = FiveS
+ mutabilityModel <- as.numeric(arg[4]) # 0 = Uniform mutability, 1 = Tri-nucleotide (Shapiro GS et al. 2002), 5 = FiveS
+ clonal <- as.numeric(arg[5]) # 0 = Independent sequences, 1 = Clonally related, 2 = Clonally related & only non-terminal mutations
+ fixIndels <- as.numeric(arg[6]) # 0 = Do nothing, 1 = Try and fix Indels
+ region <- as.numeric(strsplit(arg[7],":")[[1]]) # StartPos:LastNucleotideF1:C1:F2:C2:F3:C3
+ inputFilePath <- arg[8] # Full path to input file
+ outputPath <- arg[9] # Full path to location of output files
+ outputID <- arg[10] # ID for session output
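+ # Example invocation (hypothetical script/file names, matching the argument order above):
+ #   Rscript Baseline_Main.r 1 1 5 5 0 1 1:26:38:55:65:104:116 input.fasta ./out/ sample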
+
+
+ if(testID==5){
+ traitChangeModel <- 1
+ if( !is.na(arg[11]) ) traitChangeModel <- as.numeric(arg[11]) # 1 = Chothia 1998
+ initializeTraitChange(traitChangeModel)
+ }
+
+# Initialize other parameters/variables
+
+ # Initialize the codon table (definitions of R/S)
+ computeCodonTable(testID)
+
+ # Initialize
+ # Test Name
+ testName<-"Focused"
+ if(testID==2) testName<-"Local"
+ if(testID==3) testName<-"Imbalanced"
+ if(testID==4) testName<-"ImbalancedSilent"
+
+ # Indel placeholders initialization
+ indelPos <- NULL
+ delPos <- NULL
+ insPos <- NULL
+
+ # Initialize the Transition & Mutability matrices
+ substitution <- initializeSubstitutionMatrix(substitutionModel,species)
+ mutability <- initializeMutabilityMatrix(mutabilityModel,species)
+
+ # FWR/CDR boundaries
+ flagTrim <- F
+ if( is.na(region[7])){
+ flagTrim <- T
+ region[7]<-region[6]
+ }
+ readStart = min(region,na.rm=T)
+ readEnd = max(region,na.rm=T)
+ if(readStart>1){
+ region = region - (readStart - 1)
+ }
+ region_Nuc = c( (region[1]*3-2) , (region[2:7]*3) )
+ region_Cod = region
+
+ readStart = (readStart*3)-2
+ readEnd = (readEnd*3)
+
+ FWR_Nuc <- c( rep(TRUE,(region_Nuc[2])),
+ rep(FALSE,(region_Nuc[3]-region_Nuc[2])),
+ rep(TRUE,(region_Nuc[4]-region_Nuc[3])),
+ rep(FALSE,(region_Nuc[5]-region_Nuc[4])),
+ rep(TRUE,(region_Nuc[6]-region_Nuc[5])),
+ rep(FALSE,(region_Nuc[7]-region_Nuc[6]))
+ )
+ CDR_Nuc <- (1-FWR_Nuc)
+ CDR_Nuc <- as.logical(CDR_Nuc)
+ FWR_Nuc_Mat <- matrix( rep(FWR_Nuc,4), ncol=length(FWR_Nuc), nrow=4, byrow=T)
+ CDR_Nuc_Mat <- matrix( rep(CDR_Nuc,4), ncol=length(CDR_Nuc), nrow=4, byrow=T)
+
+ FWR_Codon <- c( rep(TRUE,(region[2])),
+ rep(FALSE,(region[3]-region[2])),
+ rep(TRUE,(region[4]-region[3])),
+ rep(FALSE,(region[5]-region[4])),
+ rep(TRUE,(region[6]-region[5])),
+ rep(FALSE,(region[7]-region[6]))
+ )
+ CDR_Codon <- (1-FWR_Codon)
+ CDR_Codon <- as.logical(CDR_Codon)
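+ # Worked example: with the sample region "1:26:38:55:65:104:116" (see the commented
+ # arguments above), region_Nuc = c(1, 78, 114, 165, 195, 312, 348), so FWR_Nuc flags
+ # nucleotides 1-78, 115-165 and 196-312 (FR1-3) and CDR_Nuc flags 79-114, 166-195
+ # and 313-348 (CDR1-3).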
+
+
+# Read input FASTA file
+ tryCatch(
+ inputFASTA <- baseline.read.fasta(inputFilePath, seqtype="DNA",as.string=T,set.attributes=F,forceDNAtolower=F)
+ , error = function(ex){
+ cat("Error|Error reading input. Please enter or upload a valid FASTA file.\n")
+ q()
+ }
+ )
+
+ if (length(inputFASTA)==1) {
+ cat("Error|Error reading input. Please enter or upload a valid FASTA file.\n")
+ q()
+ }
+
+ # Process sequence IDs/names
+ names(inputFASTA) <- sapply(names(inputFASTA),function(x){trim(x)})
+
+ # Convert non-nucleotide characters to N
+ inputFASTA[length(inputFASTA)] = gsub("\t","",inputFASTA[length(inputFASTA)])
+ inputFASTA <- lapply(inputFASTA,replaceNonFASTAChars)
+
+ # Process the FASTA file and convert to Matrix[inputSequence, germlineSequence]
+ processedInput <- processInputAdvanced(inputFASTA)
+ matInput <- processedInput[[1]]
+ germlines <- processedInput[[2]]
+ lenGermlines = length(unique(germlines))
+ groups <- processedInput[[3]]
+ lenGroups = length(unique(groups))
+ rm(processedInput)
+ rm(inputFASTA)
+
+# # remove clones with fewer than 2 sequences
+# tableGL <- table(germlines)
+# singletons <- which(tableGL<8)
+# rowsToRemove <- match(singletons,germlines)
+# if(any(rowsToRemove)){
+# matInput <- matInput[-rowsToRemove,]
+# germlines <- germlines[-rowsToRemove]
+# groups <- groups[-rowsToRemove]
+# }
+#
+# # remove unproductive seqs
+# nonFuctionalSeqs <- sapply(rownames(matInput),function(x){any(grep("unproductive",x))})
+# if(any(nonFuctionalSeqs)){
+# if(sum(nonFuctionalSeqs)==length(germlines)){
+# write.table("Unproductive",file=paste(outputPath,outputID,".txt",sep=""),quote=F,sep="\t",row.names=F,col.names=T)
+# q()
+# }
+# matInput <- matInput[-which(nonFuctionalSeqs),]
+# germlines <- germlines[-which(nonFuctionalSeqs)]
+# germlines[1:length(germlines)] <- 1:length(germlines)
+# groups <- groups[-which(nonFuctionalSeqs)]
+# }
+#
+# if(class(matInput)=="character"){
+# write.table("All unproductive seqs",file=paste(outputPath,outputID,".txt",sep=""),quote=F,sep="\t",row.names=F,col.names=T)
+# q()
+# }
+#
+# if(nrow(matInput)<10 | is.null(nrow(matInput))){
+# write.table(paste(nrow(matInput), "seqs only",sep=""),file=paste(outputPath,outputID,".txt",sep=""),quote=F,sep="\t",row.names=F,col.names=T)
+# q()
+# }
+
+# Replace leading & trailing "-" with "N"
+ matInput <- t(apply(matInput,1,replaceLeadingTrailingDashes,readEnd))
+
+ # Trim (nucleotide) input sequences to the last codon
+ #matInput[,1] <- apply(matrix(matInput[,1]),1,trimToLastCodon)
+
+# # Check for Indels
+# if(fixIndels){
+# delPos <- fixDeletions(matInput)
+# insPos <- fixInsertions(matInput)
+# }else{
+# # Check for indels
+# indelPos <- checkForInDels(matInput)
+# indelPos <- apply(cbind(indelPos[[1]],indelPos[[2]]),1,function(x){(x[1]==T & x[2]==T)})
+# }
+
+ # If indels are present, remove mutations in the sequence & throw a warning at the end
+ #matInput[indelPos,] <- apply(matrix(matInput[indelPos,],nrow=sum(indelPos),ncol=2),1,function(x){x[1]=x[2]; return(x) })
+
+ colnames(matInput)=c("Input","Germline")
+
+ # If sequences are clonal, create an effective sequence for each clone & modify germline/group definitions
+ germlinesOriginal = NULL
+ if(clonal){
+ germlinesOriginal <- germlines
+ collapseCloneResults <- tapply(1:nrow(matInput),germlines,function(i){
+ collapseClone(matInput[i,1],matInput[i[1],2],readEnd,nonTerminalOnly=(clonal-1))
+ })
+ matInput = t(sapply(collapseCloneResults,function(x){return(x[[1]])}))
+ names_groups = tapply(groups,germlines,function(x){names(x[1])})
+ groups = tapply(groups,germlines,function(x){array(x[1],dimnames=names(x[1]))})
+ names(groups) = names_groups
+
+ names_germlines = tapply(germlines,germlines,function(x){names(x[1])})
+ germlines = tapply( germlines,germlines,function(x){array(x[1],dimnames=names(x[1]))} )
+ names(germlines) = names_germlines
+ matInputErrors = sapply(collapseCloneResults,function(x){return(x[[2]])})
+ }
+
+
+# Selection Analysis
+
+
+# if (length(germlines)>sequenceLimit) {
+# # Code to parallelize processing goes here
+# stop( paste("Error: Cannot process more than ", Upper_limit," sequences",sep="") )
+# }
+
+# if (length(germlines)1){
+ groups <- c(groups,lenGroups+1)
+ names(groups)[length(groups)] = "All sequences combined"
+ bayesPDF_groups_cdr[[lenGroups+1]] = groupPosteriors(bayesPDF_groups_cdr,length_sigma=4001)
+ bayesPDF_groups_fwr[[lenGroups+1]] = groupPosteriors(bayesPDF_groups_fwr,length_sigma=4001)
+ }
+
+ #Bayesian Outputs
+ bayes_cdr = t(sapply(bayesPDF_cdr,calcBayesOutputInfo))
+ bayes_fwr = t(sapply(bayesPDF_fwr,calcBayesOutputInfo))
+ bayes_germlines_cdr = t(sapply(bayesPDF_germlines_cdr,calcBayesOutputInfo))
+ bayes_germlines_fwr = t(sapply(bayesPDF_germlines_fwr,calcBayesOutputInfo))
+ bayes_groups_cdr = t(sapply(bayesPDF_groups_cdr,calcBayesOutputInfo))
+ bayes_groups_fwr = t(sapply(bayesPDF_groups_fwr,calcBayesOutputInfo))
+
+ #P-values
+ simgaP_cdr = sapply(bayesPDF_cdr,computeSigmaP)
+ simgaP_fwr = sapply(bayesPDF_fwr,computeSigmaP)
+
+ simgaP_germlines_cdr = sapply(bayesPDF_germlines_cdr,computeSigmaP)
+ simgaP_germlines_fwr = sapply(bayesPDF_germlines_fwr,computeSigmaP)
+
+ simgaP_groups_cdr = sapply(bayesPDF_groups_cdr,computeSigmaP)
+ simgaP_groups_fwr = sapply(bayesPDF_groups_fwr,computeSigmaP)
+
+
+ #Format output
+
+ # Round expected mutation frequencies to 3 decimal places
+ matMutationInfo[germlinesOriginal[indelPos],] = NA
+ if(nrow(matMutationInfo)==1){
+ matMutationInfo[5:8] = round(matMutationInfo[,5:8]/sum(matMutationInfo[,5:8],na.rm=T),3)
+ }else{
+ matMutationInfo[,5:8] = t(round(apply(matMutationInfo[,5:8],1,function(x){ return(x/sum(x,na.rm=T)) }),3))
+ }
+
+ listPDFs = list()
+ nRows = length(unique(groups)) + length(unique(germlines)) + length(groups)
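+ # The output matrix reserves one row per group, one per germline (clone) and one per
+ # sequence; columns 3-10 (observed/expected mutations) are filled only for sequence
+ # rows, while columns 11-18 hold the selection estimates, confidence bounds and p-values.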
+
+ matOutput = matrix(NA,ncol=18,nrow=nRows)
+ rowNumb = 1
+ for(G in unique(groups)){
+ #print(G)
+ matOutput[rowNumb,c(1,2,11:18)] = c("Group",names(groups)[groups==G][1],bayes_groups_cdr[G,],bayes_groups_fwr[G,],simgaP_groups_cdr[G],simgaP_groups_fwr[G])
+ listPDFs[[rowNumb]] = list("CDR"=bayesPDF_groups_cdr[[G]],"FWR"=bayesPDF_groups_fwr[[G]])
+ names(listPDFs)[rowNumb] = names(groups[groups==paste(G)])[1]
+ #if(names(groups)[which(groups==G)[1]]!="All sequences combined"){
+ gs = unique(germlines[groups==G])
+ rowNumb = rowNumb+1
+ if( !is.na(gs) ){
+ for( g in gs ){
+ matOutput[rowNumb,c(1,2,11:18)] = c("Germline",names(germlines)[germlines==g][1],bayes_germlines_cdr[g,],bayes_germlines_fwr[g,],simgaP_germlines_cdr[g],simgaP_germlines_fwr[g])
+ listPDFs[[rowNumb]] = list("CDR"=bayesPDF_germlines_cdr[[g]],"FWR"=bayesPDF_germlines_fwr[[g]])
+ names(listPDFs)[rowNumb] = names(germlines[germlines==paste(g)])[1]
+ rowNumb = rowNumb+1
+ indexesOfInterest = which(germlines==g)
+ numbSeqsOfInterest = length(indexesOfInterest)
+ rowNumb = seq(rowNumb,rowNumb+(numbSeqsOfInterest-1))
+ matOutput[rowNumb,] = matrix( c( rep("Sequence",numbSeqsOfInterest),
+ rownames(matInput)[indexesOfInterest],
+ c(matMutationInfo[indexesOfInterest,1:4]),
+ c(matMutationInfo[indexesOfInterest,5:8]),
+ c(bayes_cdr[indexesOfInterest,]),
+ c(bayes_fwr[indexesOfInterest,]),
+ c(simgaP_cdr[indexesOfInterest]),
+ c(simgaP_fwr[indexesOfInterest])
+ ), ncol=18, nrow=numbSeqsOfInterest,byrow=F)
+ increment=0
+ for( ioi in indexesOfInterest){
+ listPDFs[[min(rowNumb)+increment]] = list("CDR"=bayesPDF_cdr[[ioi]] , "FWR"=bayesPDF_fwr[[ioi]])
+ names(listPDFs)[min(rowNumb)+increment] = rownames(matInput)[ioi]
+ increment = increment + 1
+ }
+ rowNumb=max(rowNumb)+1
+
+ }
+ }
+ }
+ colsToFormat = 11:18
+ matOutput[,colsToFormat] = formatC( matrix(as.numeric(matOutput[,colsToFormat]), nrow=nrow(matOutput), ncol=length(colsToFormat)) , digits=3)
+ matOutput[matOutput== " NaN"] = NA
+
+
+
+ colnames(matOutput) = c("Type", "ID", "Observed_CDR_R", "Observed_CDR_S", "Observed_FWR_R", "Observed_FWR_S",
+ "Expected_CDR_R", "Expected_CDR_S", "Expected_FWR_R", "Expected_FWR_S",
+ paste( rep(testName,6), rep(c("Sigma","CIlower","CIupper"),2),rep(c("CDR","FWR"),each=3), sep="_"),
+ paste( rep(testName,2), rep("P",2),c("CDR","FWR"), sep="_")
+ )
+ fileName = paste(outputPath,outputID,".txt",sep="")
+ write.table(matOutput,file=fileName,quote=F,sep="\t",row.names=T,col.names=NA)
+ fileName = paste(outputPath,outputID,".RData",sep="")
+ save(listPDFs,file=fileName)
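+ # Two files are written under outputPath: <outputID>.txt with the summary table above and
+ # <outputID>.RData holding listPDFs (the per-row posterior densities), which comparePDFs.r loads later.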
+
+indelWarning = FALSE
+if(sum(indelPos)>0){
+ indelWarning = "Warning: The following sequences have gaps and/or deletions, and have been omitted from the analysis.";
+ indelWarning = paste( indelWarning , "
+", sep="" )
+ for(indels in names(indelPos)[indelPos]){
+ indelWarning = paste( indelWarning , "- ", indels, "
+", sep="" )
+ }
+ indelWarning = paste( indelWarning , "
+", sep="" )
+}
+
+cloneWarning = FALSE
+if(clonal==1){
+ if(sum(matInputErrors)>0){
+ cloneWarning = "Warning: The following clones have sequences of unequal length.";
+ cloneWarning = paste( cloneWarning , "
+", sep="" )
+ for(clone in names(matInputErrors)[matInputErrors]){
+ cloneWarning = paste( cloneWarning , "- ", names(germlines)[as.numeric(clone)], "
+", sep="" )
+ }
+ cloneWarning = paste( cloneWarning , "
+", sep="" )
+ }
+}
+cat(paste("Success",outputID,indelWarning,cloneWarning,sep="|"))
diff -r 43a1aa648537 -r ba33b94637ca baseline/FiveS_Mutability.RData
diff -r 43a1aa648537 -r ba33b94637ca baseline/FiveS_Substitution.RData
diff -r 43a1aa648537 -r ba33b94637ca baseline/IMGT-reference-seqs-IGHV-2015-11-05.fa
diff -r 43a1aa648537 -r ba33b94637ca baseline/IMGTVHreferencedataset20161215.fa
diff -r 43a1aa648537 -r ba33b94637ca baseline/baseline_url.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/baseline/baseline_url.txt Tue Jan 29 03:54:09 2019 -0500
@@ -0,0 +1,1 @@
+http://selection.med.yale.edu/baseline/
\ No newline at end of file
diff -r 43a1aa648537 -r ba33b94637ca baseline/comparePDFs.r
--- a/baseline/comparePDFs.r Thu Dec 07 03:44:38 2017 -0500
+++ b/baseline/comparePDFs.r Tue Jan 29 03:54:09 2019 -0500
@@ -1,225 +1,225 @@
-options("warn"=-1)
-
-#from http://selection.med.yale.edu/baseline/Archive/Baseline%20Version%201.3/Baseline_Functions_Version1.3.r
-# Compute p-value of two distributions
-compareTwoDistsFaster <-function(sigma_S=seq(-20,20,length.out=4001), N=10000, dens1=runif(4001,0,1), dens2=runif(4001,0,1)){
-#print(c(length(dens1),length(dens2)))
-if(length(dens1)>1 & length(dens2)>1 ){
- dens1<-dens1/sum(dens1)
- dens2<-dens2/sum(dens2)
- cum2 <- cumsum(dens2)-dens2/2
- tmp<- sum(sapply(1:length(dens1),function(i)return(dens1[i]*cum2[i])))
- #print(tmp)
- if(tmp>0.5)tmp<-tmp-1
- return( tmp )
- }
- else {
- return(NA)
- }
- #return (sum(sapply(1:N,function(i)(sample(sigma_S,1,prob=dens1)>sample(sigma_S,1,prob=dens2))))/N)
-}
-
-
-require("grid")
-arg <- commandArgs(TRUE)
-#arg <- c("300143","4","5")
-arg[!arg=="clonal"]
-input <- arg[1]
-output <- arg[2]
-rowIDs <- as.numeric( sapply(arg[3:(max(3,length(arg)))],function(x){ gsub("chkbx","",x) } ) )
-
-numbSeqs = length(rowIDs)
-
-if ( is.na(rowIDs[1]) | numbSeqs>10 ) {
- stop( paste("Error: Please select between one and 10 seqeunces to compare.") )
-}
-
-#load( paste("output/",sessionID,".RData",sep="") )
-load( input )
-#input
-
-xMarks = seq(-20,20,length.out=4001)
-
-plot_grid_s<-function(pdf1,pdf2,Sample=100,cex=1,xlim=NULL,xMarks = seq(-20,20,length.out=4001)){
- yMax = max(c(abs(as.numeric(unlist(listPDFs[pdf1]))),abs(as.numeric(unlist(listPDFs[pdf2]))),0),na.rm=T) * 1.1
-
- if(length(xlim==2)){
- xMin=xlim[1]
- xMax=xlim[2]
- } else {
- xMin_CDR = xMarks[listPDFs[pdf1][[1]][["CDR"]]>0.001][1]
- xMin_FWR = xMarks[listPDFs[pdf1][[1]][["FWR"]]>0.001][1]
- xMax_CDR = xMarks[listPDFs[pdf1][[1]][["CDR"]]>0.001][length(xMarks[listPDFs[pdf1][[1]][["CDR"]]>0.001])]
- xMax_FWR = xMarks[listPDFs[pdf1][[1]][["FWR"]]>0.001][length(xMarks[listPDFs[pdf1][[1]][["FWR"]]>0.001])]
-
- xMin_CDR2 = xMarks[listPDFs[pdf2][[1]][["CDR"]]>0.001][1]
- xMin_FWR2 = xMarks[listPDFs[pdf2][[1]][["FWR"]]>0.001][1]
- xMax_CDR2 = xMarks[listPDFs[pdf2][[1]][["CDR"]]>0.001][length(xMarks[listPDFs[pdf2][[1]][["CDR"]]>0.001])]
- xMax_FWR2 = xMarks[listPDFs[pdf2][[1]][["FWR"]]>0.001][length(xMarks[listPDFs[pdf2][[1]][["FWR"]]>0.001])]
-
- xMin=min(c(xMin_CDR,xMin_FWR,xMin_CDR2,xMin_FWR2,0),na.rm=TRUE)
- xMax=max(c(xMax_CDR,xMax_FWR,xMax_CDR2,xMax_FWR2,0),na.rm=TRUE)
- }
-
- sigma<-approx(xMarks,xout=seq(xMin,xMax,length.out=Sample))$x
- grid.rect(gp = gpar(col=gray(0.6),fill="white",cex=cex))
- x <- sigma
- pushViewport(viewport(x=0.175,y=0.175,width=0.825,height=0.825,just=c("left","bottom"),default.units="npc"))
- #pushViewport(plotViewport(c(1.8, 1.8, 0.25, 0.25)*cex))
- pushViewport(dataViewport(x, c(yMax,-yMax),gp = gpar(cex=cex),extension=c(0.05)))
- grid.polygon(c(0,0,1,1),c(0,0.5,0.5,0),gp=gpar(col=grey(0.95),fill=grey(0.95)),default.units="npc")
- grid.polygon(c(0,0,1,1),c(1,0.5,0.5,1),gp=gpar(col=grey(0.9),fill=grey(0.9)),default.units="npc")
- grid.rect()
- grid.xaxis(gp = gpar(cex=cex/1.1))
- yticks = pretty(c(-yMax,yMax),8)
- yticks = yticks[yticks>(-yMax) & yticks<(yMax)]
- grid.yaxis(at=yticks,label=abs(yticks),gp = gpar(cex=cex/1.1))
- if(length(listPDFs[pdf1][[1]][["CDR"]])>1){
- ycdr<-approx(xMarks,listPDFs[pdf1][[1]][["CDR"]],xout=seq(xMin,xMax,length.out=Sample),yleft=0,yright=0)$y
- grid.lines(unit(x,"native"), unit(ycdr,"native"),gp=gpar(col=2,lwd=2))
- }
- if(length(listPDFs[pdf1][[1]][["FWR"]])>1){
- yfwr<-approx(xMarks,listPDFs[pdf1][[1]][["FWR"]],xout=seq(xMin,xMax,length.out=Sample),yleft=0,yright=0)$y
- grid.lines(unit(x,"native"), unit(-yfwr,"native"),gp=gpar(col=4,lwd=2))
- }
-
- if(length(listPDFs[pdf2][[1]][["CDR"]])>1){
- ycdr2<-approx(xMarks,listPDFs[pdf2][[1]][["CDR"]],xout=seq(xMin,xMax,length.out=Sample),yleft=0,yright=0)$y
- grid.lines(unit(x,"native"), unit(ycdr2,"native"),gp=gpar(col=2,lwd=2,lty=2))
- }
- if(length(listPDFs[pdf2][[1]][["FWR"]])>1){
- yfwr2<-approx(xMarks,listPDFs[pdf2][[1]][["FWR"]],xout=seq(xMin,xMax,length.out=Sample),yleft=0,yright=0)$y
- grid.lines(unit(x,"native"), unit(-yfwr2,"native"),gp=gpar(col=4,lwd=2,lty=2))
- }
-
- grid.lines(unit(c(0,1),"npc"), unit(c(0.5,0.5),"npc"),gp=gpar(col=1))
- grid.lines(unit(c(0,0),"native"), unit(c(0,1),"npc"),gp=gpar(col=1,lwd=1,lty=3))
-
- grid.text("All", x = unit(-2.5, "lines"), rot = 90,gp = gpar(cex=cex))
- grid.text( expression(paste("Selection Strength (", Sigma, ")", sep="")) , y = unit(-2.5, "lines"),gp = gpar(cex=cex))
-
- if(pdf1==pdf2 & length(listPDFs[pdf2][[1]][["FWR"]])>1 & length(listPDFs[pdf2][[1]][["CDR"]])>1 ){
- pCDRFWR = compareTwoDistsFaster(sigma_S=xMarks, N=10000, dens1=listPDFs[[pdf1]][["CDR"]], dens2=listPDFs[[pdf1]][["FWR"]])
- pval = formatC(as.numeric(pCDRFWR),digits=3)
- grid.text( substitute(expression(paste(P[CDR/FWR], "=", x, sep="")),list(x=pval))[[2]] , x = unit(0.02, "npc"),y = unit(0.98, "npc"),just=c("left", "top"),gp = gpar(cex=cex*1.2))
- }
- grid.text(paste("CDR"), x = unit(0.98, "npc"),y = unit(0.98, "npc"),just=c("right", "top"),gp = gpar(cex=cex*1.5))
- grid.text(paste("FWR"), x = unit(0.98, "npc"),y = unit(0.02, "npc"),just=c("right", "bottom"),gp = gpar(cex=cex*1.5))
- popViewport(2)
-}
-#plot_grid_s(1)
-
-
-p2col<-function(p=0.01){
- breaks=c(-.51,-0.1,-.05,-0.01,-0.005,0,0.005,0.01,0.05,0.1,0.51)
- i<-findInterval(p,breaks)
- cols = c( rgb(0.8,1,0.8), rgb(0.6,1,0.6), rgb(0.4,1,0.4), rgb(0.2,1,0.2) , rgb(0,1,0),
- rgb(1,0,0), rgb(1,.2,.2), rgb(1,.4,.4), rgb(1,.6,.6) , rgb(1,.8,.8) )
- return(cols[i])
-}
-
-
-plot_pvals<-function(pdf1,pdf2,cex=1,upper=TRUE){
- if(upper){
- pCDR1FWR2 = compareTwoDistsFaster(sigma_S=xMarks, N=10000, dens1=listPDFs[[pdf1]][["CDR"]], dens2=listPDFs[[pdf2]][["FWR"]])
- pFWR1FWR2 = compareTwoDistsFaster(sigma_S=xMarks, N=10000, dens1=listPDFs[[pdf1]][["FWR"]], dens2=listPDFs[[pdf2]][["FWR"]])
- pFWR1CDR2 = compareTwoDistsFaster(sigma_S=xMarks, N=10000, dens2=listPDFs[[pdf2]][["CDR"]], dens1=listPDFs[[pdf1]][["FWR"]])
- pCDR1CDR2 = compareTwoDistsFaster(sigma_S=xMarks, N=10000, dens2=listPDFs[[pdf2]][["CDR"]], dens1=listPDFs[[pdf1]][["CDR"]])
- grid.polygon(c(0.5,0.5,1,1),c(0,0.5,0.5,0),gp=gpar(col=p2col(pFWR1FWR2),fill=p2col(pFWR1FWR2)),default.units="npc")
- grid.polygon(c(0.5,0.5,1,1),c(1,0.5,0.5,1),gp=gpar(col=p2col(pCDR1FWR2),fill=p2col(pCDR1FWR2)),default.units="npc")
- grid.polygon(c(0.5,0.5,0,0),c(1,0.5,0.5,1),gp=gpar(col=p2col(pCDR1CDR2),fill=p2col(pCDR1CDR2)),default.units="npc")
- grid.polygon(c(0.5,0.5,0,0),c(0,0.5,0.5,0),gp=gpar(col=p2col(pFWR1CDR2),fill=p2col(pFWR1CDR2)),default.units="npc")
-
- grid.lines(c(0,1),0.5,gp=gpar(lty=2,col=gray(0.925)))
- grid.lines(0.5,c(0,1),gp=gpar(lty=2,col=gray(0.925)))
-
- grid.text(formatC(as.numeric(pFWR1FWR2),digits=3), x = unit(0.75, "npc"),y = unit(0.25, "npc"),just=c("center", "center"),gp = gpar(cex=cex))
- grid.text(formatC(as.numeric(pCDR1FWR2),digits=3), x = unit(0.75, "npc"),y = unit(0.75, "npc"),just=c("center", "center"),gp = gpar(cex=cex))
- grid.text(formatC(as.numeric(pCDR1CDR2),digits=3), x = unit(0.25, "npc"),y = unit(0.75, "npc"),just=c("center", "center"),gp = gpar(cex=cex))
- grid.text(formatC(as.numeric(pFWR1CDR2),digits=3), x = unit(0.25, "npc"),y = unit(0.25, "npc"),just=c("center", "center"),gp = gpar(cex=cex))
-
-
- # grid.text(paste("P = ",formatC(pCDRFWR,digits=3)), x = unit(0.5, "npc"),y = unit(0.98, "npc"),just=c("center", "top"),gp = gpar(cex=cex))
- # grid.text(paste("P = ",formatC(pFWRFWR,digits=3)), x = unit(0.5, "npc"),y = unit(0.02, "npc"),just=c("center", "bottom"),gp = gpar(cex=cex))
- }
- else{
- }
-}
-
-
-##################################################################################
-################## The whole OCD's matrix ########################################
-##################################################################################
-
-#pdf(width=4*numbSeqs+1/3,height=4*numbSeqs+1/3)
-pdf( output ,width=4*numbSeqs+1/3,height=4*numbSeqs+1/3)
-
-pushViewport(viewport(x=0.02,y=0.02,just = c("left", "bottom"),w =0.96,height=0.96,layout = grid.layout(numbSeqs+1,numbSeqs+1,widths=unit.c(unit(rep(1,numbSeqs),"null"),unit(4,"lines")),heights=unit.c(unit(4,"lines"),unit(rep(1,numbSeqs),"null")))))
-
-for( seqOne in 1:numbSeqs+1){
- pushViewport(viewport(layout.pos.col = seqOne-1, layout.pos.row = 1))
- if(seqOne>2){
- grid.polygon(c(0,0,0.5,0.5),c(0,0.5,0.5,0),gp=gpar(col=grey(0.5),fill=grey(0.9)),default.units="npc")
- grid.polygon(c(1,1,0.5,0.5),c(0,0.5,0.5,0),gp=gpar(col=grey(0.5),fill=grey(0.95)),default.units="npc")
- grid.polygon(c(0,0,1,1),c(1,0.5,0.5,1),gp=gpar(col=grey(0.5)),default.units="npc")
-
- grid.text(y=.25,x=0.75,"FWR",gp = gpar(cex=1.5),just="center")
- grid.text(y=.25,x=0.25,"CDR",gp = gpar(cex=1.5),just="center")
- }
- grid.rect(gp = gpar(col=grey(0.9)))
- grid.text(y=.75,substr(paste(names(listPDFs)[rowIDs[seqOne-1]]),1,16),gp = gpar(cex=2),just="center")
- popViewport(1)
-}
-
-for( seqOne in 1:numbSeqs+1){
- pushViewport(viewport(layout.pos.row = seqOne, layout.pos.col = numbSeqs+1))
- if(seqOne<=numbSeqs){
- grid.polygon(c(0,0.5,0.5,0),c(0,0,0.5,0.5),gp=gpar(col=grey(0.5),fill=grey(0.95)),default.units="npc")
- grid.polygon(c(0,0.5,0.5,0),c(1,1,0.5,0.5),gp=gpar(col=grey(0.5),fill=grey(0.9)),default.units="npc")
- grid.polygon(c(1,0.5,0.5,1),c(0,0,1,1),gp=gpar(col=grey(0.5)),default.units="npc")
- grid.text(x=.25,y=0.75,"CDR",gp = gpar(cex=1.5),just="center",rot=270)
- grid.text(x=.25,y=0.25,"FWR",gp = gpar(cex=1.5),just="center",rot=270)
- }
- grid.rect(gp = gpar(col=grey(0.9)))
- grid.text(x=0.75,substr(paste(names(listPDFs)[rowIDs[seqOne-1]]),1,16),gp = gpar(cex=2),rot=270,just="center")
- popViewport(1)
-}
-
-for( seqOne in 1:numbSeqs+1){
- for(seqTwo in 1:numbSeqs+1){
- pushViewport(viewport(layout.pos.col = seqTwo-1, layout.pos.row = seqOne))
- if(seqTwo>seqOne){
- plot_pvals(rowIDs[seqOne-1],rowIDs[seqTwo-1],cex=2)
- grid.rect()
- }
- popViewport(1)
- }
-}
-
-
-xMin=0
-xMax=0.01
-for(pdf1 in rowIDs){
- xMin_CDR = xMarks[listPDFs[pdf1][[1]][["CDR"]]>0.001][1]
- xMin_FWR = xMarks[listPDFs[pdf1][[1]][["FWR"]]>0.001][1]
- xMax_CDR = xMarks[listPDFs[pdf1][[1]][["CDR"]]>0.001][length(xMarks[listPDFs[pdf1][[1]][["CDR"]]>0.001])]
- xMax_FWR = xMarks[listPDFs[pdf1][[1]][["FWR"]]>0.001][length(xMarks[listPDFs[pdf1][[1]][["FWR"]]>0.001])]
- xMin=min(c(xMin_CDR,xMin_FWR,xMin),na.rm=TRUE)
- xMax=max(c(xMax_CDR,xMax_FWR,xMax),na.rm=TRUE)
-}
-
-
-
-for(i in 1:numbSeqs+1){
- for(j in (i-1):numbSeqs){
- pushViewport(viewport(layout.pos.col = i-1, layout.pos.row = j+1))
- grid.rect()
- plot_grid_s(rowIDs[i-1],rowIDs[j],cex=1)
- popViewport(1)
- }
-}
-
-dev.off()
-
-cat("Success", paste(rowIDs,collapse="_"),sep=":")
-
+options("warn"=-1)
+
+#from http://selection.med.yale.edu/baseline/Archive/Baseline%20Version%201.3/Baseline_Functions_Version1.3.r
+# Compute p-value of two distributions
+compareTwoDistsFaster <-function(sigma_S=seq(-20,20,length.out=4001), N=10000, dens1=runif(4001,0,1), dens2=runif(4001,0,1)){
+#print(c(length(dens1),length(dens2)))
+if(length(dens1)>1 & length(dens2)>1 ){
+ dens1<-dens1/sum(dens1)
+ dens2<-dens2/sum(dens2)
+ cum2 <- cumsum(dens2)-dens2/2
+ tmp<- sum(sapply(1:length(dens1),function(i)return(dens1[i]*cum2[i])))
+ #print(tmp)
+ if(tmp>0.5)tmp<-tmp-1
+ return( tmp )
+ }
+ else {
+ return(NA)
+ }
+ #return (sum(sapply(1:N,function(i)(sample(sigma_S,1,prob=dens1)>sample(sigma_S,1,prob=dens2))))/N)
+}
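+# In effect this returns P(X2 < X1) for the two discretized densities (with a half-bin
+# continuity correction); values above 0.5 are mapped to value-1, so the result lies in
+# (-0.5, 0.5] and its sign encodes the direction of the shift between dens1 and dens2.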
+
+
+require("grid")
+arg <- commandArgs(TRUE)
+#arg <- c("300143","4","5")
+arg <- arg[!arg=="clonal"]
+input <- arg[1]
+output <- arg[2]
+rowIDs <- as.numeric( sapply(arg[3:(max(3,length(arg)))],function(x){ gsub("chkbx","",x) } ) )
+
+numbSeqs = length(rowIDs)
+
+if ( is.na(rowIDs[1]) | numbSeqs>10 ) {
+ stop( paste("Error: Please select between one and 10 seqeunces to compare.") )
+}
+
+#load( paste("output/",sessionID,".RData",sep="") )
+load( input )
+#input
+
+xMarks = seq(-20,20,length.out=4001)
+
+plot_grid_s<-function(pdf1,pdf2,Sample=100,cex=1,xlim=NULL,xMarks = seq(-20,20,length.out=4001)){
+ yMax = max(c(abs(as.numeric(unlist(listPDFs[pdf1]))),abs(as.numeric(unlist(listPDFs[pdf2]))),0),na.rm=T) * 1.1
+
+ if(length(xlim)==2){
+ xMin=xlim[1]
+ xMax=xlim[2]
+ } else {
+ xMin_CDR = xMarks[listPDFs[pdf1][[1]][["CDR"]]>0.001][1]
+ xMin_FWR = xMarks[listPDFs[pdf1][[1]][["FWR"]]>0.001][1]
+ xMax_CDR = xMarks[listPDFs[pdf1][[1]][["CDR"]]>0.001][length(xMarks[listPDFs[pdf1][[1]][["CDR"]]>0.001])]
+ xMax_FWR = xMarks[listPDFs[pdf1][[1]][["FWR"]]>0.001][length(xMarks[listPDFs[pdf1][[1]][["FWR"]]>0.001])]
+
+ xMin_CDR2 = xMarks[listPDFs[pdf2][[1]][["CDR"]]>0.001][1]
+ xMin_FWR2 = xMarks[listPDFs[pdf2][[1]][["FWR"]]>0.001][1]
+ xMax_CDR2 = xMarks[listPDFs[pdf2][[1]][["CDR"]]>0.001][length(xMarks[listPDFs[pdf2][[1]][["CDR"]]>0.001])]
+ xMax_FWR2 = xMarks[listPDFs[pdf2][[1]][["FWR"]]>0.001][length(xMarks[listPDFs[pdf2][[1]][["FWR"]]>0.001])]
+
+ xMin=min(c(xMin_CDR,xMin_FWR,xMin_CDR2,xMin_FWR2,0),na.rm=TRUE)
+ xMax=max(c(xMax_CDR,xMax_FWR,xMax_CDR2,xMax_FWR2,0),na.rm=TRUE)
+ }
+
+ sigma<-approx(xMarks,xout=seq(xMin,xMax,length.out=Sample))$x
+ grid.rect(gp = gpar(col=gray(0.6),fill="white",cex=cex))
+ x <- sigma
+ pushViewport(viewport(x=0.175,y=0.175,width=0.825,height=0.825,just=c("left","bottom"),default.units="npc"))
+ #pushViewport(plotViewport(c(1.8, 1.8, 0.25, 0.25)*cex))
+ pushViewport(dataViewport(x, c(yMax,-yMax),gp = gpar(cex=cex),extension=c(0.05)))
+ grid.polygon(c(0,0,1,1),c(0,0.5,0.5,0),gp=gpar(col=grey(0.95),fill=grey(0.95)),default.units="npc")
+ grid.polygon(c(0,0,1,1),c(1,0.5,0.5,1),gp=gpar(col=grey(0.9),fill=grey(0.9)),default.units="npc")
+ grid.rect()
+ grid.xaxis(gp = gpar(cex=cex/1.1))
+ yticks = pretty(c(-yMax,yMax),8)
+ yticks = yticks[yticks>(-yMax) & yticks<(yMax)]
+ grid.yaxis(at=yticks,label=abs(yticks),gp = gpar(cex=cex/1.1))
+ if(length(listPDFs[pdf1][[1]][["CDR"]])>1){
+ ycdr<-approx(xMarks,listPDFs[pdf1][[1]][["CDR"]],xout=seq(xMin,xMax,length.out=Sample),yleft=0,yright=0)$y
+ grid.lines(unit(x,"native"), unit(ycdr,"native"),gp=gpar(col=2,lwd=2))
+ }
+ if(length(listPDFs[pdf1][[1]][["FWR"]])>1){
+ yfwr<-approx(xMarks,listPDFs[pdf1][[1]][["FWR"]],xout=seq(xMin,xMax,length.out=Sample),yleft=0,yright=0)$y
+ grid.lines(unit(x,"native"), unit(-yfwr,"native"),gp=gpar(col=4,lwd=2))
+ }
+
+ if(length(listPDFs[pdf2][[1]][["CDR"]])>1){
+ ycdr2<-approx(xMarks,listPDFs[pdf2][[1]][["CDR"]],xout=seq(xMin,xMax,length.out=Sample),yleft=0,yright=0)$y
+ grid.lines(unit(x,"native"), unit(ycdr2,"native"),gp=gpar(col=2,lwd=2,lty=2))
+ }
+ if(length(listPDFs[pdf2][[1]][["FWR"]])>1){
+ yfwr2<-approx(xMarks,listPDFs[pdf2][[1]][["FWR"]],xout=seq(xMin,xMax,length.out=Sample),yleft=0,yright=0)$y
+ grid.lines(unit(x,"native"), unit(-yfwr2,"native"),gp=gpar(col=4,lwd=2,lty=2))
+ }
+
+ grid.lines(unit(c(0,1),"npc"), unit(c(0.5,0.5),"npc"),gp=gpar(col=1))
+ grid.lines(unit(c(0,0),"native"), unit(c(0,1),"npc"),gp=gpar(col=1,lwd=1,lty=3))
+
+ grid.text("All", x = unit(-2.5, "lines"), rot = 90,gp = gpar(cex=cex))
+ grid.text( expression(paste("Selection Strength (", Sigma, ")", sep="")) , y = unit(-2.5, "lines"),gp = gpar(cex=cex))
+
+ if(pdf1==pdf2 & length(listPDFs[pdf2][[1]][["FWR"]])>1 & length(listPDFs[pdf2][[1]][["CDR"]])>1 ){
+ pCDRFWR = compareTwoDistsFaster(sigma_S=xMarks, N=10000, dens1=listPDFs[[pdf1]][["CDR"]], dens2=listPDFs[[pdf1]][["FWR"]])
+ pval = formatC(as.numeric(pCDRFWR),digits=3)
+ grid.text( substitute(expression(paste(P[CDR/FWR], "=", x, sep="")),list(x=pval))[[2]] , x = unit(0.02, "npc"),y = unit(0.98, "npc"),just=c("left", "top"),gp = gpar(cex=cex*1.2))
+ }
+ grid.text(paste("CDR"), x = unit(0.98, "npc"),y = unit(0.98, "npc"),just=c("right", "top"),gp = gpar(cex=cex*1.5))
+ grid.text(paste("FWR"), x = unit(0.98, "npc"),y = unit(0.02, "npc"),just=c("right", "bottom"),gp = gpar(cex=cex*1.5))
+ popViewport(2)
+}
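+# Each panel drawn by plot_grid_s shows the CDR posterior (red) above the axis and the
+# FWR posterior (blue) mirrored below it; solid lines are pdf1, dashed lines pdf2, and on
+# the diagonal (pdf1 == pdf2) the CDR-vs-FWR comparison value is printed in the corner.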
+#plot_grid_s(1)
+
+
+p2col<-function(p=0.01){
+ breaks=c(-.51,-0.1,-.05,-0.01,-0.005,0,0.005,0.01,0.05,0.1,0.51)
+ i<-findInterval(p,breaks)
+ cols = c( rgb(0.8,1,0.8), rgb(0.6,1,0.6), rgb(0.4,1,0.4), rgb(0.2,1,0.2) , rgb(0,1,0),
+ rgb(1,0,0), rgb(1,.2,.2), rgb(1,.4,.4), rgb(1,.6,.6) , rgb(1,.8,.8) )
+ return(cols[i])
+}
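+# p2col maps the signed comparison value to a fill colour: negative values get green
+# shades and positive values red shades, with stronger (more saturated) colours the
+# closer the value is to zero, i.e. the stronger the separation between the densities.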
+
+
+plot_pvals<-function(pdf1,pdf2,cex=1,upper=TRUE){
+ if(upper){
+ pCDR1FWR2 = compareTwoDistsFaster(sigma_S=xMarks, N=10000, dens1=listPDFs[[pdf1]][["CDR"]], dens2=listPDFs[[pdf2]][["FWR"]])
+ pFWR1FWR2 = compareTwoDistsFaster(sigma_S=xMarks, N=10000, dens1=listPDFs[[pdf1]][["FWR"]], dens2=listPDFs[[pdf2]][["FWR"]])
+ pFWR1CDR2 = compareTwoDistsFaster(sigma_S=xMarks, N=10000, dens2=listPDFs[[pdf2]][["CDR"]], dens1=listPDFs[[pdf1]][["FWR"]])
+ pCDR1CDR2 = compareTwoDistsFaster(sigma_S=xMarks, N=10000, dens2=listPDFs[[pdf2]][["CDR"]], dens1=listPDFs[[pdf1]][["CDR"]])
+ grid.polygon(c(0.5,0.5,1,1),c(0,0.5,0.5,0),gp=gpar(col=p2col(pFWR1FWR2),fill=p2col(pFWR1FWR2)),default.units="npc")
+ grid.polygon(c(0.5,0.5,1,1),c(1,0.5,0.5,1),gp=gpar(col=p2col(pCDR1FWR2),fill=p2col(pCDR1FWR2)),default.units="npc")
+ grid.polygon(c(0.5,0.5,0,0),c(1,0.5,0.5,1),gp=gpar(col=p2col(pCDR1CDR2),fill=p2col(pCDR1CDR2)),default.units="npc")
+ grid.polygon(c(0.5,0.5,0,0),c(0,0.5,0.5,0),gp=gpar(col=p2col(pFWR1CDR2),fill=p2col(pFWR1CDR2)),default.units="npc")
+
+ grid.lines(c(0,1),0.5,gp=gpar(lty=2,col=gray(0.925)))
+ grid.lines(0.5,c(0,1),gp=gpar(lty=2,col=gray(0.925)))
+
+ grid.text(formatC(as.numeric(pFWR1FWR2),digits=3), x = unit(0.75, "npc"),y = unit(0.25, "npc"),just=c("center", "center"),gp = gpar(cex=cex))
+ grid.text(formatC(as.numeric(pCDR1FWR2),digits=3), x = unit(0.75, "npc"),y = unit(0.75, "npc"),just=c("center", "center"),gp = gpar(cex=cex))
+ grid.text(formatC(as.numeric(pCDR1CDR2),digits=3), x = unit(0.25, "npc"),y = unit(0.75, "npc"),just=c("center", "center"),gp = gpar(cex=cex))
+ grid.text(formatC(as.numeric(pFWR1CDR2),digits=3), x = unit(0.25, "npc"),y = unit(0.25, "npc"),just=c("center", "center"),gp = gpar(cex=cex))
+
+
+ # grid.text(paste("P = ",formatC(pCDRFWR,digits=3)), x = unit(0.5, "npc"),y = unit(0.98, "npc"),just=c("center", "top"),gp = gpar(cex=cex))
+ # grid.text(paste("P = ",formatC(pFWRFWR,digits=3)), x = unit(0.5, "npc"),y = unit(0.02, "npc"),just=c("center", "bottom"),gp = gpar(cex=cex))
+ }
+ else{
+ }
+}
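+# The four quadrants compare the CDR/FWR densities of pdf1 against those of pdf2:
+# CDR1-vs-CDR2 upper-left, CDR1-vs-FWR2 upper-right, FWR1-vs-CDR2 lower-left and
+# FWR1-vs-FWR2 lower-right; each quadrant is shaded via p2col and labelled with its value.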
+
+
+##################################################################################
+################## The whole OCD's matrix ########################################
+##################################################################################
+
+#pdf(width=4*numbSeqs+1/3,height=4*numbSeqs+1/3)
+pdf( output ,width=4*numbSeqs+1/3,height=4*numbSeqs+1/3)
+
+pushViewport(viewport(x=0.02,y=0.02,just = c("left", "bottom"),w =0.96,height=0.96,layout = grid.layout(numbSeqs+1,numbSeqs+1,widths=unit.c(unit(rep(1,numbSeqs),"null"),unit(4,"lines")),heights=unit.c(unit(4,"lines"),unit(rep(1,numbSeqs),"null")))))
+
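+# Note: "1:numbSeqs+1" parses as (1:numbSeqs)+1, so the loops below run over
+# 2..(numbSeqs+1); the first grid row and last grid column hold the labels and
+# seqOne-1 indexes into rowIDs.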
+for( seqOne in 1:numbSeqs+1){
+ pushViewport(viewport(layout.pos.col = seqOne-1, layout.pos.row = 1))
+ if(seqOne>2){
+ grid.polygon(c(0,0,0.5,0.5),c(0,0.5,0.5,0),gp=gpar(col=grey(0.5),fill=grey(0.9)),default.units="npc")
+ grid.polygon(c(1,1,0.5,0.5),c(0,0.5,0.5,0),gp=gpar(col=grey(0.5),fill=grey(0.95)),default.units="npc")
+ grid.polygon(c(0,0,1,1),c(1,0.5,0.5,1),gp=gpar(col=grey(0.5)),default.units="npc")
+
+ grid.text(y=.25,x=0.75,"FWR",gp = gpar(cex=1.5),just="center")
+ grid.text(y=.25,x=0.25,"CDR",gp = gpar(cex=1.5),just="center")
+ }
+ grid.rect(gp = gpar(col=grey(0.9)))
+ grid.text(y=.75,substr(paste(names(listPDFs)[rowIDs[seqOne-1]]),1,16),gp = gpar(cex=2),just="center")
+ popViewport(1)
+}
+
+for( seqOne in 1:numbSeqs+1){
+ pushViewport(viewport(layout.pos.row = seqOne, layout.pos.col = numbSeqs+1))
+ if(seqOne<=numbSeqs){
+ grid.polygon(c(0,0.5,0.5,0),c(0,0,0.5,0.5),gp=gpar(col=grey(0.5),fill=grey(0.95)),default.units="npc")
+ grid.polygon(c(0,0.5,0.5,0),c(1,1,0.5,0.5),gp=gpar(col=grey(0.5),fill=grey(0.9)),default.units="npc")
+ grid.polygon(c(1,0.5,0.5,1),c(0,0,1,1),gp=gpar(col=grey(0.5)),default.units="npc")
+ grid.text(x=.25,y=0.75,"CDR",gp = gpar(cex=1.5),just="center",rot=270)
+ grid.text(x=.25,y=0.25,"FWR",gp = gpar(cex=1.5),just="center",rot=270)
+ }
+ grid.rect(gp = gpar(col=grey(0.9)))
+ grid.text(x=0.75,substr(paste(names(listPDFs)[rowIDs[seqOne-1]]),1,16),gp = gpar(cex=2),rot=270,just="center")
+ popViewport(1)
+}
+
+for( seqOne in 1:numbSeqs+1){
+ for(seqTwo in 1:numbSeqs+1){
+ pushViewport(viewport(layout.pos.col = seqTwo-1, layout.pos.row = seqOne))
+ if(seqTwo>seqOne){
+ plot_pvals(rowIDs[seqOne-1],rowIDs[seqTwo-1],cex=2)
+ grid.rect()
+ }
+ popViewport(1)
+ }
+}
+
+
+xMin=0
+xMax=0.01
+for(pdf1 in rowIDs){
+ xMin_CDR = xMarks[listPDFs[pdf1][[1]][["CDR"]]>0.001][1]
+ xMin_FWR = xMarks[listPDFs[pdf1][[1]][["FWR"]]>0.001][1]
+ xMax_CDR = xMarks[listPDFs[pdf1][[1]][["CDR"]]>0.001][length(xMarks[listPDFs[pdf1][[1]][["CDR"]]>0.001])]
+ xMax_FWR = xMarks[listPDFs[pdf1][[1]][["FWR"]]>0.001][length(xMarks[listPDFs[pdf1][[1]][["FWR"]]>0.001])]
+ xMin=min(c(xMin_CDR,xMin_FWR,xMin),na.rm=TRUE)
+ xMax=max(c(xMax_CDR,xMax_FWR,xMax),na.rm=TRUE)
+}
+
+
+
+for(i in 1:numbSeqs+1){
+ for(j in (i-1):numbSeqs){
+ pushViewport(viewport(layout.pos.col = i-1, layout.pos.row = j+1))
+ grid.rect()
+ plot_grid_s(rowIDs[i-1],rowIDs[j],cex=1)
+ popViewport(1)
+ }
+}
+
+dev.off()
+
+cat("Success", paste(rowIDs,collapse="_"),sep=":")
+
diff -r 43a1aa648537 -r ba33b94637ca baseline/script_imgt.py
--- a/baseline/script_imgt.py Thu Dec 07 03:44:38 2017 -0500
+++ b/baseline/script_imgt.py Tue Jan 29 03:54:09 2019 -0500
@@ -1,86 +1,86 @@
-#import xlrd #avoid dep
-import argparse
-import re
-
-parser = argparse.ArgumentParser()
-parser.add_argument("--input", help="Excel input file containing one or more sheets where column G has the gene annotation, H has the sequence id and J has the sequence")
-parser.add_argument("--ref", help="Reference file")
-parser.add_argument("--output", help="Output file")
-parser.add_argument("--id", help="ID to be used at the '>>>' line in the output")
-
-args = parser.parse_args()
-
-print "script_imgt.py"
-print "input:", args.input
-print "ref:", args.ref
-print "output:", args.output
-print "id:", args.id
-
-refdic = dict()
-with open(args.ref, 'rU') as ref:
- currentSeq = ""
- currentId = ""
- for line in ref:
- if line.startswith(">"):
- if currentSeq is not "" and currentId is not "":
- refdic[currentId[1:]] = currentSeq
- currentId = line.rstrip()
- currentSeq = ""
- else:
- currentSeq += line.rstrip()
- refdic[currentId[1:]] = currentSeq
-
-print "Have", str(len(refdic)), "reference sequences"
-
-vPattern = [r"(IGHV[0-9]-[0-9ab]+-?[0-9]?D?\*\d{1,2})"]#,
-# r"(TRBV[0-9]{1,2}-?[0-9]?-?[123]?)",
-# r"(IGKV[0-3]D?-[0-9]{1,2})",
-# r"(IGLV[0-9]-[0-9]{1,2})",
-# r"(TRAV[0-9]{1,2}(-[1-46])?(/DV[45678])?)",
-# r"(TRGV[234589])",
-# r"(TRDV[1-3])"]
-
-#vPattern = re.compile(r"|".join(vPattern))
-vPattern = re.compile("|".join(vPattern))
-
-def filterGene(s, pattern):
- if type(s) is not str:
- return None
- res = pattern.search(s)
- if res:
- return res.group(0)
- return None
-
-
-
-currentSeq = ""
-currentId = ""
-first=True
-with open(args.input, 'r') as i:
- with open(args.output, 'a') as o:
- o.write(">>>" + args.id + "\n")
- outputdic = dict()
- for line in i:
- if first:
- first = False
- continue
- linesplt = line.split("\t")
- ref = filterGene(linesplt[1], vPattern)
- if not ref or not linesplt[2].rstrip():
- continue
- if ref in outputdic:
- outputdic[ref] += [(linesplt[0].replace(">", ""), linesplt[2].replace(">", "").rstrip())]
- else:
- outputdic[ref] = [(linesplt[0].replace(">", ""), linesplt[2].replace(">", "").rstrip())]
- #print outputdic
-
- for k in outputdic.keys():
- if k in refdic:
- o.write(">>" + k + "\n")
- o.write(refdic[k] + "\n")
- for seq in outputdic[k]:
- #print seq
- o.write(">" + seq[0] + "\n")
- o.write(seq[1] + "\n")
- else:
- print k + " not in reference, skipping " + k
+#import xlrd #avoid dep
+import argparse
+import re
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--input", help="Excel input file containing one or more sheets where column G has the gene annotation, H has the sequence id and J has the sequence")
+parser.add_argument("--ref", help="Reference file")
+parser.add_argument("--output", help="Output file")
+parser.add_argument("--id", help="ID to be used at the '>>>' line in the output")
+
+args = parser.parse_args()
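+# Example usage (hypothetical file names):
+#   python script_imgt.py --input summary.txt --ref IMGT_reference.fasta --output baseline_input.fasta --id sample1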
+
+print "script_imgt.py"
+print "input:", args.input
+print "ref:", args.ref
+print "output:", args.output
+print "id:", args.id
+
+refdic = dict()
+with open(args.ref, 'rU') as ref:
+ currentSeq = ""
+ currentId = ""
+ for line in ref:
+ if line.startswith(">"):
+ if currentSeq != "" and currentId != "":
+ refdic[currentId[1:]] = currentSeq
+ currentId = line.rstrip()
+ currentSeq = ""
+ else:
+ currentSeq += line.rstrip()
+ refdic[currentId[1:]] = currentSeq
+
+print "Have", str(len(refdic)), "reference sequences"
+
+vPattern = [r"(IGHV[0-9]-[0-9ab]+-?[0-9]?D?\*\d{1,2})"]#,
+# r"(TRBV[0-9]{1,2}-?[0-9]?-?[123]?)",
+# r"(IGKV[0-3]D?-[0-9]{1,2})",
+# r"(IGLV[0-9]-[0-9]{1,2})",
+# r"(TRAV[0-9]{1,2}(-[1-46])?(/DV[45678])?)",
+# r"(TRGV[234589])",
+# r"(TRDV[1-3])"]
+
+#vPattern = re.compile(r"|".join(vPattern))
+vPattern = re.compile("|".join(vPattern))
+
+def filterGene(s, pattern):
+ if type(s) is not str:
+ return None
+ res = pattern.search(s)
+ if res:
+ return res.group(0)
+ return None
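+# Example (illustrative only): filterGene("Homsap IGHV3-23*01 F", vPattern) returns
+# "IGHV3-23*01"; strings without a matching IGHV allele return None.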
+
+
+
+currentSeq = ""
+currentId = ""
+first=True
+with open(args.input, 'r') as i:
+ with open(args.output, 'a') as o:
+ o.write(">>>" + args.id + "\n")
+ outputdic = dict()
+ for line in i:
+ if first:
+ first = False
+ continue
+ linesplt = line.split("\t")
+ ref = filterGene(linesplt[1], vPattern)
+ if not ref or not linesplt[2].rstrip():
+ continue
+ if ref in outputdic:
+ outputdic[ref] += [(linesplt[0].replace(">", ""), linesplt[2].replace(">", "").rstrip())]
+ else:
+ outputdic[ref] = [(linesplt[0].replace(">", ""), linesplt[2].replace(">", "").rstrip())]
+ #print outputdic
+
+ for k in outputdic.keys():
+ if k in refdic:
+ o.write(">>" + k + "\n")
+ o.write(refdic[k] + "\n")
+ for seq in outputdic[k]:
+ #print seq
+ o.write(">" + seq[0] + "\n")
+ o.write(seq[1] + "\n")
+ else:
+ print k + " not in reference, skipping " + k
diff -r 43a1aa648537 -r ba33b94637ca baseline/script_xlsx.py
--- a/baseline/script_xlsx.py Thu Dec 07 03:44:38 2017 -0500
+++ b/baseline/script_xlsx.py Tue Jan 29 03:54:09 2019 -0500
@@ -1,58 +1,58 @@
-import xlrd
-import argparse
-
-parser = argparse.ArgumentParser()
-parser.add_argument("--input", help="Excel input file containing one or more sheets where column G has the gene annotation, H has the sequence id and J has the sequence")
-parser.add_argument("--ref", help="Reference file")
-parser.add_argument("--output", help="Output file")
-
-args = parser.parse_args()
-
-gene_column = 6
-id_column = 7
-seq_column = 8
-LETTERS = [x for x in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
-
-
-refdic = dict()
-with open(args.ref, 'r') as ref:
- currentSeq = ""
- currentId = ""
- for line in ref.readlines():
- if line[0] is ">":
- if currentSeq is not "" and currentId is not "":
- refdic[currentId[1:]] = currentSeq
- currentId = line.rstrip()
- currentSeq = ""
- else:
- currentSeq += line.rstrip()
- refdic[currentId[1:]] = currentSeq
-
-currentSeq = ""
-currentId = ""
-with xlrd.open_workbook(args.input, 'r') as wb:
- with open(args.output, 'a') as o:
- for sheet in wb.sheets():
- if sheet.cell(1,gene_column).value.find("IGHV") < 0:
- print "Genes not in column " + LETTERS[gene_column] + ", skipping sheet " + sheet.name
- continue
- o.write(">>>" + sheet.name + "\n")
- outputdic = dict()
- for rowindex in range(1, sheet.nrows):
- ref = sheet.cell(rowindex, gene_column).value.replace(">", "")
- if ref in outputdic:
- outputdic[ref] += [(sheet.cell(rowindex, id_column).value.replace(">", ""), sheet.cell(rowindex, seq_column).value)]
- else:
- outputdic[ref] = [(sheet.cell(rowindex, id_column).value.replace(">", ""), sheet.cell(rowindex, seq_column).value)]
- #print outputdic
-
- for k in outputdic.keys():
- if k in refdic:
- o.write(">>" + k + "\n")
- o.write(refdic[k] + "\n")
- for seq in outputdic[k]:
- #print seq
- o.write(">" + seq[0] + "\n")
- o.write(seq[1] + "\n")
- else:
- print k + " not in reference, skipping " + k
+import xlrd
+import argparse
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--input", help="Excel input file containing one or more sheets where column G has the gene annotation, H has the sequence id and J has the sequence")
+parser.add_argument("--ref", help="Reference file")
+parser.add_argument("--output", help="Output file")
+
+args = parser.parse_args()
+
+gene_column = 6
+id_column = 7
+seq_column = 8
+LETTERS = [x for x in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"]
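+# Column indices are 0-based, so 6/7/8 address spreadsheet columns G, H and I; LETTERS
+# is only used to name the gene column in the warning message below.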
+
+
+refdic = dict()
+with open(args.ref, 'r') as ref:
+ currentSeq = ""
+ currentId = ""
+ for line in ref.readlines():
+ if line.startswith(">"):
+ if currentSeq != "" and currentId != "":
+ refdic[currentId[1:]] = currentSeq
+ currentId = line.rstrip()
+ currentSeq = ""
+ else:
+ currentSeq += line.rstrip()
+ refdic[currentId[1:]] = currentSeq
+
+currentSeq = ""
+currentId = ""
+with xlrd.open_workbook(args.input, 'r') as wb:
+ with open(args.output, 'a') as o:
+ for sheet in wb.sheets():
+ if sheet.cell(1,gene_column).value.find("IGHV") < 0:
+ print "Genes not in column " + LETTERS[gene_column] + ", skipping sheet " + sheet.name
+ continue
+ o.write(">>>" + sheet.name + "\n")
+ outputdic = dict()
+ for rowindex in range(1, sheet.nrows):
+ ref = sheet.cell(rowindex, gene_column).value.replace(">", "")
+ if ref in outputdic:
+ outputdic[ref] += [(sheet.cell(rowindex, id_column).value.replace(">", ""), sheet.cell(rowindex, seq_column).value)]
+ else:
+ outputdic[ref] = [(sheet.cell(rowindex, id_column).value.replace(">", ""), sheet.cell(rowindex, seq_column).value)]
+ #print outputdic
+
+ for k in outputdic.keys():
+ if k in refdic:
+ o.write(">>" + k + "\n")
+ o.write(refdic[k] + "\n")
+ for seq in outputdic[k]:
+ #print seq
+ o.write(">" + seq[0] + "\n")
+ o.write(seq[1] + "\n")
+ else:
+ print k + " not in reference, skipping " + k
diff -r 43a1aa648537 -r ba33b94637ca baseline/wrapper.sh
diff -r 43a1aa648537 -r ba33b94637ca change_o/DefineClones.py
diff -r 43a1aa648537 -r ba33b94637ca change_o/MakeDb.py
diff -r 43a1aa648537 -r ba33b94637ca change_o/change_o_url.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/change_o/change_o_url.txt Tue Jan 29 03:54:09 2019 -0500
@@ -0,0 +1,1 @@
+https://changeo.readthedocs.io/en/version-0.4.4/
\ No newline at end of file
diff -r 43a1aa648537 -r ba33b94637ca change_o/define_clones.sh
diff -r 43a1aa648537 -r ba33b94637ca change_o/makedb.sh
diff -r 43a1aa648537 -r ba33b94637ca merge_and_filter.r
--- a/merge_and_filter.r Thu Dec 07 03:44:38 2017 -0500
+++ b/merge_and_filter.r Tue Jan 29 03:54:09 2019 -0500
@@ -1,303 +1,303 @@
-args <- commandArgs(trailingOnly = TRUE)
-
-
-summaryfile = args[1]
-sequencesfile = args[2]
-mutationanalysisfile = args[3]
-mutationstatsfile = args[4]
-hotspotsfile = args[5]
-aafile = args[6]
-gene_identification_file= args[7]
-output = args[8]
-before.unique.file = args[9]
-unmatchedfile = args[10]
-method=args[11]
-functionality=args[12]
-unique.type=args[13]
-filter.unique=args[14]
-filter.unique.count=as.numeric(args[15])
-class.filter=args[16]
-empty.region.filter=args[17]
-
-print(paste("filter.unique.count:", filter.unique.count))
-
-summ = read.table(summaryfile, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
-sequences = read.table(sequencesfile, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
-mutationanalysis = read.table(mutationanalysisfile, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
-mutationstats = read.table(mutationstatsfile, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
-hotspots = read.table(hotspotsfile, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
-AAs = read.table(aafile, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
-gene_identification = read.table(gene_identification_file, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
-
-fix_column_names = function(df){
- if("V.DOMAIN.Functionality" %in% names(df)){
- names(df)[names(df) == "V.DOMAIN.Functionality"] = "Functionality"
- print("found V.DOMAIN.Functionality, changed")
- }
- if("V.DOMAIN.Functionality.comment" %in% names(df)){
- names(df)[names(df) == "V.DOMAIN.Functionality.comment"] = "Functionality.comment"
- print("found V.DOMAIN.Functionality.comment, changed")
- }
- return(df)
-}
-
-fix_non_unique_ids = function(df){
- df$Sequence.ID = paste(df$Sequence.ID, 1:nrow(df))
- return(df)
-}
-
-summ = fix_column_names(summ)
-sequences = fix_column_names(sequences)
-mutationanalysis = fix_column_names(mutationanalysis)
-mutationstats = fix_column_names(mutationstats)
-hotspots = fix_column_names(hotspots)
-AAs = fix_column_names(AAs)
-
-if(method == "blastn"){
- #"qseqid\tsseqid\tpident\tlength\tmismatch\tgapopen\tqstart\tqend\tsstart\tsend\tevalue\tbitscore"
- gene_identification = gene_identification[!duplicated(gene_identification$qseqid),]
- ref_length = data.frame(sseqid=c("ca1", "ca2", "cg1", "cg2", "cg3", "cg4", "cm"), ref.length=c(81,81,141,141,141,141,52))
- gene_identification = merge(gene_identification, ref_length, by="sseqid", all.x=T)
- gene_identification$chunk_hit_percentage = (gene_identification$length / gene_identification$ref.length) * 100
- gene_identification = gene_identification[,c("qseqid", "chunk_hit_percentage", "pident", "qstart", "sseqid")]
- colnames(gene_identification) = c("Sequence.ID", "chunk_hit_percentage", "nt_hit_percentage", "start_locations", "best_match")
-}
-
-#print("Summary analysis files columns")
-#print(names(summ))
-
-
-
-input.sequence.count = nrow(summ)
-print(paste("Number of sequences in summary file:", input.sequence.count))
-
-filtering.steps = data.frame(character(0), numeric(0))
-
-filtering.steps = rbind(filtering.steps, c("Input", input.sequence.count))
-
-filtering.steps[,1] = as.character(filtering.steps[,1])
-filtering.steps[,2] = as.character(filtering.steps[,2])
-#filtering.steps[,3] = as.numeric(filtering.steps[,3])
-
-#print("summary files columns")
-#print(names(summ))
-
-summ = merge(summ, gene_identification, by="Sequence.ID")
-
-print(paste("Number of sequences after merging with gene identification:", nrow(summ)))
-
-summ = summ[summ$Functionality != "No results",]
-
-print(paste("Number of sequences after 'No results' filter:", nrow(summ)))
-
-filtering.steps = rbind(filtering.steps, c("After 'No results' filter", nrow(summ)))
-
-if(functionality == "productive"){
- summ = summ[summ$Functionality == "productive (see comment)" | summ$Functionality == "productive",]
-} else if (functionality == "unproductive"){
- summ = summ[summ$Functionality == "unproductive (see comment)" | summ$Functionality == "unproductive",]
-} else if (functionality == "remove_unknown"){
- summ = summ[summ$Functionality != "No results" & summ$Functionality != "unknown (see comment)" & summ$Functionality != "unknown",]
-}
-
-print(paste("Number of sequences after functionality filter:", nrow(summ)))
-
-filtering.steps = rbind(filtering.steps, c("After functionality filter", nrow(summ)))
-
-if(F){ #to speed up debugging
- set.seed(1)
- summ = summ[sample(nrow(summ), floor(nrow(summ) * 0.03)),]
- print(paste("Number of sequences after sampling 3%:", nrow(summ)))
-
- filtering.steps = rbind(filtering.steps, c("Number of sequences after sampling 3%", nrow(summ)))
-}
-
-print("mutation analysis files columns")
-print(names(mutationanalysis[,!(names(mutationanalysis) %in% names(summ)[-1])]))
-
-result = merge(summ, mutationanalysis[,!(names(mutationanalysis) %in% names(summ)[-1])], by="Sequence.ID")
-
-print(paste("Number of sequences after merging with mutation analysis file:", nrow(result)))
-
-#print("mutation stats files columns")
-#print(names(mutationstats[,!(names(mutationstats) %in% names(result)[-1])]))
-
-result = merge(result, mutationstats[,!(names(mutationstats) %in% names(result)[-1])], by="Sequence.ID")
-
-print(paste("Number of sequences after merging with mutation stats file:", nrow(result)))
-
-print("hotspots files columns")
-print(names(hotspots[,!(names(hotspots) %in% names(result)[-1])]))
-
-result = merge(result, hotspots[,!(names(hotspots) %in% names(result)[-1])], by="Sequence.ID")
-
-print(paste("Number of sequences after merging with hotspots file:", nrow(result)))
-
-print("sequences files columns")
-print(c("FR1.IMGT", "CDR1.IMGT", "FR2.IMGT", "CDR2.IMGT", "FR3.IMGT", "CDR3.IMGT"))
-
-sequences = sequences[,c("Sequence.ID", "FR1.IMGT", "CDR1.IMGT", "FR2.IMGT", "CDR2.IMGT", "FR3.IMGT", "CDR3.IMGT")]
-names(sequences) = c("Sequence.ID", "FR1.IMGT.seq", "CDR1.IMGT.seq", "FR2.IMGT.seq", "CDR2.IMGT.seq", "FR3.IMGT.seq", "CDR3.IMGT.seq")
-result = merge(result, sequences, by="Sequence.ID", all.x=T)
-
-print("sequences files columns")
-print("CDR3.IMGT")
-
-AAs = AAs[,c("Sequence.ID", "CDR3.IMGT")]
-names(AAs) = c("Sequence.ID", "CDR3.IMGT.AA")
-result = merge(result, AAs, by="Sequence.ID", all.x=T)
-
-print(paste("Number of sequences in result after merging with sequences:", nrow(result)))
-
-result$VGene = gsub("^Homsap ", "", result$V.GENE.and.allele)
-result$VGene = gsub("[*].*", "", result$VGene)
-result$DGene = gsub("^Homsap ", "", result$D.GENE.and.allele)
-result$DGene = gsub("[*].*", "", result$DGene)
-result$JGene = gsub("^Homsap ", "", result$J.GENE.and.allele)
-result$JGene = gsub("[*].*", "", result$JGene)
-
-splt = strsplit(class.filter, "_")[[1]]
-chunk_hit_threshold = as.numeric(splt[1])
-nt_hit_threshold = as.numeric(splt[2])
-
-higher_than=(result$chunk_hit_percentage >= chunk_hit_threshold & result$nt_hit_percentage >= nt_hit_threshold)
-
-if(!all(higher_than, na.rm=T)){ #check for no unmatched
- result[!higher_than,"best_match"] = paste("unmatched,", result[!higher_than,"best_match"])
-}
-
-if(class.filter == "101_101"){
- result$best_match = "all"
-}
-
-write.table(x=result, file=gsub("merged.txt$", "before_filters.txt", output), sep="\t",quote=F,row.names=F,col.names=T)
-
-print(paste("Number of empty CDR1 sequences:", sum(result$CDR1.IMGT.seq == "", na.rm=T)))
-print(paste("Number of empty FR2 sequences:", sum(result$FR2.IMGT.seq == "", na.rm=T)))
-print(paste("Number of empty CDR2 sequences:", sum(result$CDR2.IMGT.seq == "", na.rm=T)))
-print(paste("Number of empty FR3 sequences:", sum(result$FR3.IMGT.seq == "", na.rm=T)))
-
-if(empty.region.filter == "leader"){
- result = result[result$FR1.IMGT.seq != "" & result$CDR1.IMGT.seq != "" & result$FR2.IMGT.seq != "" & result$CDR2.IMGT.seq != "" & result$FR3.IMGT.seq != "", ]
-} else if(empty.region.filter == "FR1"){
- result = result[result$CDR1.IMGT.seq != "" & result$FR2.IMGT.seq != "" & result$CDR2.IMGT.seq != "" & result$FR3.IMGT.seq != "", ]
-} else if(empty.region.filter == "CDR1"){
- result = result[result$FR2.IMGT.seq != "" & result$CDR2.IMGT.seq != "" & result$FR3.IMGT.seq != "", ]
-} else if(empty.region.filter == "FR2"){
- result = result[result$CDR2.IMGT.seq != "" & result$FR3.IMGT.seq != "", ]
-}
-
-print(paste("After removal sequences that are missing a gene region:", nrow(result)))
-filtering.steps = rbind(filtering.steps, c("After removal sequences that are missing a gene region", nrow(result)))
-
-if(empty.region.filter == "leader"){
- result = result[!(grepl("n|N", result$FR1.IMGT.seq) | grepl("n|N", result$FR2.IMGT.seq) | grepl("n|N", result$FR3.IMGT.seq) | grepl("n|N", result$CDR1.IMGT.seq) | grepl("n|N", result$CDR2.IMGT.seq) | grepl("n|N", result$CDR3.IMGT.seq)),]
-} else if(empty.region.filter == "FR1"){
- result = result[!(grepl("n|N", result$FR2.IMGT.seq) | grepl("n|N", result$FR3.IMGT.seq) | grepl("n|N", result$CDR1.IMGT.seq) | grepl("n|N", result$CDR2.IMGT.seq) | grepl("n|N", result$CDR3.IMGT.seq)),]
-} else if(empty.region.filter == "CDR1"){
- result = result[!(grepl("n|N", result$FR2.IMGT.seq) | grepl("n|N", result$FR3.IMGT.seq) | grepl("n|N", result$CDR2.IMGT.seq) | grepl("n|N", result$CDR3.IMGT.seq)),]
-} else if(empty.region.filter == "FR2"){
- result = result[!(grepl("n|N", result$FR3.IMGT.seq) | grepl("n|N", result$CDR2.IMGT.seq) | grepl("n|N", result$CDR3.IMGT.seq)),]
-}
-
-print(paste("Number of sequences in result after n filtering:", nrow(result)))
-filtering.steps = rbind(filtering.steps, c("After N filter", nrow(result)))
-
-cleanup_columns = c("FR1.IMGT.Nb.of.mutations",
- "CDR1.IMGT.Nb.of.mutations",
- "FR2.IMGT.Nb.of.mutations",
- "CDR2.IMGT.Nb.of.mutations",
- "FR3.IMGT.Nb.of.mutations")
-
-for(col in cleanup_columns){
- result[,col] = gsub("\\(.*\\)", "", result[,col])
- result[,col] = as.numeric(result[,col])
- result[is.na(result[,col]),] = 0
-}
-
-write.table(result, before.unique.file, sep="\t", quote=F,row.names=F,col.names=T)
-
-if(filter.unique != "no"){
- clmns = names(result)
- if(filter.unique == "remove_vjaa"){
- result$unique.def = paste(result$VGene, result$JGene, result$CDR3.IMGT.AA)
- } else if(empty.region.filter == "leader"){
- result$unique.def = paste(result$FR1.IMGT.seq, result$CDR1.IMGT.seq, result$FR2.IMGT.seq, result$CDR2.IMGT.seq, result$FR3.IMGT.seq, result$CDR3.IMGT.seq)
- } else if(empty.region.filter == "FR1"){
- result$unique.def = paste(result$CDR1.IMGT.seq, result$FR2.IMGT.seq, result$CDR2.IMGT.seq, result$FR3.IMGT.seq, result$CDR3.IMGT.seq)
- } else if(empty.region.filter == "CDR1"){
- result$unique.def = paste(result$FR2.IMGT.seq, result$CDR2.IMGT.seq, result$FR3.IMGT.seq, result$CDR3.IMGT.seq)
- } else if(empty.region.filter == "FR2"){
- result$unique.def = paste(result$CDR2.IMGT.seq, result$FR3.IMGT.seq, result$CDR3.IMGT.seq)
- }
-
- if(grepl("remove", filter.unique)){
- result = result[duplicated(result$unique.def) | duplicated(result$unique.def, fromLast=T),]
- unique.defs = data.frame(table(result$unique.def))
- unique.defs = unique.defs[unique.defs$Freq >= filter.unique.count,]
- result = result[result$unique.def %in% unique.defs$Var1,]
- }
-
- if(filter.unique != "remove_vjaa"){
- result$unique.def = paste(result$unique.def, gsub(",.*", "", result$best_match)) #keep the unique sequences that are in multiple classes, gsub so the unmatched don't have a class after it
- }
-
- result = result[!duplicated(result$unique.def),]
-}
-
-write.table(result, gsub("before_unique_filter.txt", "after_unique_filter.txt", before.unique.file), sep="\t", quote=F,row.names=F,col.names=T)
-
-filtering.steps = rbind(filtering.steps, c("After filter unique sequences", nrow(result)))
-
-print(paste("Number of sequences in result after unique filtering:", nrow(result)))
-
-if(nrow(summ) == 0){
- stop("No data remaining after filter")
-}
-
-result$best_match_class = gsub(",.*", "", result$best_match) #gsub so the unmatched don't have a class after it
-
-#result$past = ""
-#cls = unlist(strsplit(unique.type, ","))
-#for (i in 1:nrow(result)){
-# result[i,"past"] = paste(result[i,cls], collapse=":")
-#}
-
-
-
-result$past = do.call(paste, c(result[unlist(strsplit(unique.type, ","))], sep = ":"))
-
-result.matched = result[!grepl("unmatched", result$best_match),]
-result.unmatched = result[grepl("unmatched", result$best_match),]
-
-result = rbind(result.matched, result.unmatched)
-
-result = result[!(duplicated(result$past)), ]
-
-result = result[,!(names(result) %in% c("past", "best_match_class"))]
-
-print(paste("Number of sequences in result after", unique.type, "filtering:", nrow(result)))
-
-filtering.steps = rbind(filtering.steps, c("After remove duplicates based on filter", nrow(result)))
-
-unmatched = result[grepl("^unmatched", result$best_match),c("Sequence.ID", "chunk_hit_percentage", "nt_hit_percentage", "start_locations", "best_match")]
-
-print(paste("Number of rows in result:", nrow(result)))
-print(paste("Number of rows in unmatched:", nrow(unmatched)))
-
-matched.sequences = result[!grepl("^unmatched", result$best_match),]
-
-write.table(x=matched.sequences, file=gsub("merged.txt$", "filtered.txt", output), sep="\t",quote=F,row.names=F,col.names=T)
-
-matched.sequences.count = nrow(matched.sequences)
-unmatched.sequences.count = sum(grepl("^unmatched", result$best_match))
-
-filtering.steps = rbind(filtering.steps, c("Number of matched sequences", matched.sequences.count))
-filtering.steps = rbind(filtering.steps, c("Number of unmatched sequences", unmatched.sequences.count))
-filtering.steps[,2] = as.numeric(filtering.steps[,2])
-filtering.steps$perc = round(filtering.steps[,2] / input.sequence.count * 100, 2)
-
-write.table(x=filtering.steps, file=gsub("unmatched", "filtering_steps", unmatchedfile), sep="\t",quote=F,row.names=F,col.names=F)
-
-write.table(x=result, file=output, sep="\t",quote=F,row.names=F,col.names=T)
-write.table(x=unmatched, file=unmatchedfile, sep="\t",quote=F,row.names=F,col.names=T)
+args <- commandArgs(trailingOnly = TRUE)
+
+
+summaryfile = args[1]
+sequencesfile = args[2]
+mutationanalysisfile = args[3]
+mutationstatsfile = args[4]
+hotspotsfile = args[5]
+aafile = args[6]
+gene_identification_file= args[7]
+output = args[8]
+before.unique.file = args[9]
+unmatchedfile = args[10]
+method=args[11]
+functionality=args[12]
+unique.type=args[13]
+filter.unique=args[14]
+filter.unique.count=as.numeric(args[15])
+class.filter=args[16]
+empty.region.filter=args[17]
+
+print(paste("filter.unique.count:", filter.unique.count))
+
+summ = read.table(summaryfile, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
+sequences = read.table(sequencesfile, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
+mutationanalysis = read.table(mutationanalysisfile, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
+mutationstats = read.table(mutationstatsfile, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
+hotspots = read.table(hotspotsfile, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
+AAs = read.table(aafile, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
+gene_identification = read.table(gene_identification_file, header=T, sep="\t", fill=T, stringsAsFactors=F, quote="")
+
+fix_column_names = function(df){
+ if("V.DOMAIN.Functionality" %in% names(df)){
+ names(df)[names(df) == "V.DOMAIN.Functionality"] = "Functionality"
+ print("found V.DOMAIN.Functionality, changed")
+ }
+ if("V.DOMAIN.Functionality.comment" %in% names(df)){
+ names(df)[names(df) == "V.DOMAIN.Functionality.comment"] = "Functionality.comment"
+ print("found V.DOMAIN.Functionality.comment, changed")
+ }
+ return(df)
+}
+
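+#Helper that appends the row number to every Sequence.ID so duplicated IDs
+#become unique; defined here but not called in the current flow of this script.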
+fix_non_unique_ids = function(df){
+ df$Sequence.ID = paste(df$Sequence.ID, 1:nrow(df))
+ return(df)
+}
+
+summ = fix_column_names(summ)
+sequences = fix_column_names(sequences)
+mutationanalysis = fix_column_names(mutationanalysis)
+mutationstats = fix_column_names(mutationstats)
+hotspots = fix_column_names(hotspots)
+AAs = fix_column_names(AAs)
+
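+#When the (sub)class was identified with blastn, derive the same columns the
+#chunk-based identification produces: 'chunk_hit_percentage' (alignment length
+#relative to the reference constant-region length) and 'nt_hit_percentage'
+#(blastn percent identity), so the downstream class filter is the same for both
+#methods.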
+if(method == "blastn"){
+ #"qseqid\tsseqid\tpident\tlength\tmismatch\tgapopen\tqstart\tqend\tsstart\tsend\tevalue\tbitscore"
+ gene_identification = gene_identification[!duplicated(gene_identification$qseqid),]
+ ref_length = data.frame(sseqid=c("ca1", "ca2", "cg1", "cg2", "cg3", "cg4", "cm"), ref.length=c(81,81,141,141,141,141,52))
+ gene_identification = merge(gene_identification, ref_length, by="sseqid", all.x=T)
+ gene_identification$chunk_hit_percentage = (gene_identification$length / gene_identification$ref.length) * 100
+ gene_identification = gene_identification[,c("qseqid", "chunk_hit_percentage", "pident", "qstart", "sseqid")]
+ colnames(gene_identification) = c("Sequence.ID", "chunk_hit_percentage", "nt_hit_percentage", "start_locations", "best_match")
+}
+
+#print("Summary analysis files columns")
+#print(names(summ))
+
+
+
+input.sequence.count = nrow(summ)
+print(paste("Number of sequences in summary file:", input.sequence.count))
+
+filtering.steps = data.frame(character(0), numeric(0))
+
+filtering.steps = rbind(filtering.steps, c("Input", input.sequence.count))
+
+filtering.steps[,1] = as.character(filtering.steps[,1])
+filtering.steps[,2] = as.character(filtering.steps[,2])
+#filtering.steps[,3] = as.numeric(filtering.steps[,3])
+
+#print("summary files columns")
+#print(names(summ))
+
+summ = merge(summ, gene_identification, by="Sequence.ID")
+
+print(paste("Number of sequences after merging with gene identification:", nrow(summ)))
+
+summ = summ[summ$Functionality != "No results",]
+
+print(paste("Number of sequences after 'No results' filter:", nrow(summ)))
+
+filtering.steps = rbind(filtering.steps, c("After 'No results' filter", nrow(summ)))
+
+if(functionality == "productive"){
+ summ = summ[summ$Functionality == "productive (see comment)" | summ$Functionality == "productive",]
+} else if (functionality == "unproductive"){
+ summ = summ[summ$Functionality == "unproductive (see comment)" | summ$Functionality == "unproductive",]
+} else if (functionality == "remove_unknown"){
+ summ = summ[summ$Functionality != "No results" & summ$Functionality != "unknown (see comment)" & summ$Functionality != "unknown",]
+}
+
+print(paste("Number of sequences after functionality filter:", nrow(summ)))
+
+filtering.steps = rbind(filtering.steps, c("After functionality filter", nrow(summ)))
+
+if(F){ #to speed up debugging
+ set.seed(1)
+ summ = summ[sample(nrow(summ), floor(nrow(summ) * 0.03)),]
+ print(paste("Number of sequences after sampling 3%:", nrow(summ)))
+
+ filtering.steps = rbind(filtering.steps, c("Number of sequences after sampling 3%", nrow(summ)))
+}
+
+print("mutation analysis files columns")
+print(names(mutationanalysis[,!(names(mutationanalysis) %in% names(summ)[-1])]))
+
+result = merge(summ, mutationanalysis[,!(names(mutationanalysis) %in% names(summ)[-1])], by="Sequence.ID")
+
+print(paste("Number of sequences after merging with mutation analysis file:", nrow(result)))
+
+#print("mutation stats files columns")
+#print(names(mutationstats[,!(names(mutationstats) %in% names(result)[-1])]))
+
+result = merge(result, mutationstats[,!(names(mutationstats) %in% names(result)[-1])], by="Sequence.ID")
+
+print(paste("Number of sequences after merging with mutation stats file:", nrow(result)))
+
+print("hotspots files columns")
+print(names(hotspots[,!(names(hotspots) %in% names(result)[-1])]))
+
+result = merge(result, hotspots[,!(names(hotspots) %in% names(result)[-1])], by="Sequence.ID")
+
+print(paste("Number of sequences after merging with hotspots file:", nrow(result)))
+
+print("sequences files columns")
+print(c("FR1.IMGT", "CDR1.IMGT", "FR2.IMGT", "CDR2.IMGT", "FR3.IMGT", "CDR3.IMGT"))
+
+sequences = sequences[,c("Sequence.ID", "FR1.IMGT", "CDR1.IMGT", "FR2.IMGT", "CDR2.IMGT", "FR3.IMGT", "CDR3.IMGT")]
+names(sequences) = c("Sequence.ID", "FR1.IMGT.seq", "CDR1.IMGT.seq", "FR2.IMGT.seq", "CDR2.IMGT.seq", "FR3.IMGT.seq", "CDR3.IMGT.seq")
+result = merge(result, sequences, by="Sequence.ID", all.x=T)
+
+print("sequences files columns")
+print("CDR3.IMGT")
+
+AAs = AAs[,c("Sequence.ID", "CDR3.IMGT")]
+names(AAs) = c("Sequence.ID", "CDR3.IMGT.AA")
+result = merge(result, AAs, by="Sequence.ID", all.x=T)
+
+print(paste("Number of sequences in result after merging with sequences:", nrow(result)))
+
+result$VGene = gsub("^Homsap ", "", result$V.GENE.and.allele)
+result$VGene = gsub("[*].*", "", result$VGene)
+result$DGene = gsub("^Homsap ", "", result$D.GENE.and.allele)
+result$DGene = gsub("[*].*", "", result$DGene)
+result$JGene = gsub("^Homsap ", "", result$J.GENE.and.allele)
+result$JGene = gsub("[*].*", "", result$JGene)
+
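+#class.filter has the form "<chunk threshold>_<nt threshold>" (e.g. "70_70");
+#sequences scoring below either threshold get an "unmatched," prefix on their
+#best_match. The special value "101_101", which real percentages cannot
+#normally reach, skips (sub)class assignment and labels every sequence "all".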
+splt = strsplit(class.filter, "_")[[1]]
+chunk_hit_threshold = as.numeric(splt[1])
+nt_hit_threshold = as.numeric(splt[2])
+
+higher_than=(result$chunk_hit_percentage >= chunk_hit_threshold & result$nt_hit_percentage >= nt_hit_threshold)
+
+if(!all(higher_than, na.rm=T)){ #check for no unmatched
+ result[!higher_than,"best_match"] = paste("unmatched,", result[!higher_than,"best_match"])
+}
+
+if(class.filter == "101_101"){
+ result$best_match = "all"
+}
+
+write.table(x=result, file=gsub("merged.txt$", "before_filters.txt", output), sep="\t",quote=F,row.names=F,col.names=T)
+
+print(paste("Number of empty CDR1 sequences:", sum(result$CDR1.IMGT.seq == "", na.rm=T)))
+print(paste("Number of empty FR2 sequences:", sum(result$FR2.IMGT.seq == "", na.rm=T)))
+print(paste("Number of empty CDR2 sequences:", sum(result$CDR2.IMGT.seq == "", na.rm=T)))
+print(paste("Number of empty FR3 sequences:", sum(result$FR3.IMGT.seq == "", na.rm=T)))
+
+if(empty.region.filter == "leader"){
+ result = result[result$FR1.IMGT.seq != "" & result$CDR1.IMGT.seq != "" & result$FR2.IMGT.seq != "" & result$CDR2.IMGT.seq != "" & result$FR3.IMGT.seq != "", ]
+} else if(empty.region.filter == "FR1"){
+ result = result[result$CDR1.IMGT.seq != "" & result$FR2.IMGT.seq != "" & result$CDR2.IMGT.seq != "" & result$FR3.IMGT.seq != "", ]
+} else if(empty.region.filter == "CDR1"){
+ result = result[result$FR2.IMGT.seq != "" & result$CDR2.IMGT.seq != "" & result$FR3.IMGT.seq != "", ]
+} else if(empty.region.filter == "FR2"){
+ result = result[result$CDR2.IMGT.seq != "" & result$FR3.IMGT.seq != "", ]
+}
+
+print(paste("After removal sequences that are missing a gene region:", nrow(result)))
+filtering.steps = rbind(filtering.steps, c("After removal sequences that are missing a gene region", nrow(result)))
+
+if(empty.region.filter == "leader"){
+ result = result[!(grepl("n|N", result$FR1.IMGT.seq) | grepl("n|N", result$FR2.IMGT.seq) | grepl("n|N", result$FR3.IMGT.seq) | grepl("n|N", result$CDR1.IMGT.seq) | grepl("n|N", result$CDR2.IMGT.seq) | grepl("n|N", result$CDR3.IMGT.seq)),]
+} else if(empty.region.filter == "FR1"){
+ result = result[!(grepl("n|N", result$FR2.IMGT.seq) | grepl("n|N", result$FR3.IMGT.seq) | grepl("n|N", result$CDR1.IMGT.seq) | grepl("n|N", result$CDR2.IMGT.seq) | grepl("n|N", result$CDR3.IMGT.seq)),]
+} else if(empty.region.filter == "CDR1"){
+ result = result[!(grepl("n|N", result$FR2.IMGT.seq) | grepl("n|N", result$FR3.IMGT.seq) | grepl("n|N", result$CDR2.IMGT.seq) | grepl("n|N", result$CDR3.IMGT.seq)),]
+} else if(empty.region.filter == "FR2"){
+ result = result[!(grepl("n|N", result$FR3.IMGT.seq) | grepl("n|N", result$CDR2.IMGT.seq) | grepl("n|N", result$CDR3.IMGT.seq)),]
+}
+
+print(paste("Number of sequences in result after n filtering:", nrow(result)))
+filtering.steps = rbind(filtering.steps, c("After N filter", nrow(result)))
+
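+#The per-region mutation counts may contain a parenthesised annotation; strip
+#it, coerce the value to numeric and treat missing values as zero mutations.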
+cleanup_columns = c("FR1.IMGT.Nb.of.mutations",
+ "CDR1.IMGT.Nb.of.mutations",
+ "FR2.IMGT.Nb.of.mutations",
+ "CDR2.IMGT.Nb.of.mutations",
+ "FR3.IMGT.Nb.of.mutations")
+
+for(col in cleanup_columns){
+ result[,col] = gsub("\\(.*\\)", "", result[,col])
+ result[,col] = as.numeric(result[,col])
+	result[is.na(result[,col]), col] = 0
+}
+
+write.table(result, before.unique.file, sep="\t", quote=F,row.names=F,col.names=T)
+
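+#Optional unique-sequence filtering: build a key ('unique.def') from either
+#V gene + J gene + CDR3 amino acids ("remove_vjaa") or from the analysed region
+#nucleotide sequences plus CDR3. For the "remove*" options only keys that occur
+#more than once and at least filter.unique.count times are kept; afterwards one
+#sequence per key (per (sub)class, except for "remove_vjaa") is retained.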
+if(filter.unique != "no"){
+ clmns = names(result)
+ if(filter.unique == "remove_vjaa"){
+ result$unique.def = paste(result$VGene, result$JGene, result$CDR3.IMGT.AA)
+ } else if(empty.region.filter == "leader"){
+ result$unique.def = paste(result$FR1.IMGT.seq, result$CDR1.IMGT.seq, result$FR2.IMGT.seq, result$CDR2.IMGT.seq, result$FR3.IMGT.seq, result$CDR3.IMGT.seq)
+ } else if(empty.region.filter == "FR1"){
+ result$unique.def = paste(result$CDR1.IMGT.seq, result$FR2.IMGT.seq, result$CDR2.IMGT.seq, result$FR3.IMGT.seq, result$CDR3.IMGT.seq)
+ } else if(empty.region.filter == "CDR1"){
+ result$unique.def = paste(result$FR2.IMGT.seq, result$CDR2.IMGT.seq, result$FR3.IMGT.seq, result$CDR3.IMGT.seq)
+ } else if(empty.region.filter == "FR2"){
+ result$unique.def = paste(result$CDR2.IMGT.seq, result$FR3.IMGT.seq, result$CDR3.IMGT.seq)
+ }
+
+ if(grepl("remove", filter.unique)){
+ result = result[duplicated(result$unique.def) | duplicated(result$unique.def, fromLast=T),]
+ unique.defs = data.frame(table(result$unique.def))
+ unique.defs = unique.defs[unique.defs$Freq >= filter.unique.count,]
+ result = result[result$unique.def %in% unique.defs$Var1,]
+ }
+
+ if(filter.unique != "remove_vjaa"){
+ result$unique.def = paste(result$unique.def, gsub(",.*", "", result$best_match)) #keep the unique sequences that are in multiple classes, gsub so the unmatched don't have a class after it
+ }
+
+ result = result[!duplicated(result$unique.def),]
+}
+
+write.table(result, gsub("before_unique_filter.txt", "after_unique_filter.txt", before.unique.file), sep="\t", quote=F,row.names=F,col.names=T)
+
+filtering.steps = rbind(filtering.steps, c("After filter unique sequences", nrow(result)))
+
+print(paste("Number of sequences in result after unique filtering:", nrow(result)))
+
+if(nrow(summ) == 0){
+ stop("No data remaining after filter")
+}
+
+result$best_match_class = gsub(",.*", "", result$best_match) #gsub so the unmatched don't have a class after it
+
+#result$past = ""
+#cls = unlist(strsplit(unique.type, ","))
+#for (i in 1:nrow(result)){
+# result[i,"past"] = paste(result[i,cls], collapse=":")
+#}
+
+
+
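+#Build a composite key from the comma-separated columns named in unique.type
+#(a vectorised replacement for the commented-out per-row loop above).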
+result$past = do.call(paste, c(result[unlist(strsplit(unique.type, ","))], sep = ":"))
+
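+#Order matched sequences before unmatched ones so that, when the same key
+#occurs in both, duplicated() below keeps the matched copy.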
+result.matched = result[!grepl("unmatched", result$best_match),]
+result.unmatched = result[grepl("unmatched", result$best_match),]
+
+result = rbind(result.matched, result.unmatched)
+
+result = result[!(duplicated(result$past)), ]
+
+result = result[,!(names(result) %in% c("past", "best_match_class"))]
+
+print(paste("Number of sequences in result after", unique.type, "filtering:", nrow(result)))
+
+filtering.steps = rbind(filtering.steps, c("After remove duplicates based on filter", nrow(result)))
+
+unmatched = result[grepl("^unmatched", result$best_match),c("Sequence.ID", "chunk_hit_percentage", "nt_hit_percentage", "start_locations", "best_match")]
+
+print(paste("Number of rows in result:", nrow(result)))
+print(paste("Number of rows in unmatched:", nrow(unmatched)))
+
+matched.sequences = result[!grepl("^unmatched", result$best_match),]
+
+write.table(x=matched.sequences, file=gsub("merged.txt$", "filtered.txt", output), sep="\t",quote=F,row.names=F,col.names=T)
+
+matched.sequences.count = nrow(matched.sequences)
+unmatched.sequences.count = sum(grepl("^unmatched", result$best_match))
+
+filtering.steps = rbind(filtering.steps, c("Number of matched sequences", matched.sequences.count))
+filtering.steps = rbind(filtering.steps, c("Number of unmatched sequences", unmatched.sequences.count))
+filtering.steps[,2] = as.numeric(filtering.steps[,2])
+filtering.steps$perc = round(filtering.steps[,2] / input.sequence.count * 100, 2)
+
+write.table(x=filtering.steps, file=gsub("unmatched", "filtering_steps", unmatchedfile), sep="\t",quote=F,row.names=F,col.names=F)
+
+write.table(x=result, file=output, sep="\t",quote=F,row.names=F,col.names=T)
+write.table(x=unmatched, file=unmatchedfile, sep="\t",quote=F,row.names=F,col.names=T)
diff -r 43a1aa648537 -r ba33b94637ca shm_clonality.htm
--- a/shm_clonality.htm Thu Dec 07 03:44:38 2017 -0500
+++ b/shm_clonality.htm Tue Jan 29 03:54:09 2019 -0500
@@ -1,144 +1,144 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
References
-
-
Gupta,
-Namita T. and Vander Heiden, Jason A. and Uduman, Mohamed and Gadala-Maria,
-Daniel and Yaari, Gur and Kleinstein, Steven H. (2015). Change-O: a toolkit for analyzing large-scale B cell
-immunoglobulin repertoire sequencing data: Table 1. In Bioinformatics, 31 (20), pp.
-3356–3358. [doi:10.1093/bioinformatics/btv359][Link]
-
-
-
-
All, IGA, IGG, IGM and IGE tabs
-
-
In
-these tabs information on the clonal relation of transcripts can be found. To
-calculate clonal relation Change-O is used (Gupta et al, PMID: 26069265).
-Transcripts are considered clonally related if they have maximal three nucleotides
-difference in their CDR3 sequence and the same first V segment (as assigned by
-IMGT). Results are represented in a table format showing the clone size and the
-number of clones or sequences with this clone size. Change-O settings used are
-the nucleotide hamming distance substitution model with
-a complete distance of maximal three. For clonal assignment the first gene
-segments were used, and the distances were not normalized. In case of
-asymmetric distances, the minimal distance was used.
-
-
-
-
Overlap
-tab
-
-
This
-tab gives information on with which (sub)classe(s) each unique analyzed region
-(based on the exact nucleotide sequence of the analyzes region and the CDR3
-nucleotide sequence) is found with. This gives information if the combination
-of the exact same nucleotide sequence of the analyzed region and the CDR3
-sequence can be found in multiple (sub)classes.
-
-
Please note that this tab is based on all
-sequences before filter unique sequences and the remove duplicates based on
-filters are applied. In this table only sequences occuring more than once are
-included.
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
References
+
+
Gupta,
+Namita T. and Vander Heiden, Jason A. and Uduman, Mohamed and Gadala-Maria,
+Daniel and Yaari, Gur and Kleinstein, Steven H. (2015). Change-O: a toolkit for analyzing large-scale B cell
+immunoglobulin repertoire sequencing data. Bioinformatics, 31(20), pp.
+3356–3358. [doi:10.1093/bioinformatics/btv359][Link]
+
+
+
+
All, IGA, IGG, IGM and IGE tabs
+
+
In
+these tabs information on the clonal relation of transcripts can be found.
+Clonal relations are calculated with Change-O (Gupta et al, PMID: 26069265).
+Transcripts are considered clonally related if they differ by at most three
+nucleotides in their CDR3 sequence and share the same first V segment (as
+assigned by IMGT). Results are presented as a table showing each clone size and
+the number of clones or sequences with that clone size. The Change-O settings
+used are the nucleotide hamming distance substitution model with a complete
+distance threshold of three. For clonal assignment the first gene segments were
+used, and the distances were not normalized. In case of asymmetric distances,
+the minimal distance was used.
+
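+A minimal R sketch of this criterion (illustrative only: the column names
+cdr3 and v_gene below are placeholders, and the pipeline itself delegates the
+actual clonal assignment to Change-O):
+
+hamming <- function(a, b) {
+  # the distance is only defined for CDR3 sequences of equal length
+  if (nchar(a) != nchar(b)) return(Inf)
+  sum(strsplit(a, "")[[1]] != strsplit(b, "")[[1]])
+}
+
+clonally_related <- function(x, y) {
+  # same first V segment and at most three CDR3 nucleotide differences
+  x$v_gene == y$v_gene && hamming(x$cdr3, y$cdr3) <= 3
+}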
+
+
+
Overlap
+tab
+
+
This
+tab shows in which (sub)class(es) each unique analyzed region (based on the
+exact nucleotide sequence of the analyzed region and the CDR3 nucleotide
+sequence) is found. This indicates whether the combination of the exact same
+nucleotide sequence of the analyzed region and the CDR3 sequence occurs in
+multiple (sub)classes.
+
+
Please note that this tab is based on all
+sequences before the 'filter unique sequences' and 'remove duplicates based on
+filter' steps are applied. In this table only sequences occurring more than
+once are included.
+
+
+
+
+
+
diff -r 43a1aa648537 -r ba33b94637ca shm_csr.htm
--- a/shm_csr.htm Thu Dec 07 03:44:38 2017 -0500
+++ b/shm_csr.htm Tue Jan 29 03:54:09 2019 -0500
@@ -1,95 +1,95 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
The
-graphs in this tab give insight into the subclass distribution of IGG and IGA
-transcripts. Human Cµ, Cα, Cγ and Cε
-constant genes are assigned using a custom script
-specifically designed for human (sub)class assignment in repertoire data as
-described in van Schouwenburg and IJspeert et al, submitted for publication. In
-this script the reference sequences for the subclasses are divided in 8
-nucleotide chunks which overlap by 4 nucleotides. These overlapping chunks are
-then individually aligned in the right order to each input sequence. The
-percentage of the chunks identified in each rearrangement is calculated in the
-‘chunk hit percentage’. Cα and Cγ
-subclasses are very homologous and only differ in a few nucleotides. To assign
-subclasses the ‘nt hit percentage’ is calculated.
-This percentage indicates how well the chunks covering the subclass specific
-nucleotide match with the different subclasses. Information
-on normal distribution of subclasses in healthy individuals of different ages
-can be found in IJspeert and van Schouwenburg et al, PMID: 27799928.
-
-
IGA
-subclass distribution
-
-
Pie
-chart showing the relative distribution of IGA1 and IGA2 transcripts in the
-sample.
-
-
IGG
-subclass distribution
-
-
Pie
-chart showing the relative distribution of IGG1, IGG2, IGG3 and IGG4
-transcripts in the sample.
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
The
+graphs in this tab give insight into the subclass distribution of IGG and IGA
+transcripts. Human Cµ, Cα, Cγ and Cε constant genes are assigned using a custom
+script specifically designed for human (sub)class assignment in repertoire
+data, as described in van Schouwenburg and IJspeert et al, submitted for
+publication. In this script the reference sequences for the subclasses are
+divided into 8-nucleotide chunks that overlap by 4 nucleotides. These
+overlapping chunks are then individually aligned, in the correct order, to each
+input sequence. The percentage of chunks identified in each rearrangement is
+reported as the ‘chunk hit percentage’. The Cα and Cγ subclasses are highly
+homologous and differ in only a few nucleotides. To assign subclasses the
+‘nt hit percentage’ is calculated; this percentage indicates how well the
+chunks covering the subclass-specific nucleotides match the different
+subclasses. Information on the normal distribution of subclasses in healthy
+individuals of different ages can be found in IJspeert and van Schouwenburg et
+al, PMID: 27799928.
+
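+A minimal R sketch of the chunk idea (illustrative only; the real assignment
+script also requires the chunks to occur in the correct order and computes the
+separate ‘nt hit percentage’ for the subclass-specific positions):
+
+make_chunks <- function(ref, size = 8, step = 4) {
+  starts <- seq(1, nchar(ref) - size + 1, by = step)
+  substring(ref, starts, starts + size - 1)
+}
+
+chunk_hit_percentage <- function(sequence, ref) {
+  chunks <- make_chunks(ref)
+  hits <- vapply(chunks, grepl, logical(1), x = sequence, fixed = TRUE)
+  100 * sum(hits) / length(chunks)
+}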
+
IGA
+subclass distribution
+
+
Pie
+chart showing the relative distribution of IGA1 and IGA2 transcripts in the
+sample.
+
+
IGG
+subclass distribution
+
+
Pie
+chart showing the relative distribution of IGG1, IGG2, IGG3 and IGG4
+transcripts in the sample.
+
+
+
+
+
+
diff -r 43a1aa648537 -r ba33b94637ca shm_csr.r
diff -r 43a1aa648537 -r ba33b94637ca shm_csr.xml
--- a/shm_csr.xml Thu Dec 07 03:44:38 2017 -0500
+++ b/shm_csr.xml Tue Jan 29 03:54:09 2019 -0500
@@ -1,14 +1,13 @@
-
+ numpy
+ xlrd
+ r-ggplot2
+ r-reshape2
+ r-scales
+ r-seqinr
+ r-data.table
#if str ( $filter_unique.filter_unique_select ) == "remove":
diff -r 43a1aa648537 -r ba33b94637ca shm_downloads.htm
--- a/shm_downloads.htm Thu Dec 07 03:44:38 2017 -0500
+++ b/shm_downloads.htm Tue Jan 29 03:54:09 2019 -0500
@@ -1,538 +1,538 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Info
-
-
The complete
-dataset:
-Allows downloading of the complete parsed data set.
-
-
The filtered
-dataset:
-Allows downloading of all parsed IMGT information of all transcripts that
-passed the chosen filter settings.
-
-
The alignment
-info on the unmatched sequences: Provides information of the subclass
-alignment of all unmatched sequences. For each sequence the chunck hit
-percentage and the nt hit percentage is shown together with the best matched
-subclass.
-
-
SHM Overview
-
-
The SHM Overview
-table as a dataset: Allows downloading of the SHM Overview
-table as a data set.
-
-
Motif data per
-sequence ID: Provides a file that contains information for each
-transcript on the number of mutations present in WA/TW and RGYW/WRCY motives.
-
-
Mutation data
-per sequence ID: Provides a file containing information
-on the number of sequences bases, the number and location of mutations and the
-type of mutations found in each transcript.
-
-
Base count for
-every sequence: links to a page showing for each transcript the
-sequence of the analysed region (as dependent on the sequence starts at filter),
-the assigned subclass and the number of sequenced A,C,G and T’s.
-
-
The data used to
-generate the percentage of mutations in AID and pol eta motives plot:
-Provides a file containing the values used to generate the percentage of
-mutations in AID and pol eta motives plot in the SHM overview tab.
-
-
The
-data used to generate the relative mutation patterns plot:
-Provides a download with the data used to generate the relative mutation
-patterns plot in the SHM overview tab.
-
-
The
-data used to generate the absolute mutation patterns plot:
-Provides a download with the data used to generate the absolute mutation
-patterns plot in the SHM overview tab.
-
-
SHM Frequency
-
-
The data
-generate the frequency scatter plot: Allows
-downloading the data used to generate the frequency scatter plot in the SHM
-frequency tab.
-
-
The data used to
-generate the frequency by class plot: Allows
-downloading the data used to generate frequency by class plot included in the
-SHM frequency tab.
-
-
The data for
-frequency by subclass: Provides information of the number and
-percentage of sequences that have 0%, 0-2%, 2-5%, 5-10%, 10-15%, 15-20%,
->20% SHM. Information is provided for each subclass.
-
-
-
-
Transition
-Tables
-
-
The data for the
-'all' transition plot: Contains the information used to
-generate the transition table for all sequences.
-
-
The data for the
-'IGA' transition plot: Contains the information used to
-generate the transition table for all IGA sequences.
-
-
The data for the
-'IGA1' transition plot: Contains the information used to
-generate the transition table for all IGA1 sequences.
-
-
The data for the
-'IGA2' transition plot: Contains the information used to
-generate the transition table for all IGA2 sequences.
-
-
The data for the
-'IGG' transition plot : Contains the information used to
-generate the transition table for all IGG sequences.
-
-
The data for the
-'IGG1' transition plot: Contains the information used to
-generate the transition table for all IGG1 sequences.
-
-
The data for the
-'IGG2' transition plot: Contains the information used to
-generate the transition table for all IGG2 sequences.
-
-
The data for the
-'IGG3' transition plot: Contains the information used to
-generate the transition table for all IGG3 sequences.
-
-
The data for the
-'IGG4' transition plot: Contains the information used to
-generate the transition table for all IGG4 sequences.
-
-
The data for the
-'IGM' transition plot : Contains the information used to
-generate the transition table for all IGM sequences.
-
-
The data for the
-'IGE' transition plot: Contains the
-information used to generate the transition table for all IGE sequences.
-
-
Antigen
-selection
-
-
AA mutation data
-per sequence ID: Provides for each transcript information on whether
-there is replacement mutation at each amino acid location (as defined by IMGT).
-For all amino acids outside of the analysed region the value 0 is given.
-
-
Presence of AA
-per sequence ID: Provides for each transcript information on which
-amino acid location (as defined by IMGT) is present. 0 is absent, 1
-is present.
-
-
The data used to
-generate the aa mutation frequency plot: Provides the
-data used to generate the aa mutation frequency plot for all sequences in the
-antigen selection tab.
-
-
The data used to
-generate the aa mutation frequency plot for IGA: Provides the
-data used to generate the aa mutation frequency plot for all IGA sequences in
-the antigen selection tab.
-
-
The data used to
-generate the aa mutation frequency plot for IGG: Provides the
-data used to generate the aa mutation frequency plot for all IGG sequences in
-the antigen selection tab.
-
-
The data used to
-generate the aa mutation frequency plot for IGM: Provides the
-data used to generate the aa mutation frequency plot for all IGM sequences in
-the antigen selection tab.
-
-
The data used to
-generate the aa mutation frequency plot for IGE: Provides the
-data used to generate the aa mutation frequency plot for all IGE sequences in
-the antigen selection tab.
-
-
Baseline PDF (http://selection.med.yale.edu/baseline/): PDF
-containing the Antigen selection (BASELINe) graph for all
-sequences.
-
-
Baseline data:
-Table output of the BASELINe analysis. Calculation of antigen selection as
-performed by BASELINe are shown for each individual sequence and the sum of all
-sequences.
-
-
Baseline IGA
-PDF:
-PDF containing the Antigen selection (BASELINe) graph for all
-sequences.
-
-
Baseline IGA
-data:
-Table output of the BASELINe analysis. Calculation of antigen selection as
-performed by BASELINe are shown for each individual IGA sequence and the sum of
-all IGA sequences.
-
-
Baseline IGG
-PDF:
-PDF containing the Antigen selection (BASELINe) graph for all IGG
-sequences.
-
-
Baseline IGG
-data:
-Table output of the BASELINe analysis. Calculation of antigen selection as
-performed by BASELINe are shown for each individual IGG sequence and the sum of
-all IGG sequences.
-
-
Baseline IGM PDF: PDF
-containing the Antigen selection (BASELINe) graph for all IGM
-sequences.
-
-
Baseline IGM
-data:
-Table output of the BASELINe analysis. Calculation of antigen selection as
-performed by BASELINe are shown for each individual IGM sequence and the sum of
-all IGM sequences.
-
-
Baseline IGE
-PDF:
-PDF containing the Antigen selection (BASELINe) graph for all IGE
-sequences.
-
-
-
Baseline IGE
-data:
-Table output of the BASELINe analysis. Calculation of antigen selection as
-performed by BASELINe are shown for each individual IGE sequence and the sum of
-all IGE sequences.
-
-
CSR
-
-
The data for the
-IGA
-subclass distribution plot : Data used for
-the generation of the IGA subclass distribution plot provided
-in the CSR tab.
-
-
The data for the
-IGA
-subclass distribution plot : Data used for the generation of the IGG
-subclass distribution plot provided in the CSR tab.
-
-
Clonal relation
-
-
Sequence overlap
-between subclasses: Link to the overlap table as provided
-under the clonality overlap tab.
-
-
The Change-O DB
-file with defined clones and subclass annotation:
-Downloads a table with the calculation of clonal relation between all
-sequences. For each individual transcript the results of the clonal assignment
-as provided by Change-O are provided. Sequences with the same number in the CLONE
-column are considered clonally related.
-
-
The Change-O DB
-defined clones summary file: Gives a summary of the total number of
-clones in all sequences and their clone size.
-
-
The Change-O DB
-file with defined clones of IGA: Downloads a table with the
-calculation of clonal relation between all IGA sequences. For each individual
-transcript the results of the clonal assignment as provided by Change-O are
-provided. Sequences with the same number in the CLONE column are considered
-clonally related.
-
-
The Change-O DB
-defined clones summary file of IGA: Gives a summary
-of the total number of clones in all IGA sequences and their clone size.
-
-
The Change-O DB
-file with defined clones of IGG: Downloads a table with the
-calculation of clonal relation between all IGG sequences. For each individual
-transcript the results of the clonal assignment as provided by Change-O are
-provided. Sequences with the same number in the CLONE column are considered
-clonally related.
-
-
The Change-O DB
-defined clones summary file of IGG: Gives a summary
-of the total number of clones in all IGG sequences and their clone size.
-
-
The Change-O DB
-file with defined clones of IGM: Downloads a table
-with the calculation of clonal relation between all IGM sequences. For each
-individual transcript the results of the clonal assignment as provided by
-Change-O are provided. Sequences with the same number in the CLONE column are
-considered clonally related.
-
-
The Change-O DB
-defined clones summary file of IGM: Gives a summary
-of the total number of clones in all IGM sequences and their clone size.
-
-
The Change-O DB
-file with defined clones of IGE: Downloads a table with the
-calculation of clonal relation between all IGE sequences. For each individual
-transcript the results of the clonal assignment as provided by Change-O are
-provided. Sequences with the same number in the CLONE column are considered
-clonally related.
-
-
The Change-O DB
-defined clones summary file of IGE: Gives a summary
-of the total number of clones in all IGE sequences and their clone size.
-
-
Filtered IMGT
-output files
-
-
An IMGT archive
-with just the matched and filtered sequences: Downloads a
-.txz file with the same format as downloaded IMGT files that contains all
-sequences that have passed the chosen filter settings.
-
-
An IMGT archive
-with just the matched and filtered IGA sequences: Downloads a
-.txz file with the same format as downloaded IMGT files that contains all IGA
-sequences that have passed the chosen filter settings.
-
-
An IMGT archive
-with just the matched and filtered IGA1 sequences: Downloads a
-.txz file with the same format as downloaded IMGT files that contains all IGA1
-sequences that have passed the chosen filter settings.
-
-
An IMGT archive
-with just the matched and filtered IGA2 sequences: Downloads a .txz
-file with the same format as downloaded IMGT files that contains all IGA2
-sequences that have passed the chosen filter settings.
-
-
An IMGT archive
-with just the matched and filtered IGG sequences: Downloads a .txz
-file with the same format as downloaded IMGT files that contains all IGG
-sequences that have passed the chosen filter settings.
-
-
An IMGT archive
-with just the matched and filtered IGG1 sequences: Downloads a
-.txz file with the same format as downloaded IMGT files that contains all IGG1
-sequences that have passed the chosen filter settings.
-
-
An IMGT archive
-with just the matched and filtered IGG2 sequences: Downloads a
-.txz file with the same format as downloaded IMGT files that contains all IGG2
-sequences that have passed the chosen filter settings.
-
-
An IMGT archive
-with just the matched and filtered IGG3 sequences: Downloads a .txz
-file with the same format as downloaded IMGT files that contains all IGG3
-sequences that have passed the chosen filter settings.
-
-
An IMGT archive
-with just the matched and filtered IGG4 sequences: Downloads a
-.txz file with the same format as downloaded IMGT files that contains all IGG4
-sequences that have passed the chosen filter settings.
-
-
An IMGT archive
-with just the matched and filtered IGM sequences: Downloads a .txz
-file with the same format as downloaded IMGT files that contains all IGM
-sequences that have passed the chosen filter settings.
-
-
An IMGT archive
-with just the matched and filtered IGE sequences: Downloads a
-.txz file with the same format as downloaded IMGT files that contains all IGE
-sequences that have passed the chosen filter settings.
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Info
+
+
The complete
+dataset:
+Allows downloading of the complete parsed data set.
+
+
The filtered
+dataset:
+Allows downloading of all parsed IMGT information of all transcripts that
+passed the chosen filter settings.
+
+
The alignment
+info on the unmatched sequences: Provides information on the subclass
+alignment of all unmatched sequences. For each sequence the chunk hit
+percentage and the nt hit percentage are shown together with the best matched
+subclass.
+
+
SHM Overview
+
+
The SHM Overview
+table as a dataset: Allows downloading of the SHM Overview
+table as a data set.
+
+
Motif data per
+sequence ID: Provides a file that contains information for each
+transcript on the number of mutations present in WA/TW and RGYW/WRCY motifs.
+
+
Mutation data
+per sequence ID: Provides a file containing information
+on the number of sequenced bases, the number and location of mutations and the
+type of mutations found in each transcript.
+
+
Base count for
+every sequence: Links to a page showing for each transcript the
+sequence of the analysed region (depending on the sequence starts at filter),
+the assigned subclass and the number of sequenced A, C, G and T’s.
+
+
The data used to
+generate the percentage of mutations in AID and pol eta motives plot:
+Provides a file containing the values used to generate the percentage of
+mutations in AID and pol eta motives plot in the SHM overview tab.
+
+
The
+data used to generate the relative mutation patterns plot:
+Provides a download with the data used to generate the relative mutation
+patterns plot in the SHM overview tab.
+
+
The
+data used to generate the absolute mutation patterns plot:
+Provides a download with the data used to generate the absolute mutation
+patterns plot in the SHM overview tab.
+
+
SHM Frequency
+
+
The data
+used to generate the frequency scatter plot: Allows
+downloading the data used to generate the frequency scatter plot in the SHM
+frequency tab.
+
+
The data used to
+generate the frequency by class plot: Allows
+downloading the data used to generate frequency by class plot included in the
+SHM frequency tab.
+
+
The data for
+frequency by subclass: Provides information of the number and
+percentage of sequences that have 0%, 0-2%, 2-5%, 5-10%, 10-15%, 15-20%,
+>20% SHM. Information is provided for each subclass.
+
+
+
+
Transition
+Tables
+
+
The data for the
+'all' transition plot: Contains the information used to
+generate the transition table for all sequences.
+
+
The data for the
+'IGA' transition plot: Contains the information used to
+generate the transition table for all IGA sequences.
+
+
The data for the
+'IGA1' transition plot: Contains the information used to
+generate the transition table for all IGA1 sequences.
+
+
The data for the
+'IGA2' transition plot: Contains the information used to
+generate the transition table for all IGA2 sequences.
+
+
The data for the
+'IGG' transition plot : Contains the information used to
+generate the transition table for all IGG sequences.
+
+
The data for the
+'IGG1' transition plot: Contains the information used to
+generate the transition table for all IGG1 sequences.
+
+
The data for the
+'IGG2' transition plot: Contains the information used to
+generate the transition table for all IGG2 sequences.
+
+
The data for the
+'IGG3' transition plot: Contains the information used to
+generate the transition table for all IGG3 sequences.
+
+
The data for the
+'IGG4' transition plot: Contains the information used to
+generate the transition table for all IGG4 sequences.
+
+
The data for the
+'IGM' transition plot : Contains the information used to
+generate the transition table for all IGM sequences.
+
+
The data for the
+'IGE' transition plot: Contains the
+information used to generate the transition table for all IGE sequences.
+
+
Antigen
+selection
+
+
AA mutation data
+per sequence ID: Provides for each transcript information on whether
+there is a replacement mutation at each amino acid location (as defined by IMGT).
+For all amino acids outside of the analysed region the value 0 is given.
+
+
Presence of AA
+per sequence ID: Provides for each transcript information on which
+amino acid location (as defined by IMGT) is present. 0 is absent, 1
+is present.
+
+
The data used to
+generate the aa mutation frequency plot: Provides the
+data used to generate the aa mutation frequency plot for all sequences in the
+antigen selection tab.
+
+
The data used to
+generate the aa mutation frequency plot for IGA: Provides the
+data used to generate the aa mutation frequency plot for all IGA sequences in
+the antigen selection tab.
+
+
The data used to
+generate the aa mutation frequency plot for IGG: Provides the
+data used to generate the aa mutation frequency plot for all IGG sequences in
+the antigen selection tab.
+
+
The data used to
+generate the aa mutation frequency plot for IGM: Provides the
+data used to generate the aa mutation frequency plot for all IGM sequences in
+the antigen selection tab.
+
+
The data used to
+generate the aa mutation frequency plot for IGE: Provides the
+data used to generate the aa mutation frequency plot for all IGE sequences in
+the antigen selection tab.
+
+
Baseline PDF (http://selection.med.yale.edu/baseline/): PDF
+containing the Antigen selection (BASELINe) graph for all
+sequences.
+
+
Baseline data:
+Table output of the BASELINe analysis. Calculation of antigen selection as
+performed by BASELINe are shown for each individual sequence and the sum of all
+sequences.
+
+
Baseline IGA
+PDF:
+PDF containing the Antigen selection (BASELINe) graph for all IGA
+sequences.
+
+
Baseline IGA
+data:
+Table output of the BASELINe analysis. Calculation of antigen selection as
+performed by BASELINe are shown for each individual IGA sequence and the sum of
+all IGA sequences.
+
+
Baseline IGG
+PDF:
+PDF containing the Antigen selection (BASELINe) graph for all IGG
+sequences.
+
+
Baseline IGG
+data:
+Table output of the BASELINe analysis. Calculation of antigen selection as
+performed by BASELINe are shown for each individual IGG sequence and the sum of
+all IGG sequences.
+
+
Baseline IGM PDF: PDF
+containing the Antigen selection (BASELINe) graph for all IGM
+sequences.
+
+
Baseline IGM
+data:
+Table output of the BASELINe analysis. Calculation of antigen selection as
+performed by BASELINe are shown for each individual IGM sequence and the sum of
+all IGM sequences.
+
+
Baseline IGE
+PDF:
+PDF containing the Antigen selection (BASELINe) graph for all IGE
+sequences.
+
+
+
Baseline IGE
+data:
+Table output of the BASELINe analysis. Calculation of antigen selection as
+performed by BASELINe are shown for each individual IGE sequence and the sum of
+all IGE sequences.
+
+
CSR
+
+
The data for the
+IGA
+subclass distribution plot : Data used for
+the generation of the IGA subclass distribution plot provided
+in the CSR tab.
+
+
The data for the
+IGG
+subclass distribution plot: Data used for the generation of the IGG
+subclass distribution plot provided in the CSR tab.
+
+
Clonal relation
+
+
Sequence overlap
+between subclasses: Link to the overlap table as provided
+under the clonality overlap tab.
+
+
The Change-O DB
+file with defined clones and subclass annotation:
+Downloads a table with the calculation of clonal relation between all
+sequences. For each individual transcript the results of the clonal assignment
+as provided by Change-O are provided. Sequences with the same number in the CLONE
+column are considered clonally related.
+
+
The Change-O DB
+defined clones summary file: Gives a summary of the total number of
+clones in all sequences and their clone size.
+
+
The Change-O DB
+file with defined clones of IGA: Downloads a table with the
+calculation of clonal relation between all IGA sequences. For each individual
+transcript the results of the clonal assignment as provided by Change-O are
+provided. Sequences with the same number in the CLONE column are considered
+clonally related.
+
+
The Change-O DB
+defined clones summary file of IGA: Gives a summary
+of the total number of clones in all IGA sequences and their clone size.
+
+
The Change-O DB
+file with defined clones of IGG: Downloads a table with the
+calculation of clonal relation between all IGG sequences. For each individual
+transcript the results of the clonal assignment as provided by Change-O are
+provided. Sequences with the same number in the CLONE column are considered
+clonally related.
+
+
The Change-O DB
+defined clones summary file of IGG: Gives a summary
+of the total number of clones in all IGG sequences and their clone size.
+
+
The Change-O DB
+file with defined clones of IGM: Downloads a table
+with the calculation of clonal relation between all IGM sequences. For each
+individual transcript the results of the clonal assignment as provided by
+Change-O are provided. Sequences with the same number in the CLONE column are
+considered clonally related.
+
+
The Change-O DB
+defined clones summary file of IGM: Gives a summary
+of the total number of clones in all IGM sequences and their clone size.
+
+
The Change-O DB
+file with defined clones of IGE: Downloads a table with the
+calculation of clonal relation between all IGE sequences. For each individual
+transcript the results of the clonal assignment as provided by Change-O are
+provided. Sequences with the same number in the CLONE column are considered
+clonally related.
+
+
The Change-O DB
+defined clones summary file of IGE: Gives a summary
+of the total number of clones in all IGE sequences and their clone size.
+
+
Filtered IMGT
+output files
+
+
An IMGT archive
+with just the matched and filtered sequences: Downloads a
+.txz file with the same format as downloaded IMGT files that contains all
+sequences that have passed the chosen filter settings.
+
+
An IMGT archive
+with just the matched and filtered IGA sequences: Downloads a
+.txz file with the same format as downloaded IMGT files that contains all IGA
+sequences that have passed the chosen filter settings.
+
+
An IMGT archive
+with just the matched and filtered IGA1 sequences: Downloads a
+.txz file with the same format as downloaded IMGT files that contains all IGA1
+sequences that have passed the chosen filter settings.
+
+
An IMGT archive
+with just the matched and filtered IGA2 sequences: Downloads a .txz
+file with the same format as downloaded IMGT files that contains all IGA2
+sequences that have passed the chosen filter settings.
+
+
An IMGT archive
+with just the matched and filtered IGG sequences: Downloads a .txz
+file with the same format as downloaded IMGT files that contains all IGG
+sequences that have passed the chosen filter settings.
+
+
An IMGT archive
+with just the matched and filtered IGG1 sequences: Downloads a
+.txz file with the same format as downloaded IMGT files that contains all IGG1
+sequences that have passed the chosen filter settings.
+
+
An IMGT archive
+with just the matched and filtered IGG2 sequences: Downloads a
+.txz file with the same format as downloaded IMGT files that contains all IGG2
+sequences that have passed the chosen filter settings.
+
+
An IMGT archive
+with just the matched and filtered IGG3 sequences: Downloads a .txz
+file with the same format as downloaded IMGT files that contains all IGG3
+sequences that have passed the chosen filter settings.
+
+
An IMGT archive
+with just the matched and filtered IGG4 sequences: Downloads a
+.txz file with the same format as downloaded IMGT files that contains all IGG4
+sequences that have passed the chosen filter settings.
+
+
An IMGT archive
+with just the matched and filtered IGM sequences: Downloads a .txz
+file with the same format as downloaded IMGT files that contains all IGM
+sequences that have passed the chosen filter settings.
+
+
An IMGT archive
+with just the matched and filtered IGE sequences: Downloads a
+.txz file with the same format as downloaded IMGT files that contains all IGE
+sequences that have passed the chosen filter settings.
+
+
+
+
+
+
diff -r 43a1aa648537 -r ba33b94637ca shm_first.htm
--- a/shm_first.htm Thu Dec 07 03:44:38 2017 -0500
+++ b/shm_first.htm Tue Jan 29 03:54:09 2019 -0500
@@ -1,127 +1,127 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Table showing the order of each
-filtering step and the number and percentage of sequences after each filtering
-step.
-
-
Input: The
-number of sequences in the original IMGT file. This is always 100% of the
-sequences.
-
-
After "no results" filter: IMGT
-classifies sequences either as "productive", "unproductive", "unknown", or "no
-results". Here, the number and percentages of sequences that are not classified
-as "no results" are reported.
-
-
After functionality filter: The
-number and percentages of sequences that have passed the functionality filter. The
-filtering performed is dependent on the settings of the functionality filter.
-Details on the functionality filter can be found on the start page of
-the SHM&CSR pipeline.
-
-
After
-removal sequences that are missing a gene region:
-In this step all sequences that are missing a gene region (FR1, CDR1, FR2,
-CDR2, FR3) that should be present are removed from analysis. The sequence
-regions that should be present are dependent on the settings of the sequence
-starts at filter. The number and
-percentage of sequences that pass this filter step are reported.
-
-
After
-N filter: In this step all sequences that contain
-an ambiguous base (n) in the analysed region or the CDR3 are removed from the
-analysis. The analysed region is determined by the setting of the sequence
-starts at filter. The number and percentage of sequences that pass this filter
-step are reported.
-
-
After
-filter unique sequences: The number and
-percentage of sequences that pass the "filter unique sequences" filter. Details
-on this filter can be found on the start page of
-the SHM&CSR pipeline
-
-
After
-remove duplicate based on filter: The number and
-percentage of sequences that passed the remove duplicate filter. Details on the
-"remove duplicate filter based on filter" can be found on the start page of the
-SHM&CSR pipeline.
-
-
Number of matches sequences:
-The number and percentage of sequences that passed all the filters described
-above and have a (sub)class assigned.
-
-
Number
-of unmatched sequences: The number and percentage
-of sequences that passed all the filters described above and do not have
-subclass assigned.
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Table showing the order of each
+filtering step and the number and percentage of sequences after each filtering
+step.
+
+
Input: The
+number of sequences in the original IMGT file. This is always 100% of the
+sequences.
+
+
After "no results" filter: IMGT
+classifies sequences either as "productive", "unproductive", "unknown", or "no
+results". Here, the number and percentages of sequences that are not classified
+as "no results" are reported.
+
+
After functionality filter: The
+number and percentages of sequences that have passed the functionality filter. The
+filtering performed is dependent on the settings of the functionality filter.
+Details on the functionality filter can be found on the start page of
+the SHM&CSR pipeline.
+
+
After
+removal sequences that are missing a gene region:
+In this step all sequences that are missing a gene region (FR1, CDR1, FR2,
+CDR2, FR3) that should be present are removed from analysis. The sequence
+regions that should be present are dependent on the settings of the sequence
+starts at filter. The number and
+percentage of sequences that pass this filter step are reported.
+
+
After
+N filter: In this step all sequences that contain
+an ambiguous base (n) in the analysed region or the CDR3 are removed from the
+analysis. The analysed region is determined by the setting of the sequence
+starts at filter. The number and percentage of sequences that pass this filter
+step are reported.
+
+
After
+filter unique sequences: The number and
+percentage of sequences that pass the "filter unique sequences" filter. Details
+on this filter can be found on the start page of
+the SHM&CSR pipeline
+
+
After
+remove duplicate based on filter: The number and
+percentage of sequences that passed the remove duplicate filter. Details on the
+"remove duplicate filter based on filter" can be found on the start page of the
+SHM&CSR pipeline.
+
+
Number of matches sequences:
+The number and percentage of sequences that passed all the filters described
+above and have a (sub)class assigned.
+
+
Number
+of unmatched sequences: The number and percentage
+of sequences that passed all the filters described above and do not have a
+subclass assigned.
+
+
+
+
+
+
+
+
diff -r 43a1aa648537 -r ba33b94637ca shm_frequency.htm
--- a/shm_frequency.htm Thu Dec 07 03:44:38 2017 -0500
+++ b/shm_frequency.htm Tue Jan 29 03:54:09 2019 -0500
@@ -1,87 +1,87 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
SHM
-frequency tab
-
-
Graphs
-
-
These
-graphs give insight into the level of SHM. The data represented in these graphs
-can be downloaded in the download tab. More
-information on the values found in healthy individuals of different ages can be
-found in IJspeert and van Schouwenburg et al, PMID: 27799928.
-
-
Frequency
-scatter plot
-
-
A
-dot plot showing the percentage of SHM in each transcript divided into the
-different (sub)classes. In the graph each dot
-represents an individual transcript.
-
-
Mutation
-frequency by class
-
-
A
-bar graph showing the percentage of transcripts that contain 0%, 0-2%, 2-5%,
-5-10% 10-15%, 15-20% or more than 20% SHM for each subclass.
-
-
Hanna IJspeert, Pauline A. van
-Schouwenburg, David van Zessen, Ingrid Pico-Knijnenburg, Gertjan J. Driessen,
-Andrew P. Stubbs, and Mirjam van der Burg (2016). Evaluation
-of the Antigen-Experienced B-Cell Receptor Repertoire in Healthy Children and
-Adults. In Frontiers in Immunolog, 7, pp. e410-410. [doi:10.3389/fimmu.2016.00410][Link]
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
SHM
+frequency tab
+
+
Graphs
+
+
These
+graphs give insight into the level of SHM. The data represented in these graphs
+can be downloaded in the download tab. More
+information on the values found in healthy individuals of different ages can be
+found in IJspeert and van Schouwenburg et al, PMID: 27799928.
+
+
Frequency
+scatter plot
+
+
A
+dot plot showing the percentage of SHM in each transcript divided into the
+different (sub)classes. In the graph each dot
+represents an individual transcript.
+
+
Mutation
+frequency by class
+
+
A
+bar graph showing the percentage of transcripts that contain 0%, 0-2%, 2-5%,
+5-10% 10-15%, 15-20% or more than 20% SHM for each subclass.
+
+
Hanna IJspeert, Pauline A. van
+Schouwenburg, David van Zessen, Ingrid Pico-Knijnenburg, Gertjan J. Driessen,
+Andrew P. Stubbs, and Mirjam van der Burg (2016). Evaluation
+of the Antigen-Experienced B-Cell Receptor Repertoire in Healthy Children and
+Adults. Frontiers in Immunology, 7, 410. [doi:10.3389/fimmu.2016.00410][Link]
+
+
+
+
+
+
diff -r 43a1aa648537 -r ba33b94637ca shm_overview.htm
--- a/shm_overview.htm Thu Dec 07 03:44:38 2017 -0500
+++ b/shm_overview.htm Tue Jan 29 03:54:09 2019 -0500
@@ -1,332 +1,332 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Info
-table
-
-
This
-table contains information on different characteristics of SHM. For all
-characteristics information can be found for all sequences or only sequences of
-a certain (sub)class. All results are based on the sequences that passed the filter
-settings chosen on the start page of the SHM & CSR pipeline and only
-include details on the analysed region as determined by the setting of the
-sequence starts at filter. All data in this table can be downloaded via the
-“downloads” tab.
-
-
Mutation
-frequency:
-
-
These values
-give information on the level of SHM. More information
-on the values found in healthy individuals of different ages can be found in IJspeert
-and van Schouwenburg et al, PMID: 27799928
-
-
Number
-of mutations: Shows the number of total
-mutations / the number of sequenced bases (the % of mutated bases).
-
-
Median
-number of mutations: Shows the median % of
-SHM of all sequences.
-
-
Patterns
-of SHM:
-
-
These values
-give insights into the targeting and patterns of SHM. These values can give
-insight into the repair pathways used to repair the U:G mismatches introduced
-by AID. More information
-on the values found in healthy individuals of different ages can be found in
-IJspeert and van Schouwenburg et al, PMID: 27799928
-
-
Transitions:
-Shows the number of transition mutations / the number of total mutations (the
-percentage of mutations that are transitions). Transition mutations are C>T,
-T>C, A>G, G>A.
-
-
Transversions:
-Shows the number of transversion mutations / the number of total mutations (the
-percentage of mutations that are transitions). Transversion mutations are
-C>A, C>G, T>A, T>G, A>T, A>C, G>T, G>C.
-
-
Transitions
-at GC: Shows the number of transitions at GC locations (C>T,
-G>A) / the total number of mutations at GC locations (the percentage of
-mutations at GC locations that are transitions).
-
-
Targeting
-of GC: Shows the number of mutations at GC
-locations / the total number of mutations (the percentage of total mutations
-that are at GC locations).
-
-
Transitions
-at AT: Shows the number of transitions at AT
-locations (T>C, A>G) / the total number of mutations at AT locations (the
-percentage of mutations at AT locations that are transitions).
-
-
Targeting
-of AT: Shows the number of mutations at AT
-locations / the total number of mutations (the percentage of total mutations
-that are at AT locations).
-
-
RGYW:
-Shows
-the number of mutations that are in a RGYW motive / The number of total mutations
-(the percentage of mutations that are in a RGYW motive). RGYW motives are known to be
-preferentially targeted by AID (R=Purine,
-Y=pyrimidine, W = A or T).
-
-
WRCY:
-Shows the number of mutations
-that are in a WRCY motive / The number of
-total mutations (the percentage of mutations that are in a WRCY motive). WRCY
-motives are known to be preferentially targeted by AID (R=Purine,
-Y=pyrimidine, W = A or T).
-
-
WA:
-Shows
-the number of mutations that are in a WA motive / The number of total mutations
-(the percentage of mutations that are in a WA motive). It is described that
-polymerase eta preferentially makes errors at WA motives (W
-= A or T).
-
-
TW:
-Shows the number of mutations that are in a TW motive / The number of total mutations
-(the percentage of mutations that are in a TW motive). It is described that
-polymerase eta preferentially makes errors at TW motives (W
-= A or T).
-
-
Antigen
-selection:
-
-
These
-values give insight into antigen selection. It has been described that during
-antigen selection, there is selection against replacement mutations in the FR
-regions as these can cause instability of the B-cell receptor. In contrast
-replacement mutations in the CDR regions are important for changing the
-affinity of the B-cell receptor and therefore there is selection for this type
-of mutations. Silent mutations do not alter the amino acid sequence and
-therefore do not play a role in selection. More information on the values found
-in healthy individuals of different ages can be found in IJspeert and van
-Schouwenburg et al, PMID: 27799928
-
-
FR
-R/S: Shows the number of replacement
-mutations in the FR regions / The number of silent mutations in the FR regions
-(the number of replacement mutations in the FR regions divided by the number of
-silent mutations in the FR regions)
-
-
CDR
-R/S: Shows the number of replacement
-mutations in the CDR regions / The number of silent mutations in the CDR
-regions (the number of replacement mutations in the CDR regions divided by the
-number of silent mutations in the CDR regions)
-
-
Number
-of sequences nucleotides:
-
-
These
-values give information on the number of sequenced nucleotides.
-
-
Nt
-in FR: Shows the number of sequences bases
-that are located in the FR regions / The total number of sequenced bases (the
-percentage of sequenced bases that are present in the FR regions).
-
-
Nt
-in CDR: Shows the number of sequenced bases
-that are located in the CDR regions / The total number of sequenced bases (the percentage of
-sequenced bases that are present in the CDR regions).
-
-
A:
-Shows the total number of sequenced
-adenines / The total number of sequenced bases (the percentage of sequenced
-bases that were adenines).
-
-
C:
-Shows
-the total number of sequenced cytosines / The total number of sequenced bases
-(the percentage of sequenced bases that were cytosines).
-
-
T:
-Shows
-the total number of sequenced thymines
-/ The total number of sequenced bases (the percentage of sequenced bases that
-were thymines).
-
-
G:
-Shows the total number of sequenced guanines / The total number of
-sequenced bases (the percentage of sequenced bases that were guanines).
-
-
Graphs
-
-
These graphs visualize
-information on the patterns and targeting of SHM and thereby give information
-into the repair pathways used to repair the U:G mismatches introduced by AID. The
-data represented in these graphs can be downloaded in the download tab. More
-information on the values found in healthy individuals of different ages can be
-found in IJspeert and van Schouwenburg et al, PMID: 27799928.
-
-
-
Percentage
-of mutations in AID and pol eta motives
-
-
Visualizes
-for each
-(sub)class the percentage of mutations that are present in AID (RGYW or
-WRCY) or polymerase eta motives (WA or TW) in the different subclasses (R=Purine,
-Y=pyrimidine, W = A or T).
-
-
Relative
-mutation patterns
-
-
Visualizes
-for each (sub)class the distribution of mutations between mutations at AT
-locations and transitions or transversions at GC locations.
-
-
Absolute
-mutation patterns
-
-
Visualized
-for each (sub)class the percentage of sequenced AT and GC bases that are
-mutated. The mutations at GC bases are divided into transition and transversion
-mutations.
-
-
Hanna IJspeert, Pauline A. van
-Schouwenburg, David van Zessen, Ingrid Pico-Knijnenburg, Gertjan J. Driessen,
-Andrew P. Stubbs, and Mirjam van der Burg (2016). Evaluation
-of the Antigen-Experienced B-Cell Receptor Repertoire in Healthy Children and
-Adults. In Frontiers in Immunolog, 7, pp. e410-410. [doi:10.3389/fimmu.2016.00410][Link]
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Info
+table
+
+
+This
+table contains information on different characteristics of SHM. For each
+characteristic, values are given for all sequences together or for sequences of
+a certain (sub)class only. All results are based on the sequences that passed the filter
+settings chosen on the start page of the SHM & CSR pipeline and only
+include details on the analysed region as determined by the setting of the
+‘sequence starts at’ filter. All data in this table can be downloaded via the
+“downloads” tab.
+
+
Mutation
+frequency:
+
+
These values
+give information on the level of SHM. More information
+on the values found in healthy individuals of different ages can be found in IJspeert
+and van Schouwenburg et al., PMID: 27799928.
+
+
Number
+of mutations: Shows the number of total
+mutations / the number of sequenced bases (the % of mutated bases).
+
+
Median
+number of mutations: Shows the median % of
+SHM of all sequences.
+
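+As an illustration of how these two values relate (a hedged sketch with made-up
+numbers; the vector names are not the pipeline's own identifiers):
+
+muts  = c(12, 3, 0, 25)            # mutations per sequence (example data)
+bases = c(300, 290, 310, 305)      # sequenced bases per sequence
+sum(muts) / sum(bases) * 100       # "Number of mutations": overall % of mutated bases
+median(muts / bases * 100)         # "Median number of mutations": median % SHM per sequence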
+
Patterns
+of SHM:
+
+
+These values
+give insight into the targeting and patterns of SHM, and thereby into the
+repair pathways used to resolve the U:G mismatches introduced
+by AID. More information
+on the values found in healthy individuals of different ages can be found in
+IJspeert and van Schouwenburg et al., PMID: 27799928.
+
+
Transitions:
+Shows the number of transition mutations / the number of total mutations (the
+percentage of mutations that are transitions). Transition mutations are C>T,
+T>C, A>G, G>A.
+
+
+Transversions:
+Shows the number of transversion mutations / the number of total mutations (the
+percentage of mutations that are transversions). Transversion mutations are
+C>A, C>G, T>A, T>G, A>T, A>C, G>T, G>C.
+
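+A small helper illustrating this classification (a sketch, not the pipeline's
+own code):
+
+is.transition = function(from, to) {
+  # transitions keep the base type: purine <-> purine (A/G) or pyrimidine <-> pyrimidine (C/T)
+  paste(from, to) %in% c("C T", "T C", "A G", "G A")
+}
+is.transition("C", "T")   # TRUE, a transition
+is.transition("C", "A")   # FALSE, a transversion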
+
Transitions
+at GC: Shows the number of transitions at GC locations (C>T,
+G>A) / the total number of mutations at GC locations (the percentage of
+mutations at GC locations that are transitions).
+
+
Targeting
+of GC: Shows the number of mutations at GC
+locations / the total number of mutations (the percentage of total mutations
+that are at GC locations).
+
+
Transitions
+at AT: Shows the number of transitions at AT
+locations (T>C, A>G) / the total number of mutations at AT locations (the
+percentage of mutations at AT locations that are transitions).
+
+
Targeting
+of AT: Shows the number of mutations at AT
+locations / the total number of mutations (the percentage of total mutations
+that are at AT locations).
+
+
+RGYW:
+Shows
+the number of mutations that are in an RGYW motif / the number of total mutations
+(the percentage of mutations that are in an RGYW motif). RGYW motifs are known to be
+preferentially targeted by AID (R = purine,
+Y = pyrimidine, W = A or T).
+
+
+WRCY:
+Shows the number of mutations
+that are in a WRCY motif / the number of
+total mutations (the percentage of mutations that are in a WRCY motif). WRCY
+motifs are known to be preferentially targeted by AID (R = purine,
+Y = pyrimidine, W = A or T).
+
+
+WA:
+Shows
+the number of mutations that are in a WA motif / the number of total mutations
+(the percentage of mutations that are in a WA motif). Polymerase eta has been
+described to preferentially make errors at WA motifs (W
+= A or T).
+
+
+TW:
+Shows the number of mutations that are in a TW motif / the number of total mutations
+(the percentage of mutations that are in a TW motif). Polymerase eta has been
+described to preferentially make errors at TW motifs (W
+= A or T).
+
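+The IUPAC codes in these motifs translate directly into regular expressions, for
+example (an illustrative sketch; the pipeline's own motif matching may be
+implemented differently, and this simple version does not count overlapping hits):
+
+motifs = c(RGYW = "[AG]G[CT][AT]", WRCY = "[AT][AG]C[CT]",
+           WA   = "[AT]A",         TW   = "T[AT]")
+count.motif = function(pattern, s) {
+  hits = gregexpr(pattern, s)[[1]]
+  sum(hits > 0)                      # gregexpr() returns -1 when there is no match
+}
+sapply(motifs, count.motif, s = "TAGCTATGGCTTGGAA")   # motif counts for one example sequence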
+
Antigen
+selection:
+
+
+These
+values give insight into antigen selection. It has been described that during
+antigen selection, there is selection against replacement mutations in the FR
+regions as these can cause instability of the B-cell receptor. In contrast,
+replacement mutations in the CDR regions are important for changing the
+affinity of the B-cell receptor, and therefore there is selection for this type
+of mutation. Silent mutations do not alter the amino acid sequence and
+therefore do not play a role in selection. More information on the values found
+in healthy individuals of different ages can be found in IJspeert and van
+Schouwenburg et al., PMID: 27799928.
+
+
+FR
+R/S: Shows the number of replacement
+mutations in the FR regions / the number of silent mutations in the FR regions
+(the ratio of replacement to silent mutations in the FR regions).
+
+
+CDR
+R/S: Shows the number of replacement
+mutations in the CDR regions / the number of silent mutations in the CDR
+regions (the ratio of replacement to silent mutations in the CDR regions).
+
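+As a small worked example of these ratios (the counts are made up):
+
+fr.r  = 10; fr.s  = 8        # replacement and silent mutations in the FR regions
+cdr.r = 15; cdr.s = 5        # replacement and silent mutations in the CDR regions
+fr.r / fr.s                  # FR R/S  = 1.25
+cdr.r / cdr.s                # CDR R/S = 3, higher under antigen selection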
+
+Number
+of sequenced nucleotides:
+
+
These
+values give information on the number of sequenced nucleotides.
+
+
+Nt
+in FR: Shows the number of sequenced bases
+that are located in the FR regions / the total number of sequenced bases (the
+percentage of sequenced bases that are present in the FR regions).
+
+
Nt
+in CDR: Shows the number of sequenced bases
+that are located in the CDR regions / The total number of sequenced bases (the percentage of
+sequenced bases that are present in the CDR regions).
+
+
A:
+Shows the total number of sequenced
+adenines / The total number of sequenced bases (the percentage of sequenced
+bases that were adenines).
+
+
C:
+Shows
+the total number of sequenced cytosines / The total number of sequenced bases
+(the percentage of sequenced bases that were cytosines).
+
+
T:
+Shows
+the total number of sequenced thymines
+/ The total number of sequenced bases (the percentage of sequenced bases that
+were thymines).
+
+
G:
+Shows the total number of sequenced guanines / The total number of
+sequenced bases (the percentage of sequenced bases that were guanines).
+
+
Graphs
+
+
+These graphs visualize
+information on the patterns and targeting of SHM and thereby give insight
+into the repair pathways used to resolve the U:G mismatches introduced by AID. The
+data represented in these graphs can be downloaded in the download tab. More
+information on the values found in healthy individuals of different ages can be
+found in IJspeert and van Schouwenburg et al., PMID: 27799928.
+
+
+
+Percentage
+of mutations in AID and pol eta motifs
+
+
+Visualizes
+for each
+(sub)class the percentage of mutations that are present in AID motifs (RGYW or
+WRCY) or polymerase eta motifs (WA or TW) (R = purine,
+Y = pyrimidine, W = A or T).
+
+
Relative
+mutation patterns
+
+
+Visualizes
+for each (sub)class how the mutations are distributed over mutations at AT
+locations, transitions at GC locations and transversions at GC locations.
+
+
Absolute
+mutation patterns
+
+
+Visualizes
+for each (sub)class the percentage of sequenced AT and GC bases that are
+mutated. The mutations at GC bases are divided into transition and transversion
+mutations.
+
+
+Hanna IJspeert, Pauline A. van
+Schouwenburg, David van Zessen, Ingrid Pico-Knijnenburg, Gertjan J. Driessen,
+Andrew P. Stubbs, and Mirjam van der Burg (2016). Evaluation
+of the Antigen-Experienced B-Cell Receptor Repertoire in Healthy Children and
+Adults. In Frontiers in Immunology, 7, 410. [doi:10.3389/fimmu.2016.00410][Link]
+
+
+
+
+
+
diff -r 43a1aa648537 -r ba33b94637ca shm_selection.htm
--- a/shm_selection.htm Thu Dec 07 03:44:38 2017 -0500
+++ b/shm_selection.htm Tue Jan 29 03:54:09 2019 -0500
@@ -1,128 +1,128 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
References
-
-
Yaari, G. and Uduman, M. and Kleinstein, S. H. (2012). Quantifying
-selection in high-throughput Immunoglobulin sequencing data sets. In Nucleic Acids Research, 40 (17),
-pp. e134–e134. [doi:10.1093/nar/gks457][Link]
-
-
Graphs
-
-
AA
-mutation frequency
-
-
For
-each class, the frequency of replacement mutations at each amino acid position
-is shown, which is calculated by dividing the number of replacement mutations
-at a particular amino acid position/the number sequences that have an amino
-acid at that particular position. Since the length of the CDR1 and CDR2 region
-is not the same for every VH gene, some amino acids positions are absent.
-Therefore we calculate the frequency using the number of amino acids present at
-that that particular location.
-
-
Antigen
-selection (BASELINe)
-
-
Shows
-the results of the analysis of antigen selection as performed using BASELINe.
-Details on the analysis performed by BASELINe can be found in Yaari et al,
-PMID: 22641856. The settings used for the analysis are:
-focused, SHM targeting model: human Tri-nucleotide, custom bounderies. The
-custom boundries are dependent on the ‘sequence starts at filter’.
-
-
Leader:
-1:26:38:55:65:104:-
-
-
FR1: 27:27:38:55:65:104:-
-
-
CDR1: 27:27:38:55:65:104:-
-
-
FR2: 27:27:38:55:65:104:-
-
-
Hanna IJspeert, Pauline A. van
-Schouwenburg, David van Zessen, Ingrid Pico-Knijnenburg, Gertjan J. Driessen,
-Andrew P. Stubbs, and Mirjam van der Burg (2016). Evaluation
-of the Antigen-Experienced B-Cell Receptor Repertoire in Healthy Children and
-Adults. In Frontiers in Immunolog, 7, pp. e410-410. [doi:10.3389/fimmu.2016.00410][Link]
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
References
+
+
Yaari, G. and Uduman, M. and Kleinstein, S. H. (2012). Quantifying
+selection in high-throughput Immunoglobulin sequencing data sets. In Nucleic Acids Research, 40 (17),
+pp. e134–e134. [doi:10.1093/nar/gks457][Link]
+
+
Graphs
+
+
AA
+mutation frequency
+
+
+For
+each class, the frequency of replacement mutations at each amino acid position
+is shown. It is calculated by dividing the number of replacement mutations
+at a particular amino acid position by the number of sequences that have an amino
+acid at that particular position. Since the length of the CDR1 and CDR2 regions
+is not the same for every VH gene, some amino acid positions are absent in part
+of the sequences. Therefore the frequency is calculated using the number of
+amino acids actually present at that particular position.
+
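+A minimal sketch of this per-position normalisation (the vector names and
+numbers are illustrative only):
+
+repl.at.pos = c(3, 0, 7, 2)           # replacement mutations observed at each AA position
+aa.at.pos   = c(120, 118, 60, 119)    # sequences that actually have an AA at that position
+repl.at.pos / aa.at.pos               # frequency plotted in the AA mutation frequency graph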
+
Antigen
+selection (BASELINe)
+
+
+Shows
+the results of the analysis of antigen selection as performed using BASELINe.
+Details on the analysis performed by BASELINe can be found in Yaari et al.,
+PMID: 22641856. The settings used for the analysis are:
+focused, SHM targeting model: human Tri-nucleotide, custom boundaries. The
+custom boundaries depend on the ‘sequence starts at’ filter (a sketch of this
+mapping follows the list below):
+
+
Leader:
+1:26:38:55:65:104:-
+
+
FR1: 27:27:38:55:65:104:-
+
+
CDR1: 27:27:38:55:65:104:-
+
+
FR2: 27:27:38:55:65:104:-
+
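+A sketch of how the boundary string could be selected from that filter setting
+(illustrative only; the names below are not the pipeline's actual identifiers):
+
+baseline.boundaries = c(Leader = "1:26:38:55:65:104:-",
+                        FR1    = "27:27:38:55:65:104:-",
+                        CDR1   = "27:27:38:55:65:104:-",
+                        FR2    = "27:27:38:55:65:104:-")
+sequence.starts.at = "FR1"                   # value of the 'sequence starts at' filter
+baseline.boundaries[[sequence.starts.at]]    # custom boundaries passed to BASELINe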
+
+Hanna IJspeert, Pauline A. van
+Schouwenburg, David van Zessen, Ingrid Pico-Knijnenburg, Gertjan J. Driessen,
+Andrew P. Stubbs, and Mirjam van der Burg (2016). Evaluation
+of the Antigen-Experienced B-Cell Receptor Repertoire in Healthy Children and
+Adults. In Frontiers in Immunology, 7, 410. [doi:10.3389/fimmu.2016.00410][Link]
+
+
+
+
+
+
diff -r 43a1aa648537 -r ba33b94637ca shm_transition.htm
--- a/shm_transition.htm Thu Dec 07 03:44:38 2017 -0500
+++ b/shm_transition.htm Tue Jan 29 03:54:09 2019 -0500
@@ -1,120 +1,120 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
These graphs and
-tables give insight into the targeting and patterns of SHM. This can give
-insight into the DNA repair pathways used to solve the U:G mismatches
-introduced by AID. More information on the values found in healthy individuals
-of different ages can be found in IJspeert and van Schouwenburg et al, PMID:
-27799928.
-
-
Graphs
-
-
-
Heatmap transition
-information
-
-
Heatmaps visualizing for each subclass the frequency
-of all possible substitutions. On the x-axes the original base is shown, while
-the y-axes shows the new base. The darker the shade of blue, the more frequent
-this type of substitution is occurring.
-
-
Bargraph
-transition information
-
-
Bar graph
-visualizing for each original base the distribution of substitutions into the other
-bases. A graph is included for each (sub)class.
-
-
Tables
-
-
Transition
-tables are shown for each (sub)class. All the original bases are listed
-horizontally, while the new bases are listed vertically.
-
-
Hanna IJspeert, Pauline A. van
-Schouwenburg, David van Zessen, Ingrid Pico-Knijnenburg, Gertjan J. Driessen,
-Andrew P. Stubbs, and Mirjam van der Burg (2016). Evaluation
-of the Antigen-Experienced B-Cell Receptor Repertoire in Healthy Children and
-Adults. In Frontiers in Immunolog, 7, pp. e410-410. [doi:10.3389/fimmu.2016.00410][Link]
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+These graphs and
+tables give insight into the targeting and patterns of SHM. This can give
+insight into the DNA repair pathways used to resolve the U:G mismatches
+introduced by AID. More information on the values found in healthy individuals
+of different ages can be found in IJspeert and van Schouwenburg et al., PMID:
+27799928.
+
+
Graphs
+
+
+
Heatmap transition
+information
+
+
+Heatmaps visualizing for each subclass the frequency
+of all possible substitutions. On the x-axis the original base is shown, while
+the y-axis shows the new base. The darker the shade of blue, the more frequently
+this type of substitution occurs.
+
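+A sketch of how such a substitution matrix could be assembled (assuming vectors
+from.base and to.base with one entry per observed mutation; the names are
+illustrative, not the pipeline's own):
+
+from.base = c("C", "C", "G", "A", "T", "C")    # original base of each mutation (example data)
+to.base   = c("T", "A", "A", "G", "C", "T")    # new base of each mutation
+bases     = c("A", "C", "G", "T")
+counts    = table(factor(from.base, levels = bases), factor(to.base, levels = bases))
+prop.table(counts)                             # substitution frequencies shown in the heatmap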
+
+Bar graph
+transition information
+
+
Bar graph
+visualizing for each original base the distribution of substitutions into the other
+bases. A graph is included for each (sub)class.
+
+
Tables
+
+
Transition
+tables are shown for each (sub)class. All the original bases are listed
+horizontally, while the new bases are listed vertically.
+
+
+Hanna IJspeert, Pauline A. van
+Schouwenburg, David van Zessen, Ingrid Pico-Knijnenburg, Gertjan J. Driessen,
+Andrew P. Stubbs, and Mirjam van der Burg (2016). Evaluation
+of the Antigen-Experienced B-Cell Receptor Repertoire in Healthy Children and
+Adults. In Frontiers in Immunology, 7, 410. [doi:10.3389/fimmu.2016.00410][Link]
+
+
+
+
+
+
diff -r 43a1aa648537 -r ba33b94637ca wrapper.sh