comparison report_clonality/RScript.r @ 5:bcec7bb4e089 draft

Uploaded
author davidvanzessen
date Mon, 12 Dec 2016 05:22:57 -0500
parents
children d001d0c05dbe
1 # ---------------------- load/install packages ----------------------
2
3 if (!("gridExtra" %in% rownames(installed.packages()))) {
4 install.packages("gridExtra", repos="http://cran.xl-mirror.nl/")
5 }
6 library(gridExtra)
7 if (!("ggplot2" %in% rownames(installed.packages()))) {
8 install.packages("ggplot2", repos="http://cran.xl-mirror.nl/")
9 }
10 library(ggplot2)
11 if (!("plyr" %in% rownames(installed.packages()))) {
12 install.packages("plyr", repos="http://cran.xl-mirror.nl/")
13 }
14 library(plyr)
15
16 if (!("data.table" %in% rownames(installed.packages()))) {
17 install.packages("data.table", repos="http://cran.xl-mirror.nl/")
18 }
19 library(data.table)
20
21 if (!("reshape2" %in% rownames(installed.packages()))) {
22 install.packages("reshape2", repos="http://cran.xl-mirror.nl/")
23 }
24 library(reshape2)
25
26 if (!("lymphclon" %in% rownames(installed.packages()))) {
27 install.packages("lymphclon", repos="http://cran.xl-mirror.nl/")
28 }
29 library(lymphclon)
30
31 # ---------------------- parameters ----------------------
32
33 args <- commandArgs(trailingOnly = TRUE)
34
35 infile = args[1] #path to input file
36 outfile = args[2] #path to output file
37 outdir = args[3] #path to output folder (html/images/data)
38 clonaltype = args[4] #clonaltype definition, or 'none' for no unique filtering
39 ct = unlist(strsplit(clonaltype, ","))
40 species = args[5] #species: 'human', 'mouse' or 'custom'
41 locus = args[6] #locus: IGH, IGK, IGL, TRB, TRA, TRG or TRD (or semicolon-separated V;D;J gene lists when species is 'custom')
42 filterproductive = ifelse(args[7] == "yes", T, F) #should unproductive sequences be filtered out? (yes/no)
43 clonality_method = args[8]
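# Illustrative invocation (the argument values here are assumptions; in practice the Galaxy
# wrapper supplies them in this order):
#   Rscript RScript.r merged.txt report.html outdir "Top.V.Gene,CDR3.Seq.DNA" human IGH yes boyd
# Pass 'none' as the clonaltype to skip the unique filtering step.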
44
45
46 # ---------------------- Data preparation ----------------------
47 
48 print("Report Clonality - Data preparation")
49
50 inputdata = read.table(infile, sep="\t", header=TRUE, fill=T, comment.char="")
51
52 print(paste("nrows: ", nrow(inputdata)))
53
54 setwd(outdir)
55
56 # remove rows with an empty Sample value
57 inputdata = inputdata[inputdata$Sample != "",]
58
59 print(paste("nrows: ", nrow(inputdata)))
60
61 #remove the allele from the V,D and J genes
62 inputdata$Top.V.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.V.Gene)
63 inputdata$Top.D.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.D.Gene)
64 inputdata$Top.J.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.J.Gene)
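# e.g. "IGHV3-23*01" becomes "IGHV3-23": the "*<allele number>" suffix is dropped so counts are per gene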
65
66 print(paste("nrows: ", nrow(inputdata)))
67
68 #filter uniques
69 inputdata.removed = inputdata[NULL,]
70
71 print(paste("nrows: ", nrow(inputdata)))
72
73 inputdata$clonaltype = 1:nrow(inputdata)
74
75 #keep track of the count of sequences in samples or samples/replicates for the front page overview
76 input.sample.count = data.frame(data.table(inputdata)[, list(All=.N), by=c("Sample")])
77 input.rep.count = data.frame(data.table(inputdata)[, list(All=.N), by=c("Sample", "Replicate")])
78
79 PRODF = inputdata
80 UNPROD = inputdata
81 if(filterproductive){
82 if("Functionality" %in% colnames(inputdata)) { # "Functionality" is an IMGT column
83 #PRODF = inputdata[inputdata$Functionality == "productive" | inputdata$Functionality == "productive (see comment)", ]
84 PRODF = inputdata[inputdata$Functionality %in% c("productive (see comment)","productive"),]
85
86 PRODF.count = data.frame(data.table(PRODF)[, list(count=.N), by=c("Sample")])
87
88 UNPROD = inputdata[inputdata$Functionality %in% c("unproductive (see comment)","unproductive"), ]
89 } else {
90 PRODF = inputdata[inputdata$VDJ.Frame != "In-frame with stop codon" & inputdata$VDJ.Frame != "Out-of-frame" & inputdata$CDR3.Found.How != "NOT_FOUND" , ]
91 UNPROD = inputdata[!(inputdata$VDJ.Frame != "In-frame with stop codon" & inputdata$VDJ.Frame != "Out-of-frame" & inputdata$CDR3.Found.How != "NOT_FOUND" ), ]
92 }
93 }
94
95 prod.sample.count = data.frame(data.table(PRODF)[, list(Productive=.N), by=c("Sample")])
96 prod.rep.count = data.frame(data.table(PRODF)[, list(Productive=.N), by=c("Sample", "Replicate")])
97
98 unprod.sample.count = data.frame(data.table(UNPROD)[, list(Unproductive=.N), by=c("Sample")])
99 unprod.rep.count = data.frame(data.table(UNPROD)[, list(Unproductive=.N), by=c("Sample", "Replicate")])
100
101 clonalityFrame = PRODF
102
103 #remove duplicates based on the clonaltype
104 if(clonaltype != "none"){
105 clonaltype = paste(clonaltype, ",Sample", sep="") #add sample column to clonaltype, unique within samples
106 PRODF$clonaltype = do.call(paste, c(PRODF[unlist(strsplit(clonaltype, ","))], sep = ":"))
107 PRODF = PRODF[!duplicated(PRODF$clonaltype), ]
108
109 UNPROD$clonaltype = do.call(paste, c(UNPROD[unlist(strsplit(clonaltype, ","))], sep = ":"))
110 UNPROD = UNPROD[!duplicated(UNPROD$clonaltype), ]
111
112 #again for clonalityFrame but with sample+replicate
113 clonalityFrame$clonaltype = do.call(paste, c(clonalityFrame[unlist(strsplit(clonaltype, ","))], sep = ":"))
114 clonalityFrame$clonality_clonaltype = do.call(paste, c(clonalityFrame[unlist(strsplit(paste(clonaltype, ",Replicate", sep=""), ","))], sep = ":"))
115 clonalityFrame = clonalityFrame[!duplicated(clonalityFrame$clonality_clonaltype), ]
116 }
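# Illustration (assuming clonaltype = "Top.V.Gene,CDR3.Seq.DNA"): PRODF and UNPROD keep one row per
# Sample for every distinct V gene + CDR3 combination, while clonalityFrame keeps one row per
# Sample + Replicate combination, which is what the clonality calculation below requires.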
117
118 print("SAMPLE TABLE:")
119 print(table(PRODF$Sample))
120
121 prod.unique.sample.count = data.frame(data.table(PRODF)[, list(Productive_unique=.N), by=c("Sample")])
122 prod.unique.rep.count = data.frame(data.table(PRODF)[, list(Productive_unique=.N), by=c("Sample", "Replicate")])
123
124 unprod.unique.sample.count = data.frame(data.table(UNPROD)[, list(Unproductive_unique=.N), by=c("Sample")])
125 unprod.unique.rep.count = data.frame(data.table(UNPROD)[, list(Unproductive_unique=.N), by=c("Sample", "Replicate")])
126
127 PRODF$freq = 1
128
129 if(any(grepl(pattern="_", x=PRODF$ID))){ #the frequency can be stored in the ID with the pattern ".*_freq_.*"
130 PRODF$freq = gsub("^[0-9]+_", "", PRODF$ID)
131 PRODF$freq = gsub("_.*", "", PRODF$freq)
132 PRODF$freq = as.numeric(PRODF$freq)
133 if(any(is.na(PRODF$freq))){ #if there was an "_" in the ID, but not the frequency, go back to frequency of 1 for every sequence
134 PRODF$freq = 1
135 }
136 }
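# e.g. an ID of the form "123_42_rest" yields freq = 42; if the field after the first "_" is not
# numeric, as.numeric() produces NA and every frequency falls back to 1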
137
138
139
140 #write out the remaining dataset; with clonaltype 'none' and filterproductive 'no' this equals the input
141 write.table(PRODF, "allUnique.txt", sep="\t",quote=F,row.names=F,col.names=T)
142 write.table(PRODF, "allUnique.csv", sep=",",quote=F,row.names=F,col.names=T)
143 write.table(UNPROD, "allUnproductive.csv", sep=",",quote=F,row.names=F,col.names=T)
144
145 #write the samples to a file
146 sampleFile <- file("samples.txt")
147 un = unique(inputdata$Sample)
148 un = paste(un, sep="\n")
149 writeLines(un, sampleFile)
150 close(sampleFile)
151
152 # ---------------------- Counting the productive/unproductive and unique sequences ----------------------
153
154 print("Report Clonality - counting productive/unproductive/unique")
155
156 #create the table on the overview page with the productive/unique counts per sample/replicate
157 #first for sample
158 sample.count = merge(input.sample.count, prod.sample.count, by="Sample", all.x=T)
159 sample.count$perc_prod = round(sample.count$Productive / sample.count$All * 100)
160 sample.count = merge(sample.count, prod.unique.sample.count, by="Sample", all.x=T)
161 sample.count$perc_prod_un = round(sample.count$Productive_unique / sample.count$All * 100)
162
163 sample.count = merge(sample.count , unprod.sample.count, by="Sample", all.x=T)
164 sample.count$perc_unprod = round(sample.count$Unproductive / sample.count$All * 100)
165 sample.count = merge(sample.count, unprod.unique.sample.count, by="Sample", all.x=T)
166 sample.count$perc_unprod_un = round(sample.count$Unproductive_unique / sample.count$All * 100)
167
168 #then sample/replicate
169 rep.count = merge(input.rep.count, prod.rep.count, by=c("Sample", "Replicate"), all.x=T)
170 rep.count$perc_prod = round(rep.count$Productive / rep.count$All * 100)
171 rep.count = merge(rep.count, prod.unique.rep.count, by=c("Sample", "Replicate"), all.x=T)
172 rep.count$perc_prod_un = round(rep.count$Productive_unique / rep.count$All * 100)
173
174 rep.count = merge(rep.count, unprod.rep.count, by=c("Sample", "Replicate"), all.x=T)
175 rep.count$perc_unprod = round(rep.count$Unproductive / rep.count$All * 100)
176 rep.count = merge(rep.count, unprod.unique.rep.count, by=c("Sample", "Replicate"), all.x=T)
177 rep.count$perc_unprod_un = round(rep.count$Unproductive_unique / rep.count$All * 100)
178
179 rep.count$Sample = paste(rep.count$Sample, rep.count$Replicate, sep="_")
180 rep.count = rep.count[,names(rep.count) != "Replicate"]
181
182 count = rbind(sample.count, rep.count)
183
184
185
186 write.table(x=count, file="productive_counting.txt", sep=",",quote=F,row.names=F,col.names=F)
187
188 # ---------------------- V+J+CDR3 sequence count ----------------------
189
190 VJCDR3.count = data.frame(table(clonalityFrame$Top.V.Gene, clonalityFrame$Top.J.Gene, clonalityFrame$CDR3.Seq.DNA))
191 names(VJCDR3.count) = c("Top.V.Gene", "Top.J.Gene", "CDR3.Seq.DNA", "Count")
192
193 VJCDR3.count = VJCDR3.count[VJCDR3.count$Count > 0,]
194 VJCDR3.count = VJCDR3.count[order(-VJCDR3.count$Count),]
195
196 write.table(x=VJCDR3.count, file="VJCDR3_count.txt", sep="\t",quote=F,row.names=F,col.names=T)
197
198 # ---------------------- Frequency calculation for V, D and J ----------------------
199
200 print("Report Clonality - frequency calculation V, D and J")
201
202 PRODFV = data.frame(data.table(PRODF)[, list(Length=sum(freq)), by=c("Sample", "Top.V.Gene")])
203 Total = ddply(PRODFV, .(Sample), function(x) data.frame(Total = sum(x$Length)))
204 PRODFV = merge(PRODFV, Total, by.x='Sample', by.y='Sample', all.x=TRUE)
205 PRODFV = ddply(PRODFV, c("Sample", "Top.V.Gene"), summarise, relFreq= (Length*100 / Total))
206
207 PRODFD = data.frame(data.table(PRODF)[, list(Length=sum(freq)), by=c("Sample", "Top.D.Gene")])
208 Total = ddply(PRODFD, .(Sample), function(x) data.frame(Total = sum(x$Length)))
209 PRODFD = merge(PRODFD, Total, by.x='Sample', by.y='Sample', all.x=TRUE)
210 PRODFD = ddply(PRODFD, c("Sample", "Top.D.Gene"), summarise, relFreq= (Length*100 / Total))
211
212 PRODFJ = data.frame(data.table(PRODF)[, list(Length=sum(freq)), by=c("Sample", "Top.J.Gene")])
213 Total = ddply(PRODFJ, .(Sample), function(x) data.frame(Total = sum(x$Length)))
214 PRODFJ = merge(PRODFJ, Total, by.x='Sample', by.y='Sample', all.x=TRUE)
215 PRODFJ = ddply(PRODFJ, c("Sample", "Top.J.Gene"), summarise, relFreq= (Length*100 / Total))
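# relFreq is the percentage of reads per sample: 100 * sum(freq) for the gene / sum(freq) over all genes in that sample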
216
217 # ---------------------- Setting up the gene names for the different species/loci ----------------------
218
219 print("Report Clonality - getting genes for species/loci")
220
221 Vchain = ""
222 Dchain = ""
223 Jchain = ""
224
225 if(species == "custom"){
226 print("Custom genes: ")
227 splt = unlist(strsplit(locus, ";"))
228 print(paste("V:", splt[1]))
229 print(paste("D:", splt[2]))
230 print(paste("J:", splt[3]))
231
232 Vchain = unlist(strsplit(splt[1], ","))
233 Vchain = data.frame(v.name = Vchain, chr.orderV = 1:length(Vchain))
234
235 Dchain = unlist(strsplit(splt[2], ","))
236 if(length(Dchain) > 0){
237 Dchain = data.frame(v.name = Dchain, chr.orderD = 1:length(Dchain))
238 } else {
239 Dchain = data.frame(v.name = character(0), chr.orderD = numeric(0))
240 }
241
242 Jchain = unlist(strsplit(splt[3], ","))
243 Jchain = data.frame(v.name = Jchain, chr.orderJ = 1:length(Jchain))
244
245 } else {
246 genes = read.table("genes.txt", sep="\t", header=TRUE, fill=T, comment.char="")
247
248 Vchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "V",c("IMGT.GENE.DB", "chr.order")]
249 colnames(Vchain) = c("v.name", "chr.orderV")
250 Dchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "D",c("IMGT.GENE.DB", "chr.order")]
251 colnames(Dchain) = c("v.name", "chr.orderD")
252 Jchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "J",c("IMGT.GENE.DB", "chr.order")]
253 colnames(Jchain) = c("v.name", "chr.orderJ")
254 }
255 useD = TRUE
256 if(nrow(Dchain) == 0){
257 useD = FALSE
258 cat("No D Genes in this species/locus")
259 }
260 print(paste(nrow(Vchain), "genes in V"))
261 print(paste(nrow(Dchain), "genes in D"))
262 print(paste(nrow(Jchain), "genes in J"))
263
264 # ---------------------- merge with the frequency count ----------------------
265
266 PRODFV = merge(PRODFV, Vchain, by.x='Top.V.Gene', by.y='v.name', all.x=TRUE)
267
268 PRODFD = merge(PRODFD, Dchain, by.x='Top.D.Gene', by.y='v.name', all.x=TRUE)
269
270 PRODFJ = merge(PRODFJ, Jchain, by.x='Top.J.Gene', by.y='v.name', all.x=TRUE)
271
272 # ---------------------- Create the V, D and J frequency plots and write the data.frame for every plot to a file ----------------------
273
274 print("Report Clonality - V, D and J frequency plots")
275
276 pV = ggplot(PRODFV)
277 pV = pV + geom_bar( aes( x=factor(reorder(Top.V.Gene, chr.orderV)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
278 pV = pV + xlab("Summary of V gene") + ylab("Frequency") + ggtitle("Relative frequency of V gene usage")
279 write.table(x=PRODFV, file="VFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)
280
281 png("VPlot.png",width = 1280, height = 720)
282 pV
283 dev.off();
284
285 if(useD){
286 pD = ggplot(PRODFD)
287 pD = pD + geom_bar( aes( x=factor(reorder(Top.D.Gene, chr.orderD)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
288 pD = pD + xlab("Summary of D gene") + ylab("Frequency") + ggtitle("Relative frequency of D gene usage")
289 write.table(x=PRODFD, file="DFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)
290
291 png("DPlot.png",width = 800, height = 600)
292 print(pD)
293 dev.off();
294 }
295
296 pJ = ggplot(PRODFJ)
297 pJ = pJ + geom_bar( aes( x=factor(reorder(Top.J.Gene, chr.orderJ)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
298 pJ = pJ + xlab("Summary of J gene") + ylab("Frequency") + ggtitle("Relative frequency of J gene usage")
299 write.table(x=PRODFJ, file="JFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)
300
301 png("JPlot.png",width = 800, height = 600)
302 pJ
303 dev.off();
304
314 # ---------------------- Now the frequency plots of the V, D and J families ----------------------
315
316 print("Report Clonality - V, D and J family plots")
317
318 VGenes = PRODF[,c("Sample", "Top.V.Gene")]
319 VGenes$Top.V.Gene = gsub("-.*", "", VGenes$Top.V.Gene)
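# gene family = gene name truncated at the first "-", e.g. "IGHV3-23" is counted as family "IGHV3"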
320 VGenes = data.frame(data.table(VGenes)[, list(Count=.N), by=c("Sample", "Top.V.Gene")])
321 TotalPerSample = data.frame(data.table(VGenes)[, list(total=sum(.SD$Count)), by=Sample])
322 VGenes = merge(VGenes, TotalPerSample, by="Sample")
323 VGenes$Frequency = VGenes$Count * 100 / VGenes$total
324 VPlot = ggplot(VGenes)
325 VPlot = VPlot + geom_bar(aes( x = Top.V.Gene, y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
326 ggtitle("Distribution of V gene families") +
327 ylab("Percentage of sequences")
328 png("VFPlot.png")
329 VPlot
330 dev.off();
331 write.table(x=VGenes, file="VFFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)
332
333 if(useD){
334 DGenes = PRODF[,c("Sample", "Top.D.Gene")]
335 DGenes$Top.D.Gene = gsub("-.*", "", DGenes$Top.D.Gene)
336 DGenes = data.frame(data.table(DGenes)[, list(Count=.N), by=c("Sample", "Top.D.Gene")])
337 TotalPerSample = data.frame(data.table(DGenes)[, list(total=sum(.SD$Count)), by=Sample])
338 DGenes = merge(DGenes, TotalPerSample, by="Sample")
339 DGenes$Frequency = DGenes$Count * 100 / DGenes$total
340 DPlot = ggplot(DGenes)
341 DPlot = DPlot + geom_bar(aes( x = Top.D.Gene, y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
342 ggtitle("Distribution of D gene families") +
343 ylab("Percentage of sequences")
344 png("DFPlot.png")
345 print(DPlot)
346 dev.off();
347 write.table(x=DGenes, file="DFFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)
348 }
349
350 # ---------------------- Plotting the cdr3 length ----------------------
351
352 print("Report Clonality - CDR3 length plot")
353
354 CDR3Length = data.frame(data.table(PRODF)[, list(Count=.N), by=c("Sample", "CDR3.Length.DNA")])
355 TotalPerSample = data.frame(data.table(CDR3Length)[, list(total=sum(.SD$Count)), by=Sample])
356 CDR3Length = merge(CDR3Length, TotalPerSample, by="Sample")
357 CDR3Length$Frequency = CDR3Length$Count * 100 / CDR3Length$total
358 CDR3LengthPlot = ggplot(CDR3Length)
359 CDR3LengthPlot = CDR3LengthPlot + geom_bar(aes( x = CDR3.Length.DNA, y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
360 ggtitle("Length distribution of CDR3") +
361 xlab("CDR3 Length") +
362 ylab("Percentage of sequences")
363 png("CDR3LengthPlot.png",width = 1280, height = 720)
364 CDR3LengthPlot
365 dev.off()
366 write.table(x=CDR3Length, file="CDR3LengthPlot.csv", sep=",",quote=F,row.names=F,col.names=T)
367
368 # ---------------------- Plot the heatmaps ----------------------
369
370 #get the reverse order for the V and D genes
371 revVchain = Vchain
372 revDchain = Dchain
373 revVchain$chr.orderV = rev(revVchain$chr.orderV)
374 revDchain$chr.orderD = rev(revDchain$chr.orderD)
375
376 if(useD){
377 print("Report Clonality - Heatmaps VD")
378 plotVD <- function(dat){
379 if(length(dat[,1]) == 0){
380 return()
381 }
382
383 img = ggplot() +
384 geom_tile(data=dat, aes(x=factor(reorder(Top.D.Gene, chr.orderD)), y=factor(reorder(Top.V.Gene, chr.orderV)), fill=relLength)) +
385 theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
386 scale_fill_gradient(low="gold", high="blue", na.value="white") +
387 ggtitle(paste(unique(dat$Sample), " (N=" , sum(dat$Length, na.rm=T) ,")", sep="")) +
388 xlab("D genes") +
389 ylab("V Genes")
390
391 png(paste("HeatmapVD_", unique(dat[3])[1,1] , ".png", sep=""), width=150+(15*length(Dchain$v.name)), height=100+(15*length(Vchain$v.name)))
392 print(img)
393 dev.off()
394 write.table(x=acast(dat, Top.V.Gene~Top.D.Gene, value.var="Length"), file=paste("HeatmapVD_", unique(dat[3])[1,1], ".csv", sep=""), sep=",",quote=F,row.names=T,col.names=NA)
395 }
396
397 VandDCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.V.Gene", "Top.D.Gene", "Sample")])
398
399 VandDCount$l = log(VandDCount$Length)
400 maxVD = data.frame(data.table(VandDCount)[, list(max=max(l)), by=c("Sample")])
401 VandDCount = merge(VandDCount, maxVD, by.x="Sample", by.y="Sample", all.x=T)
402 VandDCount$relLength = VandDCount$l / VandDCount$max
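# heatmap fill: counts are log-transformed and normalised per sample (relLength = log(N) / max(log(N))),
# so colours are comparable within a sample; a sample whose highest V/D count is 1 gives 0/0 = NaN,
# which is patched below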
403
404 cartegianProductVD = expand.grid(Top.V.Gene = Vchain$v.name, Top.D.Gene = Dchain$v.name)
405
406 completeVD = merge(VandDCount, cartegianProductVD, by.x=c("Top.V.Gene", "Top.D.Gene"), by.y=c("Top.V.Gene", "Top.D.Gene"), all=TRUE)
407
408 completeVD = merge(completeVD, revVchain, by.x="Top.V.Gene", by.y="v.name", all.x=TRUE)
409
410 completeVD = merge(completeVD, Dchain, by.x="Top.D.Gene", by.y="v.name", all.x=TRUE)
411
412 fltr = is.nan(completeVD$relLength)
413 if(any(fltr)){
414 completeVD[fltr,"relLength"] = 0
415 }
416
417 VDList = split(completeVD, f=completeVD[,"Sample"])
418 lapply(VDList, FUN=plotVD)
419 }
420
421 print("Report Clonality - Heatmaps VJ")
422
423 plotVJ <- function(dat){
424 if(length(dat[,1]) == 0){
425 return()
426 }
427 cat(paste(unique(dat[3])[1,1]))
428 img = ggplot() +
429 geom_tile(data=dat, aes(x=factor(reorder(Top.J.Gene, chr.orderJ)), y=factor(reorder(Top.V.Gene, chr.orderV)), fill=relLength)) +
430 theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
431 scale_fill_gradient(low="gold", high="blue", na.value="white") +
432 ggtitle(paste(unique(dat$Sample), " (N=" , sum(dat$Length, na.rm=T) ,")", sep="")) +
433 xlab("J genes") +
434 ylab("V Genes")
435
436 png(paste("HeatmapVJ_", unique(dat[3])[1,1] , ".png", sep=""), width=150+(15*length(Jchain$v.name)), height=100+(15*length(Vchain$v.name)))
437 print(img)
438 dev.off()
439 write.table(x=acast(dat, Top.V.Gene~Top.J.Gene, value.var="Length"), file=paste("HeatmapVJ_", unique(dat[3])[1,1], ".csv", sep=""), sep=",",quote=F,row.names=T,col.names=NA)
440 }
441
442 VandJCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.V.Gene", "Top.J.Gene", "Sample")])
443
444 VandJCount$l = log(VandJCount$Length)
445 maxVJ = data.frame(data.table(VandJCount)[, list(max=max(l)), by=c("Sample")])
446 VandJCount = merge(VandJCount, maxVJ, by.x="Sample", by.y="Sample", all.x=T)
447 VandJCount$relLength = VandJCount$l / VandJCount$max
448
449 cartegianProductVJ = expand.grid(Top.V.Gene = Vchain$v.name, Top.J.Gene = Jchain$v.name)
450
451 completeVJ = merge(VandJCount, cartegianProductVJ, all.y=TRUE)
452 completeVJ = merge(completeVJ, revVchain, by.x="Top.V.Gene", by.y="v.name", all.x=TRUE)
453 completeVJ = merge(completeVJ, Jchain, by.x="Top.J.Gene", by.y="v.name", all.x=TRUE)
454
455 fltr = is.nan(completeVJ$relLength)
456 if(any(fltr)){
457 completeVJ[fltr,"relLength"] = 1
458 }
459
460 VJList = split(completeVJ, f=completeVJ[,"Sample"])
461 lapply(VJList, FUN=plotVJ)
462
463
464
465 if(useD){
466 print("Report Clonality - Heatmaps DJ")
467 plotDJ <- function(dat){
468 if(length(dat[,1]) == 0){
469 return()
470 }
471 img = ggplot() +
472 geom_tile(data=dat, aes(x=factor(reorder(Top.J.Gene, chr.orderJ)), y=factor(reorder(Top.D.Gene, chr.orderD)), fill=relLength)) +
473 theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
474 scale_fill_gradient(low="gold", high="blue", na.value="white") +
475 ggtitle(paste(unique(dat$Sample), " (N=" , sum(dat$Length, na.rm=T) ,")", sep="")) +
476 xlab("J genes") +
477 ylab("D Genes")
478
479 png(paste("HeatmapDJ_", unique(dat[3])[1,1] , ".png", sep=""), width=150+(15*length(Jchain$v.name)), height=100+(15*length(Dchain$v.name)))
480 print(img)
481 dev.off()
482 write.table(x=acast(dat, Top.D.Gene~Top.J.Gene, value.var="Length"), file=paste("HeatmapDJ_", unique(dat[3])[1,1], ".csv", sep=""), sep=",",quote=F,row.names=T,col.names=NA)
483 }
484
485
486 DandJCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.D.Gene", "Top.J.Gene", "Sample")])
487
488 DandJCount$l = log(DandJCount$Length)
489 maxDJ = data.frame(data.table(DandJCount)[, list(max=max(l)), by=c("Sample")])
490 DandJCount = merge(DandJCount, maxDJ, by.x="Sample", by.y="Sample", all.x=T)
491 DandJCount$relLength = DandJCount$l / DandJCount$max
492
493 cartegianProductDJ = expand.grid(Top.D.Gene = Dchain$v.name, Top.J.Gene = Jchain$v.name)
494
495 completeDJ = merge(DandJCount, cartegianProductDJ, all.y=TRUE)
496 completeDJ = merge(completeDJ, revDchain, by.x="Top.D.Gene", by.y="v.name", all.x=TRUE)
497 completeDJ = merge(completeDJ, Jchain, by.x="Top.J.Gene", by.y="v.name", all.x=TRUE)
498
499 fltr = is.nan(completeDJ$relLength)
500 if(any(fltr)){
501 completeDJ[fltr, "relLength"] = 1
502 }
503
504 DJList = split(completeDJ, f=completeDJ[,"Sample"])
505 lapply(DJList, FUN=plotDJ)
506 }
507
508
509 # ---------------------- output tables for the circos plots ----------------------
510
511 print("Report Clonality - Circos data")
512
513 for(smpl in unique(PRODF$Sample)){
514 PRODF.sample = PRODF[PRODF$Sample == smpl,]
515
516 fltr = PRODF.sample$Top.V.Gene == ""
517 if(any(fltr, na.rm=T)){
518 PRODF.sample[fltr, "Top.V.Gene"] = "NA"
519 }
520
521 fltr = PRODF.sample$Top.D.Gene == ""
522 if(any(fltr, na.rm=T)){
523 PRODF.sample[fltr, "Top.D.Gene"] = "NA"
524 }
525
526 fltr = PRODF.sample$Top.J.Gene == ""
527 if(any(fltr, na.rm=T)){
528 PRODF.sample[fltr, "Top.J.Gene"] = "NA"
529 }
530
531 v.d = table(PRODF.sample$Top.V.Gene, PRODF.sample$Top.D.Gene)
532 v.j = table(PRODF.sample$Top.V.Gene, PRODF.sample$Top.J.Gene)
533 d.j = table(PRODF.sample$Top.D.Gene, PRODF.sample$Top.J.Gene)
534
535 write.table(v.d, file=paste(smpl, "_VD_circos.txt", sep=""), sep="\t", quote=F, row.names=T, col.names=NA)
536 write.table(v.j, file=paste(smpl, "_VJ_circos.txt", sep=""), sep="\t", quote=F, row.names=T, col.names=NA)
537 write.table(d.j, file=paste(smpl, "_DJ_circos.txt", sep=""), sep="\t", quote=F, row.names=T, col.names=NA)
538 }
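# each *_circos.txt file is a plain V x D, V x J or D x J contingency table of read counts for one
# sample, written as a tab-separated matrix; presumably consumed by a separate circos plotting step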
539
540 # ---------------------- calculating the clonality score ----------------------
541
542 if("Replicate" %in% colnames(inputdata)) #can only calculate clonality score when replicate information is available
543 {
544 print("Report Clonality - Clonality")
545 write.table(clonalityFrame, "clonalityComplete.csv", sep=",",quote=F,row.names=F,col.names=T)
546 if(clonality_method == "boyd"){
547 samples = split(clonalityFrame, clonalityFrame$Sample, drop=T)
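# per sample, build the matrix handed to lymphclon: one row per clonaltype, one column per replicate
# (columns V1, V2, ...), values = read counts; the 12th element of the infer.clonality() result is
# written out below (assumed here to be the clonality estimate of interest)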
548
549 for (sample in samples){
550 res = data.frame(paste=character(0))
551 sample_id = unique(sample$Sample)[[1]]
552 for(replicate in unique(sample$Replicate)){
553 tmp = sample[sample$Replicate == replicate,]
554 clone_table = data.frame(table(tmp$clonaltype))
555 clone_col_name = paste("V", replicate, sep="")
556 colnames(clone_table) = c("paste", clone_col_name)
557 res = merge(res, clone_table, by="paste", all=T)
558 }
559
560 res[is.na(res)] = 0
561 infer.result = infer.clonality(as.matrix(res[,2:ncol(res)]))
562
563 print(infer.result)
564
565 write.table(data.table(infer.result[[12]]), file=paste("lymphclon_clonality_", sample_id, ".csv", sep=""), sep=",",quote=F,row.names=F,col.names=F)
566
567 res$type = rowSums(res[,2:ncol(res)])
568
569 coincidence.table = data.frame(table(res$type))
570 colnames(coincidence.table) = c("Coincidence Type", "Raw Coincidence Freq")
571 write.table(coincidence.table, file=paste("lymphclon_coincidences_", sample_id, ".csv", sep=""), sep=",",quote=F,row.names=F,col.names=T)
572 }
573 } else {
574 clonalFreq = data.frame(data.table(clonalityFrame)[, list(Type=.N), by=c("Sample", "clonaltype")])
575
576 #write files for every coincidence group of >1
577 samples = unique(clonalFreq$Sample)
578 for(sample in samples){
579 clonalFreqSample = clonalFreq[clonalFreq$Sample == sample,]
580 if(max(clonalFreqSample$Type) > 1){
581 for(i in 2:max(clonalFreqSample$Type)){
582 clonalFreqSampleType = clonalFreqSample[clonalFreqSample$Type == i,]
583 clonalityFrame.sub = clonalityFrame[clonalityFrame$clonaltype %in% clonalFreqSampleType$clonaltype,]
584 clonalityFrame.sub = clonalityFrame.sub[order(clonalityFrame.sub$clonaltype),]
585 write.table(clonalityFrame.sub, file=paste("coincidences_", sample, "_", i, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=T)
586 }
587 }
588 }
589
590 clonalFreqCount = data.frame(data.table(clonalFreq)[, list(Count=.N), by=c("Sample", "Type")])
591 clonalFreqCount$realCount = clonalFreqCount$Type * clonalFreqCount$Count
592 clonalSum = data.frame(data.table(clonalFreqCount)[, list(Reads=sum(realCount)), by=c("Sample")])
593 clonalFreqCount = merge(clonalFreqCount, clonalSum, by.x="Sample", by.y="Sample")
594
595 ct = c('Type\tWeight\n2\t1\n3\t3\n4\t6\n5\t10\n6\t15')
596 tcct = textConnection(ct)
597 CT = read.table(tcct, sep="\t", header=TRUE)
598 close(tcct)
599 clonalFreqCount = merge(clonalFreqCount, CT, by.x="Type", by.y="Type", all.x=T)
600 clonalFreqCount$WeightedCount = clonalFreqCount$Count * clonalFreqCount$Weight
601
602 ReplicateReads = data.frame(data.table(clonalityFrame)[, list(Type=.N), by=c("Sample", "Replicate", "clonaltype")])
603 ReplicateReads = data.frame(data.table(ReplicateReads)[, list(Reads=.N), by=c("Sample", "Replicate")])
604 clonalFreqCount$Reads = as.numeric(clonalFreqCount$Reads)
605 ReplicateReads$Reads = as.numeric(ReplicateReads$Reads)
606 ReplicateReads$squared = as.numeric(ReplicateReads$Reads * ReplicateReads$Reads)
607
608 ReplicatePrint <- function(dat){
609 write.table(dat[-1], paste("ReplicateReads_", unique(dat[1])[1,1] , ".csv", sep=""), sep=",",quote=F,na="-",row.names=F,col.names=F)
610 }
611
612 ReplicateSplit = split(ReplicateReads, f=ReplicateReads[,"Sample"])
613 lapply(ReplicateSplit, FUN=ReplicatePrint)
614
615 ReplicateReads = data.frame(data.table(ReplicateReads)[, list(ReadsSum=sum(as.numeric(Reads)), ReadsSquaredSum=sum(as.numeric(squared))), by=c("Sample")])
616 clonalFreqCount = merge(clonalFreqCount, ReplicateReads, by.x="Sample", by.y="Sample", all.x=T)
617
618 ReplicateSumPrint <- function(dat){
619 write.table(dat[-1], paste("ReplicateSumReads_", unique(dat[1])[1,1] , ".csv", sep=""), sep=",",quote=F,na="-",row.names=F,col.names=F)
620 }
621
622 ReplicateSumSplit = split(ReplicateReads, f=ReplicateReads[,"Sample"])
623 lapply(ReplicateSumSplit, FUN=ReplicateSumPrint)
624
625 clonalFreqCountSum = data.frame(data.table(clonalFreqCount)[, list(Numerator=sum(WeightedCount, na.rm=T)), by=c("Sample")])
626 clonalFreqCount = merge(clonalFreqCount, clonalFreqCountSum, by.x="Sample", by.y="Sample", all.x=T)
627 clonalFreqCount$ReadsSum = as.numeric(clonalFreqCount$ReadsSum) #prevent integer overflow
628 clonalFreqCount$Denominator = (((clonalFreqCount$ReadsSum * clonalFreqCount$ReadsSum) - clonalFreqCount$ReadsSquaredSum) / 2)
629 clonalFreqCount$Result = (clonalFreqCount$Numerator + 1) / (clonalFreqCount$Denominator + 1)
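# clonality score: Weight = choose(Type, 2), the number of replicate pairs sharing a clone seen in
# 'Type' replicates, so Numerator = total observed cross-replicate coincidences; Denominator =
# (ReadsSum^2 - ReadsSquaredSum) / 2 = total possible cross-replicate read pairs; the +1 on both sides
# avoids division by zero, and Types above 6 have no weight defined so they drop out via na.rm=T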
630
631 ClonalityScorePrint <- function(dat){
632 write.table(dat$Result, paste("ClonalityScore_", unique(dat[1])[1,1] , ".csv", sep=""), sep=",",quote=F,na="-",row.names=F,col.names=F)
633 }
634
635 clonalityScore = clonalFreqCount[c("Sample", "Result")]
636 clonalityScore = unique(clonalityScore)
637
638 clonalityScoreSplit = split(clonalityScore, f=clonalityScore[,"Sample"])
639 lapply(clonalityScoreSplit, FUN=ClonalityScorePrint)
640
641 clonalityOverview = clonalFreqCount[c("Sample", "Type", "Count", "Weight", "WeightedCount")]
642
643
644
645 ClonalityOverviewPrint <- function(dat){
646 dat = dat[order(dat[,2]),]
647 write.table(dat[-1], paste("ClonalityOverView_", unique(dat[1])[1,1] , ".csv", sep=""), sep=",",quote=F,na="-",row.names=F,col.names=F)
648 }
649
650 clonalityOverviewSplit = split(clonalityOverview, f=clonalityOverview$Sample)
651 lapply(clonalityOverviewSplit, FUN=ClonalityOverviewPrint)
652 }
653 }
654
655 bak = PRODF
656
657 imgtcolumns = c("X3V.REGION.trimmed.nt.nb","P3V.nt.nb", "N1.REGION.nt.nb", "P5D.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "P3D.nt.nb", "N2.REGION.nt.nb", "P5J.nt.nb", "X5J.REGION.trimmed.nt.nb", "X3V.REGION.trimmed.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "X5J.REGION.trimmed.nt.nb", "N1.REGION.nt.nb", "N2.REGION.nt.nb", "P3V.nt.nb", "P5D.nt.nb", "P3D.nt.nb", "P5J.nt.nb")
658 if(all(imgtcolumns %in% colnames(inputdata)))
659 {
660 print("found IMGT columns, running junction analysis")
661
662 if(locus %in% c("IGK","IGL", "TRA", "TRG")){
663 print("VJ recombination, no filtering on absent D")
664 } else {
665 print("VDJ recombination, using N column for junction analysis")
666 fltr = nchar(PRODF$Top.D.Gene) < 4
667 print(paste("Removing", sum(fltr), "sequences without a identified D"))
668 PRODF = PRODF[!fltr,]
669 }
670
671
672 #ensure certain columns are in the data (files generated with older versions of IMGT Loader)
673 col.checks = c("N.REGION.nt.nb", "N1.REGION.nt.nb", "N2.REGION.nt.nb", "N3.REGION.nt.nb", "N4.REGION.nt.nb")
674 for(col.check in col.checks){
675 if(!(col.check %in% names(PRODF))){
676 print(paste(col.check, "not found, adding new column"))
677 if(nrow(PRODF) > 0){ #because R is annoying...
678 PRODF[,col.check] = 0
679 } else {
680 PRODF = cbind(PRODF, data.frame(N3.REGION.nt.nb=numeric(0), N4.REGION.nt.nb=numeric(0)))
681 }
682 if(nrow(UNPROD) > 0){
683 UNPROD[,col.check] = 0
684 } else {
685 UNPROD = cbind(UNPROD, data.frame(N3.REGION.nt.nb=numeric(0), N4.REGION.nt.nb=numeric(0)))
686 }
687 }
688 }
689
690 num_median = function(x, na.rm=T) { as.numeric(median(x, na.rm=na.rm)) }
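# junction analysis summary per sample (means below, medians further on): VH.DEL / DEL.DH / DH.DEL /
# DEL.JH are the nucleotides trimmed from the V, D and J ends, P1-P4 the palindromic (P) nucleotides,
# N1/N2 the non-templated (N) insertions, and the Total.* columns sum these per read before averaging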
691
692 newData = data.frame(data.table(PRODF)[,list(unique=.N,
693 VH.DEL=mean(.SD$X3V.REGION.trimmed.nt.nb, na.rm=T),
694 P1=mean(.SD$P3V.nt.nb, na.rm=T),
695 N1=mean(rowSums(.SD[,c("N.REGION.nt.nb", "N1.REGION.nt.nb"), with=F], na.rm=T)),
696 P2=mean(.SD$P5D.nt.nb, na.rm=T),
697 DEL.DH=mean(.SD$X5D.REGION.trimmed.nt.nb, na.rm=T),
698 DH.DEL=mean(.SD$X3D.REGION.trimmed.nt.nb, na.rm=T),
699 P3=mean(.SD$P3D.nt.nb, na.rm=T),
700 N2=mean(rowSums(.SD[,c("N2.REGION.nt.nb", "N3.REGION.nt.nb", "N4.REGION.nt.nb"), with=F], na.rm=T)),
701 P4=mean(.SD$P5J.nt.nb, na.rm=T),
702 DEL.JH=mean(.SD$X5J.REGION.trimmed.nt.nb, na.rm=T),
703 Total.Del=mean(rowSums(.SD[,c("X3V.REGION.trimmed.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "X5J.REGION.trimmed.nt.nb"), with=F], na.rm=T)),
704 Total.N=mean(rowSums(.SD[,c("N.REGION.nt.nb", "N1.REGION.nt.nb", "N2.REGION.nt.nb", "N3.REGION.nt.nb", "N4.REGION.nt.nb"), with=F], na.rm=T)),
705 Total.P=mean(rowSums(.SD[,c("P3V.nt.nb", "P5D.nt.nb", "P3D.nt.nb", "P5J.nt.nb"), with=F], na.rm=T)),
706 Median.CDR3.l=median(.SD$CDR3.Length.DNA)),
707 by=c("Sample")])
708 newData[,sapply(newData, is.numeric)] = round(newData[,sapply(newData, is.numeric)],1)
709 write.table(newData, "junctionAnalysisProd_mean.csv" , sep=",",quote=F,na="-",row.names=F,col.names=F)
710
711 newData = data.frame(data.table(PRODF)[,list(unique=.N,
712 VH.DEL=num_median(.SD$X3V.REGION.trimmed.nt.nb, na.rm=T),
713 P1=num_median(.SD$P3V.nt.nb, na.rm=T),
714 N1=num_median(rowSums(.SD[,c("N.REGION.nt.nb", "N1.REGION.nt.nb"), with=F], na.rm=T)),
715 P2=num_median(.SD$P5D.nt.nb, na.rm=T),
716 DEL.DH=num_median(.SD$X5D.REGION.trimmed.nt.nb, na.rm=T),
717 DH.DEL=num_median(.SD$X3D.REGION.trimmed.nt.nb, na.rm=T),
718 P3=num_median(.SD$P3D.nt.nb, na.rm=T),
719 N2=num_median(rowSums(.SD[,c("N2.REGION.nt.nb", "N3.REGION.nt.nb", "N4.REGION.nt.nb"), with=F], na.rm=T)),
720 P4=num_median(.SD$P5J.nt.nb, na.rm=T),
721 DEL.JH=num_median(.SD$X5J.REGION.trimmed.nt.nb, na.rm=T),
722 Total.Del=num_median(rowSums(.SD[,c("X3V.REGION.trimmed.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "X5J.REGION.trimmed.nt.nb"), with=F], na.rm=T)),
723 Total.N=num_median(rowSums(.SD[,c("N.REGION.nt.nb", "N1.REGION.nt.nb", "N2.REGION.nt.nb", "N3.REGION.nt.nb", "N4.REGION.nt.nb"), with=F], na.rm=T)),
724 Total.P=num_median(rowSums(.SD[,c("P3V.nt.nb", "P5D.nt.nb", "P3D.nt.nb", "P5J.nt.nb"), with=F], na.rm=T)),
725 Median.CDR3.l=median(.SD$CDR3.Length.DNA)),
726 by=c("Sample")])
727 newData[,sapply(newData, is.numeric)] = round(newData[,sapply(newData, is.numeric)],1)
728 write.table(newData, "junctionAnalysisProd_median.csv" , sep=",",quote=F,na="-",row.names=F,col.names=F)
729
730 newData = data.frame(data.table(UNPROD)[,list(unique=.N,
731 VH.DEL=mean(.SD$X3V.REGION.trimmed.nt.nb, na.rm=T),
732 P1=mean(.SD$P3V.nt.nb, na.rm=T),
733 N1=mean(rowSums(.SD[,c("N.REGION.nt.nb", "N1.REGION.nt.nb"), with=F], na.rm=T)),
734 P2=mean(.SD$P5D.nt.nb, na.rm=T),
735 DEL.DH=mean(.SD$X5D.REGION.trimmed.nt.nb, na.rm=T),
736 DH.DEL=mean(.SD$X3D.REGION.trimmed.nt.nb, na.rm=T),
737 P3=mean(.SD$P3D.nt.nb, na.rm=T),
738 N2=mean(rowSums(.SD[,c("N2.REGION.nt.nb", "N3.REGION.nt.nb", "N4.REGION.nt.nb"), with=F], na.rm=T)),
739 P4=mean(.SD$P5J.nt.nb, na.rm=T),
740 DEL.JH=mean(.SD$X5J.REGION.trimmed.nt.nb, na.rm=T),
741 Total.Del=mean(rowSums(.SD[,c("X3V.REGION.trimmed.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "X5J.REGION.trimmed.nt.nb"), with=F], na.rm=T)),
742 Total.N=mean(rowSums(.SD[,c("N.REGION.nt.nb", "N1.REGION.nt.nb", "N2.REGION.nt.nb", "N3.REGION.nt.nb", "N4.REGION.nt.nb"), with=F], na.rm=T)),
743 Total.P=mean(rowSums(.SD[,c("P3V.nt.nb", "P5D.nt.nb", "P3D.nt.nb", "P5J.nt.nb"), with=F], na.rm=T)),
744 Median.CDR3.l=median(.SD$CDR3.Length.DNA)),
745 by=c("Sample")])
746 newData[,sapply(newData, is.numeric)] = round(newData[,sapply(newData, is.numeric)],1)
747 write.table(newData, "junctionAnalysisUnProd_mean.csv" , sep=",",quote=F,na="-",row.names=F,col.names=F)
748
749 newData = data.frame(data.table(UNPROD)[,list(unique=.N,
750 VH.DEL=num_median(.SD$X3V.REGION.trimmed.nt.nb, na.rm=T),
751 P1=num_median(.SD$P3V.nt.nb, na.rm=T),
752 N1=num_median(rowSums(.SD[,c("N.REGION.nt.nb", "N1.REGION.nt.nb"), with=F], na.rm=T)),
753 P2=num_median(.SD$P5D.nt.nb, na.rm=T),
754 DEL.DH=num_median(.SD$X5D.REGION.trimmed.nt.nb, na.rm=T),
755 DH.DEL=num_median(.SD$X3D.REGION.trimmed.nt.nb, na.rm=T),
756 P3=num_median(.SD$P3D.nt.nb, na.rm=T),
757 N2=num_median(rowSums(.SD[,c("N2.REGION.nt.nb", "N3.REGION.nt.nb", "N4.REGION.nt.nb"), with=F], na.rm=T)),
758 P4=num_median(.SD$P5J.nt.nb, na.rm=T),
759 DEL.JH=num_median(.SD$X5J.REGION.trimmed.nt.nb, na.rm=T),
760 Total.Del=num_median(rowSums(.SD[,c("X3V.REGION.trimmed.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "X5J.REGION.trimmed.nt.nb"), with=F], na.rm=T)),
761 Total.N=num_median(rowSums(.SD[,c("N.REGION.nt.nb", "N1.REGION.nt.nb", "N2.REGION.nt.nb", "N3.REGION.nt.nb", "N4.REGION.nt.nb"), with=F], na.rm=T)),
762 Total.P=num_median(rowSums(.SD[,c("P3V.nt.nb", "P5D.nt.nb", "P3D.nt.nb", "P5J.nt.nb"), with=F], na.rm=T)),
763 Median.CDR3.l=median(.SD$CDR3.Length.DNA)),
764 by=c("Sample")])
765
766 newData[,sapply(newData, is.numeric)] = round(newData[,sapply(newData, is.numeric)],1)
767 write.table(newData, "junctionAnalysisUnProd_median.csv" , sep=",",quote=F,na="-",row.names=F,col.names=F)
768 }
769
770 PRODF = bak
771
772
773 # ---------------------- D reading frame ----------------------
774
775 D.REGION.reading.frame = PRODF$D.REGION.reading.frame
776
777 D.REGION.reading.frame[is.na(D.REGION.reading.frame)] = "No D"
778
779 D.REGION.reading.frame = data.frame(table(D.REGION.reading.frame))
780
781 write.table(D.REGION.reading.frame, "DReadingFrame.csv" , sep="\t",quote=F,row.names=F,col.names=T)
782
783 D.REGION.reading.frame = ggplot(D.REGION.reading.frame)
784 D.REGION.reading.frame = D.REGION.reading.frame + geom_bar(aes( x = D.REGION.reading.frame, y = Freq), stat='identity', position='dodge' ) + ggtitle("D reading frame") + xlab("Reading frame") + ylab("Frequency")
785
786 png("DReadingFrame.png")
787 D.REGION.reading.frame
788 dev.off()
789
790
791
792
793 # ---------------------- AA composition in CDR3 ----------------------
794
795 AACDR3 = PRODF[,c("Sample", "CDR3.Seq")]
796
797 TotalPerSample = data.frame(data.table(AACDR3)[, list(total=sum(nchar(as.character(.SD$CDR3.Seq)))), by=Sample])
798
799 AAfreq = list()
800
801 for(i in 1:nrow(TotalPerSample)){
802 sample = TotalPerSample$Sample[i]
803 AAfreq[[i]] = data.frame(table(unlist(strsplit(as.character(AACDR3[AACDR3$Sample == sample,c("CDR3.Seq")]), ""))))
804 AAfreq[[i]]$Sample = sample
805 }
806
807 AAfreq = ldply(AAfreq, data.frame)
808 AAfreq = merge(AAfreq, TotalPerSample, by="Sample", all.x = T)
809 AAfreq$freq_perc = as.numeric(AAfreq$Freq / AAfreq$total * 100)
810
811
812 AAorder = read.table(sep="\t", header=TRUE, text="order.aa\tAA\n1\tR\n2\tK\n3\tN\n4\tD\n5\tQ\n6\tE\n7\tH\n8\tP\n9\tY\n10\tW\n11\tS\n12\tT\n13\tG\n14\tA\n15\tM\n16\tC\n17\tF\n18\tL\n19\tV\n20\tI")
813 AAfreq = merge(AAfreq, AAorder, by.x='Var1', by.y='AA', all.x=TRUE)
814
815 AAfreq = AAfreq[!is.na(AAfreq$order.aa),]
816
817 AAfreqplot = ggplot(AAfreq)
818 AAfreqplot = AAfreqplot + geom_bar(aes( x=factor(reorder(Var1, order.aa)), y = freq_perc, fill = Sample), stat='identity', position='dodge' )
819 AAfreqplot = AAfreqplot + annotate("rect", xmin = 0.5, xmax = 2.5, ymin = 0, ymax = Inf, fill = "red", alpha = 0.2)
820 AAfreqplot = AAfreqplot + annotate("rect", xmin = 3.5, xmax = 4.5, ymin = 0, ymax = Inf, fill = "blue", alpha = 0.2)
821 AAfreqplot = AAfreqplot + annotate("rect", xmin = 5.5, xmax = 6.5, ymin = 0, ymax = Inf, fill = "blue", alpha = 0.2)
822 AAfreqplot = AAfreqplot + annotate("rect", xmin = 6.5, xmax = 7.5, ymin = 0, ymax = Inf, fill = "red", alpha = 0.2)
823 AAfreqplot = AAfreqplot + ggtitle("Amino Acid Composition in the CDR3") + xlab("Amino Acid, from Hydrophilic (left) to Hydrophobic (right)") + ylab("Percentage")
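# the shaded rectangles mark the charged residues on the hydrophilic side: red = basic (R, K and H),
# blue = acidic (D and E)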
824
825 png("AAComposition.png",width = 1280, height = 720)
826 AAfreqplot
827 dev.off()
828 write.table(AAfreq, "AAComposition.csv" , sep=",",quote=F,na="-",row.names=F,col.names=T)
829
830