Mercurial > repos > ecology > vigiechiro_bilanenrichirp
comparison BilanEnrichiRP.R @ 1:be4e28da3919 draft
planemo upload for repository https://github.com/galaxyecology/tools-ecology/tools/vigiechiro commit 7ef0e58cbcbf41088e359f00b6c86504c773c271
author | ecology |
---|---|
date | Fri, 26 Apr 2019 12:17:22 -0400 |
parents | c55e09a8b4c8 |
children | 5ad79c210eb9 |
comparison
equal
deleted
inserted
replaced
0:c55e09a8b4c8 | 1:be4e28da3919 |
---|---|
#!/usr/bin/env Rscript

# Post-process identification results from the Vigie-Chiro "Routier" /
# "Pedestre" protocols into HTML and tabular summary reports.
#
# Command-line arguments:
#   args[1] : identification table (UTF-8)
#   args[2] : per-species activity reference table (UTF-8)
#   args[3] : species group list (Latin-1)
args <- commandArgs(trailingOnly = TRUE)

suppressMessages(library(data.table))
suppressMessages(library(DT))
suppressMessages(library(htmlwidgets))

# Ordered confidence levels entered by observers/validators; "" means that
# no manual confidence was recorded for the record.
EchelleErreur <- c("", "POSSIBLE", "PROBABLE", "SUR")
# Error risk (%) associated with each confidence level above.
EchelleNumErreur <- c(99, 50, 10, 1)

# Main identification table produced upstream.
IdC2 <- fread(args[1], encoding = "UTF-8")
# Files from the Routier/Pedestre protocols carry "i" as the second character
# of their name; anything else means the wrong protocol was supplied.
if(substr(IdC2$`nom du fichier`[1],2,2)!="i")
{
# A plain message instead of stop(): the Galaxy tool should not error out.
print("Wrong protocol, please only use this tool for a \'Pedestre\' or \'Routier\' protocol.")
}else{

# Reference activity thresholds per species, and species group metadata.
refRP <- fread(args[2], encoding = "UTF-8")
GroupList <- fread(args[3], encoding = "Latin-1")

# Records without a validator confidence get the empty level ("").
IdC2$ConfV[is.na(IdC2$ConfV)] <- ""

# "Routier" file names carry a "-" in characters 4-7 — presumably the
# protocol marker; confirm against the upstream naming scheme.
Routier <- grepl("-", substr(IdC2$`nom du fichier`[1], 4, 7))

# Error risk per species: 100 * (1 - best identification probability among
# the species' records). To be replaced by glm outputs if time allows.
RisqueErreurT <- aggregate(IdC2$IdProb, by = list(IdC2$IdExtrap)
                           , FUN = function(x) round((1 - max(x)) * 100))
barplot(RisqueErreurT$x, names.arg = RisqueErreurT$Group.1, las = 2)

# Error risk according to observer/validator (a little dirty: it relies on
# the order of the confidence classes: "" < POSSIBLE < PROBABLE < SUR).
RisqueErreurOV0 <- match(IdC2$ConfV, EchelleErreur)
RisqueErreurOV <- aggregate(RisqueErreurOV0, by = list(IdC2$IdExtrap)
                            , FUN = max)
RisqueErreurOV2 <- EchelleNumErreur[RisqueErreurOV$x]
# Final risk: the lower (more optimistic) of machine and human assessments.
RisqueErreur <- pmin(RisqueErreurT$x, RisqueErreurOV2)
# Number of validated files per species: a file counts as validated for a
# species when at least one of its records has a non-empty IdV.
FichValid <- aggregate(IdC2$IdV, by = list(IdC2$IdExtrap, IdC2$'nom du fichier')
                       , FUN = function(x) sum(x != ""))
NbValid2 <- aggregate(FichValid$x, by = list(FichValid$Group.1)
                      , FUN = function(x) sum(x > 0))

# Per-species validation effort: for every record, measure how far it sits
# from the nearest validated record, both on the identification-confidence
# axis and on the time axis. Species without any validated record get Inf.
SpLevels <- levels(as.factor(IdC2$IdExtrap)) # hoisted out of the loop
# Preallocated (previously grown with c() inside the loop);
# seq_along() also makes the loop safe when there are zero levels.
DiffC50 <- numeric(length(SpLevels)) # median confidence gap to validated records
DiffT50 <- numeric(length(SpLevels)) # median time gap to validated records
for (j in seq_along(SpLevels))
{
  IdSp <- subset(IdC2, IdC2$IdExtrap == SpLevels[j])
  IdSp$IdProb[is.na(IdSp$IdProb)] <- 0
  IdSp <- IdSp[order(IdSp$IdProb), ]
  IdSpV <- subset(IdSp, IdSp$IdV != "")
  if (nrow(IdSpV) > 0)
  {
    # Map each record to the validated record with the closest confidence
    # (cut points sit halfway between consecutive validated confidences).
    cuts <- c(-Inf, IdSpV$IdProb[-1] - diff(IdSpV$IdProb) / 2, Inf)
    CorrC <- findInterval(IdSp$IdProb, cuts)
    CorrC2 <- IdSpV$IdProb[CorrC]
    DiffC <- abs(IdSp$IdProb - CorrC2)
    DiffC50[j] <- median(DiffC)

    # Same nearest-validated-record mapping on the time axis.
    IdSp <- IdSp[order(IdSp$TimeNum), ]
    IdSpV <- subset(IdSp, IdSp$IdV != "")
    cuts <- c(-Inf, IdSpV$TimeNum[-1] - diff(IdSpV$TimeNum) / 2, Inf)
    CorrT <- findInterval(IdSp$TimeNum, cuts)
    CorrT2 <- IdSpV$TimeNum[CorrT]
    DiffT <- abs(IdSp$TimeNum - CorrT2)
    DiffT50[j] <- median(DiffT)
  }else{
    DiffC50[j] <- Inf
    DiffT50[j] <- Inf
  }
}
# Effort index per species; RisqueErreurOV$x (1..4, best manual confidence)
# shifts the class upward, yielding indices 1..6 into ClassEffortV.
EffortV <- 1 / DiffC50 / DiffT50
EffortClass <- (EffortV > 0.0005) + (EffortV > 0.005) + RisqueErreurOV$x
barplot(EffortClass - 1, names.arg = NbValid2$Group.1, las = 2)
ClassEffortV <- c("-", "FAIBLE", "SUFFISANT", "SUFFISANT", "FORT", "FORT")
EffortClassMot <- ClassEffortV[EffortClass]
# --- Compare activity with the reference frame ---

# File names are "-"-separated; the 4th token is the point/transect id.
FileInfo <- as.data.table(tstrsplit(IdC2$`nom du fichier`, "-"))
IdC2$Tron <- FileInfo$V4

# The last "-" token is itself "_"-separated; its antepenultimate field set
# to "1" presumably flags the right-hand microphone — confirm upstream.
MicTempsInfo <- as.data.table(tstrsplit(as.data.frame(FileInfo)[, (ncol(FileInfo))], "_"))
MicDroit <- (as.data.frame(MicTempsInfo)[, (ncol(MicTempsInfo) - 2)] == "1")
IdC2$MicDroit <- MicDroit

# A channel whose maximum end time exceeds 0.5 is treated as direct
# recording, the other as time expansion; both direct is a stereo error.
testTempsFin <- aggregate(IdC2$temps_fin, by = list(MicDroit), FUN = max)
testTempsFin$Direct <- (testTempsFin$x > 0.5)
testTF2 <- sum((testTempsFin$x > 0.5))
if (testTF2 > 1) { stop("Probleme stereo : les 2 canaux semblent etre en enregistrement direct") }
IdC2M <- merge(IdC2, testTempsFin, by.x = "MicDroit", by.y = "Group.1")

# Contact counts per species and channel type. FIX: the original aggregated
# IdC2$`nom du fichier` while grouping by IdC2M columns; merge() can reorder
# rows, so use IdC2M consistently (counts are identical since FUN=length
# only looks at group sizes, but the mixed frames were an accident waiting).
ActMoy <- aggregate(IdC2M$`nom du fichier`
                    , by = list(IdC2M$IdExtrap, IdC2M$Direct), FUN = length)
# Reference thresholds: first column block for "Routier", the rest for
# "Pedestre"; one row per species present in the activity table.
ListSpref <- match(levels(as.factor(ActMoy$Group.1)), refRP$Espece)
Subref <- refRP[ListSpref]
if (Routier)
{
  Subref <- Subref[, c(1:17)]
}else{
  Subref <- Subref[, c(1, 18:33)]
}
QualifActE <- vector() # activity class per species, expansion channel
QualifActD <- vector() # activity class per species, direct channel
# Classify each species' contact count against its reference thresholds
# (Subref columns 6-8 for expansion, 14-16 for direct), yielding an index
# 1..4 later mapped to FAIBLE..TRES FORTE. NA when the species is missing
# from the reference table or absent on that channel.
ActLevels <- levels(as.factor(ActMoy$Group.1)) # hoisted out of the loop
for (k in seq_along(ActLevels))
{
  Actsub <- subset(ActMoy, ActMoy$Group.1 == ActLevels[k])
  if (is.na(Subref[k, 2]))
  {
    # Species not in the reference table: no activity level on either channel.
    QualifActE <- c(QualifActE, NA)
    QualifActD <- c(QualifActD, NA)
  }else{
    # Expansion channel (Group.2 == FALSE).
    ActE <- subset(Actsub, Actsub$Group.2 == FALSE)
    if (nrow(ActE) == 0)
    {
      QualifActE <- c(QualifActE, NA)
    }else{
      # c() instead of cbind(): findInterval() expects a vector of sorted
      # cut points, not a 1-row matrix (same values, clearer intent).
      cuts <- c(-Inf, as.numeric(Subref[k, 6]), as.numeric(Subref[k, 7])
                , as.numeric(Subref[k, 8]), Inf)
      QualifActE <- c(QualifActE, findInterval(ActE$x, cuts, left.open = TRUE))
    }
    # Direct channel (Group.2 == TRUE).
    ActD <- subset(Actsub, Actsub$Group.2 == TRUE)
    if (nrow(ActD) == 0)
    {
      QualifActD <- c(QualifActD, NA)
    }else{
      cuts <- c(-Inf, as.numeric(Subref[k, 14]), as.numeric(Subref[k, 15])
                , as.numeric(Subref[k, 16]), Inf)
      QualifActD <- c(QualifActD, findInterval(ActD$x, cuts, left.open = TRUE))
    }
  }
}
ClassAct <- c("FAIBLE", "MODEREE", "FORTE", "TRES FORTE")
QualifActMotE <- ClassAct[QualifActE]
QualifActMotD <- ClassAct[QualifActD]

# Contacts per species (rows) x channel (FALSE = expansion, TRUE = direct).
# FIX: dcast() declares value.var after `...`, so the original `value=x`
# was silently swallowed by `...` and the call only worked because "x" is
# also dcast's default guess for the value column; name it explicitly.
ActED <- dcast(data = ActMoy, formula = Group.1 ~ Group.2, value.var = "x")
ActED[is.na(ActED)] <- 0
# Organize the per-species summary table.
SummPart0 <- cbind(Esp = levels(as.factor(IdC2M$IdExtrap))
                   , RisqueErreur, NbValid = NbValid2$x, EffortValid = EffortClassMot)

# Expansion columns (absent when nothing was recorded in expansion).
test <- match("FALSE", colnames(ActED))
if (!is.na(test))
{
  SummPart0 <- cbind(SummPart0, Contacts_Expansion = ActED$'FALSE'
                     , Niveau_Activite_Expansion = QualifActMotE)
}else{
  SummPart0 <- cbind(SummPart0, Contacts_Expansion = ""
                     , Niveau_Activite_Expansion = "")
}
# Direct columns, same logic.
test <- match("TRUE", colnames(ActED))
if (!is.na(test))
{
  SummPart0 <- cbind(SummPart0, Contacts_Direct = ActED$'TRUE'
                     , Niveau_Activite_Direct = QualifActMotD)
}else{
  SummPart0 <- cbind(SummPart0, Contacts_Direct = ""
                     , Niveau_Activite_Direct = "")
}

# Join the species metadata, then order by group and contact counts.
InfoSp <- c("GroupFR", "NomFR", "Scientific name", "Esp")
GroupShort <- GroupList[, ..InfoSp]
SummPart <- merge(GroupShort, SummPart0, by = "Esp")
IndexGroupe <- c("Autre", "Sauterelle", "Chauve-souris")
SummPart$IndexSumm <- match(SummPart$GroupFR, IndexGroupe)
SummPart <- SummPart[with(SummPart
                          , order(IndexSumm, as.numeric(Contacts_Direct), as.numeric(Contacts_Expansion), decreasing = TRUE)), ]
colnames(SummPart) <- c("Code", "Groupe", "Nom francais", "Nom scientifique"
                        , "Risque d'erreur (%)", "Nb Validations"
                        , "Effort de validation", "Nb de Contacts en expansion"
                        , "Niveau d'Activite en expansion"
                        , "Nb de Contacts en direct"
                        , "Niveau d'Activite en direct", "TriGroupe")

# to do: extend colors to other columns to improve readability
SummHTML <- datatable(SummPart, rownames = FALSE) %>%
  formatStyle(columns = c("Code", "Groupe", "Nom francais", "Nom scientifique", "Risque d'erreur (%)"), valueColumns = "Risque d'erreur (%)",
              background = styleInterval(c(1, 10, 50), c("white", "khaki", "orange", "orangered"))) %>%
  formatStyle(columns = "Effort de validation",
              background = styleEqual(c("-", "FAIBLE", "SUFFISANT", "FORT"), c("white", "cyan", "royalblue", "darkblue"))) %>%
  formatStyle(columns = c("Nb de Contacts en expansion", "Niveau d'Activite en expansion"), valueColumns = "Niveau d'Activite en expansion",
              background = styleEqual(c("FAIBLE", "MODEREE", "FORTE", "TRES FORTE"), c("palegoldenrod", "greenyellow", "limegreen", "darkgreen"))) %>%
  formatStyle(columns = c("Nb de Contacts en direct", "Niveau d'Activite en direct"), valueColumns = "Niveau d'Activite en direct",
              background = styleEqual(c("FAIBLE", "MODEREE", "FORTE", "TRES FORTE"), c("palegoldenrod", "greenyellow", "limegreen", "darkgreen")))


saveWidget(SummHTML, "output-summaryRP.html")
# FIX: quote=FALSE restored (dropped in this revision); the default
# quote=TRUE wraps character fields in '"' inside the tab-separated output.
write.table(SummPart, "output-summaryRP.tabular", row.names = FALSE, sep = "\t", quote = FALSE)
#write.csv2(SummPart,"output-summaryRP.tabular",row.names=F) #for testing
# --- Detailed summary per point/transect (Session) ---

# Channel label per record; vectorized ifelse() replaces the per-element
# sapply(if ...) loop (Direct is logical with no NA by construction above).
IdC2M$Canal <- ifelse(IdC2M$Direct, "Direct", "Expansion")

# Contact counts per species x channel x session; ActMoyTA only feeds the
# color-scale quantiles below, ActMoyT becomes the displayed table.
ActMoyTA <- aggregate(IdC2M$`nom du fichier`, by = list(IdC2M$IdExtrap, IdC2M$Canal, IdC2M$Session), FUN = length)
ActMoyT <- dcast(data = IdC2M, formula = IdExtrap + Canal ~ Session
                 , fun.aggregate = length)
SummPartshort <- cbind(SummPart[, c(1:5)], TriGroupe = SummPart[, TriGroupe])
SummPartTron <- merge(SummPartshort, ActMoyT, by.x = "Code", by.y = "IdExtrap")
SummPartTron <- SummPartTron[order(TriGroupe, decreasing = TRUE), ]

# Heat-map colors: white -> red across the 5%..95% count quantiles.
ListSession <- levels(as.factor(IdC2M$Session))
brks <- quantile(ActMoyTA$x, probs = seq(.05, .95, .05), na.rm = TRUE) - 1
clrs <- round(seq(255, 40, length.out = length(brks) + 1), 0) %>%
  {paste0("rgb(255,", ., ",", ., ")")}


# to do: extend colors to other columns to improve readability
SummHTMLT <- datatable(SummPartTron, rownames = FALSE) %>%
  formatStyle(columns = c("Code", "Groupe", "Nom francais", "Nom scientifique", "Risque d'erreur (%)"), valueColumns = "Risque d'erreur (%)",
              background = styleInterval(c(1, 10, 50), c("white", "khaki", "orange", "orangered"))) %>%
  formatStyle(columns = ListSession, backgroundColor = styleInterval(brks, clrs))

saveWidget(SummHTMLT, "output-detailRP.html")
# FIX: quote=FALSE restored so character fields are not quoted in the
# tab-separated output (default quote=TRUE would add '"' around strings).
write.table(SummPartTron, "output-detailRP.tabular", row.names = FALSE, sep = "\t", quote = FALSE)
# write.csv2(SummPartTron,"output-detailRP.tabular",row.names=F)#for testing
}