Changeset 2ae2d26e3270 (parent 12:ddaf84e15d06, 2018-11-08; child 14:90708fdbc22d, 2020-11-18)

Commit message:
planemo upload for repository https://github.com/HegemanLab/w4mcorcov_galaxy_wrapper/tree/master commit e89c652c0849eb1d5a1e6c9100c72c64a8d388b4

Modified files:
w4mcorcov.xml w4mcorcov_calc.R w4mcorcov_lib.R w4mcorcov_salience.R w4mcorcov_wrapper.R
diff -r ddaf84e15d06 -r 2ae2d26e3270 w4mcorcov.xml
--- a/w4mcorcov.xml	Thu Nov 08 23:06:09 2018 -0500
+++ b/w4mcorcov.xml	Wed Dec 12 09:20:02 2018 -0500
@@ -1,10 +1,10 @@
-<tool id="w4mcorcov" name="OPLS-DA_Contrasts" version="0.98.16">
+<tool id="w4mcorcov" name="OPLS-DA_Contrasts" version="0.98.17">
 <description>OPLS-DA Contrasts of Univariate Results</description>
 <macros>
 <xml name="paramPairSigFeatOnly">
 <param name="pairSigFeatOnly" type="boolean" checked="true" truevalue="TRUE" falsevalue="FALSE"
 label="Retain only pairwise-significant features"
-help="When this option is set to 'Yes', analysis will be performed including only features that differ significantly for the pair of levels being contrasted; when set to 'No', any feature that varies significantly across all levels will be included (i.e., exclude any feature that is not significantly different across all levels). See examples below." />
+help="When this option is set to 'Yes', analysis will be performed including only features scored by the univariate test as differing significantly for the pair of levels being contrasted; when set to 'No', any feature that varies significantly across all levels will be included (i.e., exclude only features not scored by the univariate test as significantly varying when all levels are considered). See examples below." />
 </xml>
 <xml name="cplots">
 <param name="cplot_y" label="C-plot Y-axis" type="select" help="Choose the Y-axis for C-plots.">
@@ -17,14 +17,12 @@
 <param name="cplot_o" type="boolean" checked="true" truevalue="TRUE" falsevalue="FALSE"
 label="Produce orthogonal C-plot"
 help="When this option is set to 'Yes', correlation will be plotted against vip4 for orthogonal loadings." />
+<param name="fdr_features" type="text" value="ALL"
+ label="How many features for p-value calculation?"
+ help="Specify how many features should be used to perform family-wise error rate adjustment of p-values for covariance and correlation. If you were to eliminate features from the data matrix based on significance criteria prior to running this tool, you would want to include them in the count here to avoid underestimating the p-value. Specify 'ALL' to signify that all features that could impact p-value calculation are included in the data matrix."/>
 </xml>
 </macros>
 <requirements>
-<!--
-<requirement type="package" version="3.4.1">r-base</requirement>
-<requirement type="package" version="1.1_4">r-batch</requirement>
-<requirement type="package" version="1.2.14">bioconductor-ropls</requirement>
--->
 <requirement type="package">r-base</requirement>
 <requirement type="package">r-batch</requirement>
 <requirement type="package" version="1.10.0">bioconductor-ropls</requirement>
@@ -35,9 +33,9 @@
 sampleMetadata_in '$sampleMetadata_in'
 variableMetadata_in '$variableMetadata_in'
 facC '$facC'
-#if str( $signif_test.tesC ) == "none":
-tesC "none"
-pairSigFeatOnly "FALSE"
+#if str( $signif_test.tesC ) == 'none':
+tesC 'none'
+pairSigFeatOnly 'FALSE'
 #else:
 tesC '$signif_test.tesC'
 pairSigFeatOnly '$signif_test.pairSigFeatOnly'
@@ -45,20 +43,21 @@
 levCSV '$levCSV'
 matchingC '$matchingC'
 labelFeatures '$labelFeatures'
-#if str( $xplots.expPlot ) == "none":
-cplot_p "FALSE"
-cplot_o "FALSE"
-cplot_y "correlation"
-#else if str( $xplots.expPlot ) == "cplot":
-cplot_p '$xplots.cplot_p'
-cplot_o '$xplots.cplot_o'
-cplot_y '$xplots.cplot_y'
+#if str( $advanced.advancedFeatures ) == 'none':
+fdr_features 'ALL'
+cplot_p 'FALSE'
+cplot_o 'FALSE'
+cplot_y 'correlation'
+#else if str( $advanced.advancedFeatures ) == 'a

[... middle of the diff truncated in the changeset view ...]

---------------------------+
 | Number of features having extreme loadings | 0 |
 +--------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------+
+ | How many features for p-value calculation? | ALL |
+ +--------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------+
 | Output primary table | https://raw.githubusercontent.com/HegemanLab/w4mcorcov_galaxy_wrapper/master/tools/w4mcorcov/test-data/expected_contrast_corcov_global.tsv |
 +--------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------+
 | Output salience table | https://raw.githubusercontent.com/HegemanLab/w4mcorcov_galaxy_wrapper/master/tools/w4mcorcov/test-data/expected_contrast_salience_global.tsv |
@@ -812,6 +843,8 @@
 +--------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------+
 | Number of features having extreme loadings | 3 |
 +--------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------+
+ | How many features for p-value calculation? | ALL |
+ +--------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------+
 | Output primary table | https://raw.githubusercontent.com/HegemanLab/w4mcorcov_galaxy_wrapper/master/tools/w4mcorcov/test-data/expected_contrast_corcov_lohi.tsv |
 +--------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------+
 | Output salience table | https://raw.githubusercontent.com/HegemanLab/w4mcorcov_galaxy_wrapper/master/tools/w4mcorcov/test-data/expected_contrast_salience_lohi.tsv |
@@ -830,6 +863,17 @@
 <citations>
 <!-- this tool -->
 <citation type="doi">10.5281/zenodo.1034784</citation>
+<!-- R project -->
+<citation type="bibtex"><![CDATA[
+ @Manual{,
+ title = {R: A Language and Environment for Statistical Computing},
+ author = {{R Core Team}},
+ organization = {R Foundation for Statistical Computing},
+ address = {Vienna, Austria},
+ year = {2018},
+ url = {https://www.R-project.org/},
+ }
+]]></citation>
 <!-- Fisher_1921: Fisher z-transformation of correlation coefficient -->
 <citation type="bibtex"><![CDATA[
 @article{Fisher_1921,
@@ -887,7 +931,7 @@
 address = {Roswell Park Cancer Institute},
 }
 ]]></citation>
-<!-- Wiklund_2008 OPLS PLS-DA and S-PLOT -->
+<!-- Wiklund_2008 OPLS-DA and S-PLOT -->
 <citation type="doi">10.1021/ac0713510</citation>
 <!-- Yekutieli_2001 The control of the false discovery rate in multiple testing under dependency -->
 <citation type="doi">10.1214/aos/1013699998</citation>
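Context for the new fdr_features parameter above: R's p.adjust() accepts an n argument larger than the number of p-values supplied, which is how a feature count bigger than the filtered data matrix can be honored. A minimal sketch with made-up p-values (not part of the changeset):

    raw_p <- c(0.001, 0.004, 0.030)          # p-values for the features retained in the matrix
    p.adjust(raw_p, method = "BY")           # Benjamini-Yekutieli over the 3 retained features only
    p.adjust(raw_p, method = "BY", n = 10)   # over all 10 features that could have been tested;
                                             # a larger n yields larger (more conservative) adjusted p-values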
diff -r ddaf84e15d06 -r 2ae2d26e3270 w4mcorcov_calc.R
--- a/w4mcorcov_calc.R	Thu Nov 08 23:06:09 2018 -0500
+++ b/w4mcorcov_calc.R	Wed Dec 12 09:20:02 2018 -0500
@@ -1,12 +1,4 @@
-# center with 'colMeans()' - ref: http://gastonsanchez.com/visually-enforced/how-to/2014/01/15/Center-data-in-R/
-center_colmeans <- function(x) {
- xcenter = colMeans(x)
- x - rep(xcenter, rep.int(nrow(x), ncol(x)))
-}
-
-#### OPLS-DA
-algoC <- "nipals"
-
+# compute and output detail plots
 do_detail_plot <- function(
 x_dataMatrix
 , x_predictor
@@ -21,7 +13,7 @@
 off <- function(x) if (x_show_labels == "0") 0 else x
 if ( x_is_match
 && ncol(x_dataMatrix) > 0
-&& length(unique(x_predictor))> 1
+&& length(unique(x_predictor)) > 1
 && x_crossval_i < nrow(x_dataMatrix)
 ) {
 my_oplsda <- opls(
@@ -36,20 +28,22 @@
 , scaleC = "pareto" # data centered and pareto scaled here only. This line fixes issue #2.
 )
 # strip out variables having negligible variance
-x_dataMatrix <- x_dataMatrix[,names(my_oplsda@vipVn), drop = FALSE]
+x_dataMatrix <- x_dataMatrix[ , names(my_oplsda@vipVn), drop = FALSE]
 my_oplsda_suppLs_y_levels <- levels(as.factor(my_oplsda@suppLs$y))
 
 fctr_lvl_1 <- my_oplsda_suppLs_y_levels[1]
 fctr_lvl_2 <- my_oplsda_suppLs_y_levels[2]
 do_s_plot <- function(
-x_env
-, predictor_projection_x = TRUE
-, cplot_x = FALSE
-, cor_vs_cov_x = NULL
-)
-{
+x_env
+, predictor_projection_x = TRUE
+, cplot_x = FALSE
+, cor_vs_cov_x = NULL
+) {
 if (cplot_x) {
 cplot_y_correlation <- (x_env$cplot_y == "correlation")
+default_lim_x <- 10
+} else {
+default_lim_x <- 1.2
 }
 if (is.null(cor_vs_cov_x)) {
 my_cor_vs_cov <- cor_vs_cov(
@@ -57,14 +51,17 @@
 , ropls_x = my_oplsda
 , predictor_projection_x = predictor_projection_x
 , x_progress
+, x_env
 )
 } else {
 my_cor_vs_cov <- cor_vs_cov_x
 }
-# str(my_cor_vs_cov)
+
 if (is.null(my_cor_vs_cov) || sum(!is.na(my_cor_vs_cov$tsv1$covariance)) < 2) {
 if (is.null(cor_vs_cov_x)) {
-x_progress("No cor_vs_cov data produced")
+x_progress(
+sprintf("No cor_vs_cov data produced for level %s versus %s", fctr_lvl_1, fctr_lvl_2)
+)
 }
 plot(x=1, y=1, xaxt="n", yaxt="n", xlab="", ylab="", type="n")
 text(x=1, y=1, labels="too few covariance data")
@@ -76,216 +73,240 @@
 min_x <- min(covariance, na.rm = TRUE)
 max_x <- max(covariance, na.rm = TRUE)
 lim_x <- max(sapply(X=c(min_x, max_x), FUN=abs))
-covariance <- covariance / lim_x
-lim_x <- 1.2
-# "It is generally accepted that a variable should be selected if vj>1, [27–29],
+
+# Regarding using VIP as a guide to selecting a biomarker:
+# "It is generally accepted that a variable should be selected if vj>1, [27–29],
 # but a proper threshold between 0.83 and 1.21 can yield more relevant variables according to [28]."
 # (Mehmood 2012 doi:10.1186/1748-7188-6-27)
 plus_cor <- correlation
 plus_cov <- covariance
-cex <- 0.65
-which_projection <- if (projection == 1) "t1" else "o1"
-which_loading <- if (projection == 1) "parallel" else "orthogonal"
-if (projection == 1) { # predictor-projection
-vipcp <- pmax(0, pmin(1,(vip4p-0.83)/(1.21-0.83)))
-if (!cplot_x) { # S-plot predictor-projection
-my_xlab <- "relative covariance(feature,t1)"
-my_x <- plus_cov
-my_ylab <- "correlation(feature,t1)"
-my_y <- plus_cor
-} else { # C-plot predictor-projection
-my_xlab <- "variable importance in predictor-projection"
-my_x <- vip4p
-if (cplot_y_correlation) {
+if (length(plus_cor) != 0 && length(plus_cor) != 0) {
+cex <- 0.65
+if (projection == 1) {

[... middle of the diff truncated in the changeset view ...]

$level1 <- rep.int(x = fctr_lvl_1, times = feature_count)
-result$level2 <- rep.int(x = fctr_lvl_2, times = feature_count)
+result$level1 <- rep.int(x = fctr_lvl_1, times = n_features)
+result$level2 <- rep.int(x = fctr_lvl_2, times = n_features)
 greaterLevel <- sapply(
 X = result$correlation
-, FUN = function(my_corr)
+, FUN = function(my_corr) {
 tryCatch({
-if ( is.nan( my_corr ) ) {
-NA
+if ( is.na(my_corr) || ! is.numeric( my_corr ) ) {
+NA
 } else {
 if ( my_corr < 0 ) fctr_lvl_1 else fctr_lvl_2
 }
-}, error = function(e) {
+}
+, error = function(e) {
+print(my_corr)
 x_progress(
 sprintf(
 "cor_vs_cov -> sapply: error - substituting NA - %s"
@@ -992,16 +1035,11 @@
 )
 )
 NA
-})
+}
+)
+}
 )
 
-# begin fixes for https://github.com/HegemanLab/w4mcorcov_galaxy_wrapper/issues/1
-featureID <- names(ropls_x@vipVn)
-greaterLevel <- greaterLevel[featureID]
-result$correlation <- result$correlation[featureID]
-result$covariance <- result$covariance[featureID]
-# end fixes for https://github.com/HegemanLab/w4mcorcov_galaxy_wrapper/issues/1
-
 # build a data frame to hold the content for the tab-separated values file
 tsv1 <- data.frame(
 featureID = featureID
@@ -1016,8 +1054,8 @@
 , loadp = result$loadp
 , loado = result$loado
 , cor_p_val_raw = result$p_value_raw
-, cor_p_value = p.adjust(p = result$p_value_raw, method = "BY")
-, cor_ci_lower = result$ci_lower
+, cor_p_value = result$p_value
+, cor_ci_lower = result$ci_lower
 , cor_ci_upper = result$ci_upper
 )
 rownames(tsv1) <- tsv1$featureID
@@ -1039,7 +1077,7 @@
 tsv1 <- tsv1[!is.na(tsv1$covariance),]
 superresult$tsv1 <- tsv1
 
-# # I did not include these but left them commentd out in case future
+# # I did not include these but left them commentd out in case future
 # # consumers of this routine want to use it in currently unanticipated ways
 # result$superresult <- superresult
 # result$oplsda <- ropls_x
@@ -1059,22 +1097,28 @@
 # which follows
 # https://en.wikipedia.org/wiki/Fisher_transformation#Definition
 
-correl.ci <- function(r, n, a = 0.05, rho = 0) {
-## r is the calculated correlation coefficient for n pairs
+correl.ci <- function(r, n_obs, n_vars, a = 0.05, rho = 0) {
+## r is the calculated correlation coefficient for n_obs pairs of observations of one variable
 ## a is the significance level
 ## rho is the hypothesised correlation
 zh0 <- atanh(rho) # 0.5*log((1+rho)/(1-rho)), i.e., Fisher's z-transformation for Ho
 zh1 <- atanh(r) # 0.5*log((1+r)/(1-r)), i.e., Fisher's z-transformation for H1
-se <- (1 - r^2)/sqrt(n - 3) ## standard error for Fisher's z-transformation of Ho
+se <- (1 - r^2)/sqrt(n_obs - 3) ## standard error for Fisher's z-transformation of Ho
 test <- (zh1 - zh0)/se ### test statistic
 pvalue <- 2*(1 - pnorm(abs(test))) ## p-value
-zL <- zh1 - qnorm(1 - a/2)*se
-zH <- zh1 + qnorm(1 - a/2)*se
-fishL <- tanh(zL) # (exp(2*zL)-1)/(exp(2*zL)+1), i.e., lower confidence limit
-fishH <- tanh(zH) # (exp(2*zH)-1)/(exp(2*zH)+1), i.e., upper confidence limit
-CI <- c(fishL, fishH)
-names(CI) <- c('lower', 'upper')
-list(correlation = r, p.value = pvalue, CI = CI)
+pvalue.adj <- p.adjust(p = pvalue, method = "BY", n = n_vars)
+z_L <- zh1 - qnorm(1 - a/2)*se
+z_h <- zh1 + qnorm(1 - a/2)*se
+fish_l <- tanh(z_L) # (exp(2*z_l)-1)/(exp(2*z_l)+1), i.e., lower confidence limit
+fish_h <- tanh(z_h) # (exp(2*z_h)-1)/(exp(2*z_h)+1), i.e., upper confidence limit
+ci <- c(fish_l, fish_h)
+names(ci) <- c("lower", "upper")
+list(
+correlation = r
+, p.value.raw = pvalue
+, p.value = pvalue.adj
+, CI = ci
+)
 }
 
-# vim: sw=2 ts=2 et :
+# vim: sw=2 ts=2 et ai :
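For reference, the revised correl.ci computes a Fisher z-transform test with a Benjamini-Yekutieli adjustment over n_vars hypotheses. A self-contained sketch mirroring the committed logic (the function name and example values here are illustrative, not the tool's API):

    correl_ci_sketch <- function(r, n_obs, n_vars, a = 0.05, rho = 0) {
      z1 <- atanh(r)                        # Fisher z-transformation of the observed correlation
      se <- (1 - r^2) / sqrt(n_obs - 3)     # standard error as used in the committed code
      p_raw <- 2 * (1 - pnorm(abs((z1 - atanh(rho)) / se)))
      list(
        p.value.raw = p_raw
      , p.value = p.adjust(p_raw, method = "BY", n = n_vars)   # adjust as if n_vars tests were run
      , CI = tanh(z1 + c(lower = -1, upper = 1) * qnorm(1 - a / 2) * se)
      )
    }
    correl_ci_sketch(r = 0.6, n_obs = 20, n_vars = 100)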
diff -r ddaf84e15d06 -r 2ae2d26e3270 w4mcorcov_lib.R
--- a/w4mcorcov_lib.R	Thu Nov 08 23:06:09 2018 -0500
+++ b/w4mcorcov_lib.R	Wed Dec 12 09:20:02 2018 -0500
@@ -1,3 +1,3 @@
 suppressMessages(library(batch))
 suppressMessages(library(ropls))
-suppressMessages(library(methods))
+#suppressMessages(library(methods))
diff -r ddaf84e15d06 -r 2ae2d26e3270 w4mcorcov_salience.R
--- a/w4mcorcov_salience.R	Thu Nov 08 23:06:09 2018 -0500
+++ b/w4mcorcov_salience.R	Wed Dec 12 09:20:02 2018 -0500
@@ -19,14 +19,21 @@
 failure_action("w4msalience: Expected data_matrix to be a matrix (or data.frame) of numeric")
 return (NULL)
 }
+
+feature_names <- colnames(t_data_matrix)
+n_features <- ncol(t_data_matrix)
-n_features_plus_1 <- 1 + n_features
-features <- colnames(t_data_matrix)
 n_samples <- nrow(t_data_matrix)
 if ( length(sample_class) != n_samples ) {
 strF(data_matrix)
 strF(sample_class)
-failure_action(sprintf("w4msalience: The data_matrix has %d samples but sample_class has %d", n_samples, length(sample_class)))
+failure_action(
+sprintf(
+"w4msalience: The data_matrix has %d samples but sample_class has %d"
+, n_samples
+, length(sample_class)
+)
+)
 return (NULL)
 }
 # end sanity checks
@@ -39,13 +46,6 @@
 , FUN = "median"
 )
-
-# "For each feature, 'select sample_class, max(intensity) from feature group by sample_class'."
-maxOfFeatureBySampleClassLevel <- aggregate(
-x = as.data.frame(t_data_matrix)
-, by = list(sample_class)
-, FUN = "max"
-)
-
 # "For each feature, 'select sample_class, rcv(intensity) from feature group by sample_class'."
 # cv is less robust; deviation from normality degrades performance
 # cv(x) == sd(x) / mean(x)
@@ -56,61 +56,72 @@
 , by = list(sample_class)
 , FUN = "mad"
 )
-rcvOfFeatureBySampleClassLevel <- as.matrix(
-madOfFeatureBySampleClassLevel[,2:n_features_plus_1] / medianOfFeatureBySampleClassLevel[,2:n_features_plus_1]
-)
-rcvOfFeatureBySampleClassLevel[is.nan(rcvOfFeatureBySampleClassLevel)] <- max(9999,max(rcvOfFeatureBySampleClassLevel, na.rm = TRUE))
-
-# "For each feature, 'select max(max_feature_intensity) from feature'."
-maxApplyMaxOfFeatureBySampleClassLevel <- sapply(
-X = 1:n_features
-, FUN = function(i) {
-match(
-max(maxOfFeatureBySampleClassLevel[, i + 1])
-, maxOfFeatureBySampleClassLevel[, i + 1]
-)
-}
-)
-
-# "For each feature, 'select mean(median_feature_intensity) from feature'."
-meanApplyMedianOfFeatureBySampleClassLevel <- sapply(
-X = 1:n_features
-, FUN = function(i) mean(medianOfFeatureBySampleClassLevel[, i + 1])
-)
+# Note that `apply(X=array(1:10), MARGIN = 1, FUN = function(x) return(c(x,x^2)))`
+# produces a matrix with two rows and ten columns
-# Compute the 'salience' for each feature, i.e., how salient the intensity of a feature
-# is for one particular class-level relative to the intensity across all class-levels.
-salience_df <- data.frame(
-# the feature name
-feature = features
-# the name (or factor-level) of the class-level with the highest median intensity for the feature
-, max_level = medianOfFeatureBySampleClassLevel[maxApplyMaxOfFeatureBySampleClassLevel,1]
-# the median intensity for the feature and the level max_level
-, max_median = sapply(
-X = 1:n_features
-, FUN = function(i) {
-maxOfFeatureBySampleClassLevel[maxApplyMaxOfFeatureBySampleClassLevel[i], 1 + i]
-}
+my_list <- apply(
+X = array(1:n_features)
+, MARGIN = 1
+, FUN = function(x) {
+my_df <- data.frame(
+median = medianOfFeatureBySampleClassLevel[ , 1 + x]
+, mad = madOfFeatureBySampleClassLevel[ , 1 + x]
+)
+my_df$salient_level <- medianOfFeatureBySampleClassLevel[ , 1]
+my_df <- my_df[ order(my_df$median, decreasing = TRUE), ]
+my_dist_df <- my_df[ 1:2, ]
+# "robust coefficient of variation", i.e.,
+# mad(feature-intensity for class-level max_level) / median(feature-intensity for class-level max_level)
+rcv_result <- my_dist_df$mad[1] / my_dist_df$median[1]
+dist_result <-
+( my_dist_df$median[1] - my_dist_df$median[2] ) /
+sqrt( my_dist_df$mad[1] * my_dist_df$mad[2] )
+if (is.infinite(dist_result) || is.nan(dist_result))
+dist_result <- 0
+mean_median <- mean(my_df$median)
+salience_result <- if (mean_median > 0) my_df$median[1] / mean_median else 0
+return (
+data.frame(
+dist_result = dist_result
+, max_median = my_df$median[1]
+, mean_median = mean_median
+, salience_result = salience_result
+, salient_level = my_df$salient_level[1]
+, rcv_result = rcv_result
+)
+)
+}
+)
+results_matrix <- sapply(X = 1:n_features, FUN = function(i) my_list[[i]])
+results_df <- as.data.frame(t(results_matrix))
+
+relative_salient_distance <- unlist(results_df$dist_result)
+salience <- unlist(results_df$salience_result)
+salient_level <- unlist(results_df$salient_level)
+max_median <- unlist(results_df$max_median)
+mean_median <- unlist(results_df$mean_median)
+rcv_result <- unlist(results_df$rcv_result)
+
+salience_df <-
+data.frame(
+# the feature name
+feature = feature_names
+# the name (or factor-level) of the class-level with the highest median intensity for the feature
+, max_level = salient_level
+# the median intensity for the feature and the level max_level
+, max_median = max_median
+# the distance between the maximum intensities for the feature at the two highest levels
+, relative_salient_distance = relative_salient_distance
+# the coefficient of variation (expressed as a proportion) for the intensity for the feature and the level max_level
+, salience_rcv = rcv_result
+# the mean of the medians of intensity for all class-levels for the feature
+, mean_median = mean_median
+# raw salience is the ratio of the most-prominent level to the mean of all levels for the feature
+, salience = salience
+# don't coerce strings to factors (this is a parameter for the data.frame constructor, not a column of the data.frame)
+, stringsAsFactors = FALSE
+)
-# the coefficient of variation (expressed as a proportion) for the intensity for the feature and the level max_level
-, max_rcv = sapply(
-X = 1:n_features
-, FUN = function(i) {
-rcvOfFeatureBySampleClassLevel[maxApplyMaxOfFeatureBySampleClassLevel[i], i]
-}
-)
-# the mean of the medians of intensity for all class-levels for the feature
-, mean_median = meanApplyMedianOfFeatureBySampleClassLevel
-# don't coerce strings to factors (this is a parameter for the data.frame constructor, not a column of the data.frame)
-, stringsAsFactors = FALSE
-)
-# raw salience is the ratio of the most-prominent level to the mean of all levels for the feature
-salience_df$salience <- sapply(
-X = 1:nrow(salience_df)
-, FUN = function(i) with(salience_df[i,], if (mean_median > 0) { max_median / mean_median } else { 0 } )
-)
-# "robust coefficient of variation, i.e., mad(feature-intensity for class-level max_level) / median(feature-intensity for class-level max_level)
-salience_df$salient_rcv <- salience_df$max_rcv
 return (salience_df)
 }
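The rewritten w4msalience derives everything from class-level medians and MADs. A standalone sketch (simulated intensities for a single feature; data and names are illustrative, not from the changeset) of the per-feature statistics it reports:

    set.seed(1)
    intensity <- c(rnorm(5, 100, 5), rnorm(5, 40, 5), rnorm(5, 10, 2))
    sample_class <- rep(c("high", "mid", "low"), each = 5)
    med <- aggregate(intensity, by = list(level = sample_class), FUN = median)
    mads <- aggregate(intensity, by = list(level = sample_class), FUN = mad)
    ord <- order(med$x, decreasing = TRUE)   # class levels by descending median
    data.frame(
      max_level = med$level[ord[1]]
      # raw salience: top median over the mean of all class-level medians
    , salience = med$x[ord[1]] / mean(med$x)
      # relative salient distance between the two highest class-level medians
    , relative_salient_distance = (med$x[ord[1]] - med$x[ord[2]]) / sqrt(mads$x[ord[1]] * mads$x[ord[2]])
      # robust coefficient of variation at the most salient level
    , salience_rcv = mads$x[ord[1]] / med$x[ord[1]]
    )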
diff -r ddaf84e15d06 -r 2ae2d26e3270 w4mcorcov_wrapper.R
--- a/w4mcorcov_wrapper.R	Thu Nov 08 23:06:09 2018 -0500
+++ b/w4mcorcov_wrapper.R	Wed Dec 12 09:20:02 2018 -0500
@@ -73,6 +73,10 @@
 ########
 
 errorPrint(sessionInfo())
+errorCat("\nsearch path:")
+errorPrint(search())
+# errorCat("\nCurrently loaded namespaces:\n")
+# errorPrint(loadedNamespaces())
 
 argVc <- unlist(parseCommandArgs(evaluate=FALSE))
 errorCat("\n\n---\n\nArguments that were passed to R are as follows:\n")
@@ -104,6 +108,7 @@
 my_env$levCSV <- as.character(argVc["levCSV"])
 my_env$matchingC <- as.character(argVc["matchingC"])
 my_env$labelFeatures <- as.character(argVc["labelFeatures"]) # number of features to label at each extreme of the loadings or 'ALL'
+my_env$fdr_features <- as.character(argVc["fdr_features"]) # number of features to consider when adjusting p-value, or 'ALL'
 my_env$cplot_o <- as.logical(argVc["cplot_o"]) # TRUE if orthogonal C-plot is requested
 my_env$cplot_p <- as.logical(argVc["cplot_p"]) # TRUE if parallel C-plot is requested
 my_env$cplot_y <- as.character(argVc["cplot_y"]) # Choice of covariance/correlation for Y-axis on C-plot
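As background for the new fdr_features argument: the wrapper pulls key-value pairs off the command line with the batch package, so the value arrives as a string ('ALL' or a count). A minimal sketch of that flow (the invocation shown is hypothetical):

    # Rscript w4mcorcov_wrapper.R ... fdr_features ALL
    suppressMessages(library(batch))
    argVc <- unlist(parseCommandArgs(evaluate = FALSE))  # named character vector of arguments
    fdr_features <- as.character(argVc["fdr_features"])  # a feature count, or 'ALL'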