comparison text_to_wordmatrix.R @ 0:34ed44f3f85c draft
"planemo upload for repository https://github.com/dlal-group/simtext commit fd3f5b7b0506fbc460f2a281f694cb57f1c90a3c-dirty"
| author | dlalgroup |
|---|---|
| date | Thu, 24 Sep 2020 02:17:05 +0000 |
| parents | |
| children | |
comparison
| parent revision | this revision |
|---|---|
| -1:000000000000 | 0:34ed44f3f85c |
```r
#!/usr/bin/env Rscript
# tool: text_to_wordmatrix
#
# The tool extracts the most frequent words per entity (per row). Text in columns starting with "ABSTRACT" or "TEXT" is considered.
# All extracted terms are used to generate a word matrix with rows = entities and columns = extracted words.
# The resulting matrix is binary with 0 = word not present in abstracts of entity and 1 = word present in abstracts of entity.
#
# Input: Output of 'pubmed_by_queries' or 'abstracts_by_pmids', or a tab-delimited table with entities in a column called "ID_<name>",
# e.g. "ID_genes", and text in columns starting with "ABSTRACT" or "TEXT".
#
# Output: Binary matrix with rows = entities and columns = extracted words.
#
# usage: text_to_wordmatrix.R [-h] [-i INPUT] [-o OUTPUT] [-n NUMBER] [-r] [-l] [-w] [-s] [-p] [--install_packages]
#
# optional arguments:
#   -h, --help                  show help message
#   -i INPUT, --input INPUT     input file name. add path if file is not in working directory
#   -o OUTPUT, --output OUTPUT  output file name. [default "text_to_wordmatrix_output"]
#   -n NUMBER, --number NUMBER  number of most frequent words that should be extracted [default "50"]
#   -r, --remove_num            remove any numbers in text
#   -l, --lower_case            by default all characters are translated to lower case. otherwise use -l
#   -w, --remove_stopwords      by default a set of English stopwords (e.g., 'the' or 'not') is removed. otherwise use -w
#   -s, --stemDoc               apply Porter's stemming algorithm: collapsing words to a common root to aid comparison of vocabulary
#   -p, --plurals               by default words in plural and singular are merged to the singular form. otherwise use -p
#   --install_packages          install missing required packages before running

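# Example call (hypothetical file names; assumes the input was produced by
# 'pubmed_by_queries' and sits in the working directory):
#   Rscript text_to_wordmatrix.R -i pubmed_by_queries_output -o genes_wordmatrix -n 100 -r
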
if ('--install_packages' %in% commandArgs()) {
  print('Installing packages')
  if (!require('argparse')) install.packages('argparse', repos = "http://cran.rstudio.com/")
  if (!require('PubMedWordcloud')) install.packages('PubMedWordcloud', repos = "http://cran.rstudio.com/")
  if (!require('SnowballC')) install.packages('SnowballC', repos = "http://cran.rstudio.com/")
  if (!require('textclean')) install.packages('textclean', repos = "http://cran.rstudio.com/")
  if (!require('SemNetCleaner')) install.packages('SemNetCleaner', repos = "http://cran.rstudio.com/")
  if (!require('stringi')) install.packages('stringi', repos = "http://cran.rstudio.com/")
  if (!require('stringr')) install.packages('stringr', repos = "http://cran.rstudio.com/")
}
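# (The block above only runs when --install_packages is passed on the command line;
# otherwise the packages loaded below are expected to be installed already.)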

suppressPackageStartupMessages(library("argparse"))
suppressPackageStartupMessages(library("PubMedWordcloud"))
suppressPackageStartupMessages(library("SnowballC"))
suppressPackageStartupMessages(library("SemNetCleaner"))
suppressPackageStartupMessages(library("textclean"))
suppressPackageStartupMessages(library("stringi"))
suppressPackageStartupMessages(library("stringr"))

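# cleanAbstracts() used below comes from PubMedWordcloud and singularize() from SemNetCleaner;
# stringi, stringr and textclean are loaded but not referenced in the code shown in this revision.
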
parser <- ArgumentParser()
parser$add_argument("-i", "--input",
                    help = "input file name. add path if file is not in working directory")
parser$add_argument("-o", "--output", default = "text_to_wordmatrix_output",
                    help = "output file name. [default \"%(default)s\"]")
parser$add_argument("-n", "--number", type = "integer", default = 50, choices = seq(1, 500), metavar = "{1..500}",
                    help = "number of most frequent words used per ID in word matrix [default \"%(default)s\"]")
parser$add_argument("-r", "--remove_num", action = "store_true", default = FALSE,
                    help = "remove any numbers in text")
parser$add_argument("-l", "--lower_case", action = "store_false", default = TRUE,
                    help = "by default all characters are translated to lower case. otherwise use -l")
parser$add_argument("-w", "--remove_stopwords", action = "store_false", default = TRUE,
                    help = "by default a set of English stopwords (e.g., 'the' or 'not') is removed. otherwise use -w")
parser$add_argument("-s", "--stemDoc", action = "store_true", default = FALSE,
                    help = "apply Porter's stemming algorithm: collapsing words to a common root to aid comparison of vocabulary")
parser$add_argument("-p", "--plurals", action = "store_false", default = TRUE,
                    help = "by default words in plural and singular are merged to the singular form. otherwise use -p")
parser$add_argument("--install_packages", action = "store_true", default = FALSE,
                    help = "install missing required packages before running")

args <- parser$parse_args()


data <- read.delim(args$input, stringsAsFactors = FALSE, header = TRUE, sep = '\t')
word_matrix <- data.frame()

# text to be mined is taken from all columns whose names contain "ABSTRACT" or "TEXT"
text_cols_index <- grep("ABSTRACT|TEXT", names(data))

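# A hypothetical input from 'pubmed_by_queries' might contain the columns
#   ID_genes, ABSTRACT_1, ABSTRACT_2, ..., ABSTRACT_n
# in which case text_cols_index selects every column except ID_genes.
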
for (row in 1:nrow(data)) {
  # cleanAbstracts() returns a data frame with one row per word and the columns 'word' and 'freq'
  top_words <- cleanAbstracts(abstracts = data[row, text_cols_index],
                              rmNum = args$remove_num,
                              tolw = args$lower_case,
                              rmWords = args$remove_stopwords,
                              stemDoc = args$stemDoc)

  top_words$word <- as.character(top_words$word)

  # δ γ ε

  cat("Most frequent words for row", row, "are extracted.", "\n")

  if (args$plurals == TRUE) {
    # merge singular and plural variants and sum their frequencies
    top_words$word <- sapply(top_words$word, function(x) {singularize(x)})
    top_words <- aggregate(freq ~ word, top_words, sum)
  }

  top_words <- top_words[order(top_words$freq, decreasing = TRUE), ]
  top_words$word <- as.character(top_words$word)

  # keep at most args$number of the most frequent words for this entity
  number_extract <- min(args$number, nrow(top_words))
  word_matrix[row, sapply(1:number_extract, function(x) {paste0(top_words$word[x])})] <- top_words$freq[1:number_extract]
}
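# word_matrix now holds one row per entity with the raw frequencies of its top words;
# cells for words that only occur in other rows are still NA at this point.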

word_matrix <- as.matrix(word_matrix)
word_matrix[is.na(word_matrix)] <- 0
word_matrix <- (word_matrix > 0) * 1  # binary matrix
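# e.g. a row with frequencies 12, 5 and 0 for the (hypothetical) words
# "epilepsy", "seizure" and "variant" ends up as 1, 1, 0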

cat("A matrix with", nrow(word_matrix), "rows and", ncol(word_matrix), "columns is generated.", "\n")

write.table(word_matrix, args$output, row.names = FALSE, sep = '\t')
```
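
For a quick sanity check, the written matrix can be read back into R. The snippet below is a minimal sketch that assumes the default output name and that the script was run in the current working directory:

```r
# Read the tab-delimited binary word matrix written by text_to_wordmatrix.R
# (assumes the default output name "text_to_wordmatrix_output").
m <- as.matrix(read.delim("text_to_wordmatrix_output", check.names = FALSE))

dim(m)    # rows = entities (in input order), columns = extracted words
table(m)  # counts of 0s and 1s in the binary matrix
```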
