comparison compStrains.pl @ 1:c6f89f3e813c draft

Uploaded
author antmarge
date Tue, 28 Mar 2017 22:15:45 -0400
#!/usr/bin/perl -w

#Margaret Antonio 16.01.13

#DESCRIPTION: Takes two aggregate.pl outputs and compares them using the difference in mean fitness and a p-value for each
#gene. Can compare, for example, 19F in glucose and TIGR4 in glucose.
#DIFFERENT GENOMES (i.e. different strains).
#Requires a CONVERSION FILE.

#USAGE: perl compStrains.pl -c <conversion.csv> <options>
#[<aggregateFile1.csv aggregateFile2.csv> OR -d <indir/>]
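#The conversion file is tab-delimited: column 1 holds the gene_id used in the first
#aggregate file and column 2 holds its homolog in the second, e.g. (hypothetical IDs):
#SP_0001	SPT_0001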

use Data::Dumper;
use strict;
use Getopt::Long;
use warnings;
use File::Path;
use File::Basename;
use Statistics::Distributions;

#ASSIGN INPUTS TO VARIABLES USING FLAGS
our ($indir,$h,$out,$sortkey,$round,$l1,$l2,$cfile);
GetOptions(
	'd:s' => \$indir,
	'h' => \$h,
	'o:s' => \$out,
	's:i' => \$sortkey,
	'r:i' => \$round,
	'l1:s' => \$l1,
	'l2:s' => \$l2,
	'c:s' => \$cfile,
);

sub print_usage() {
	print "\n";
	print "\n##################################################################\n";
	print "compStrains.pl: compare genes from a Tn-seq experiment\n";
	print "\tfor two DIFFERENT strains/genomes using aggregate files\n";

	print "\nDESCRIPTION: Takes two aggregate.pl outputs and compares them by\n";
	print "calculating the difference in mean fitness.\n";

	print "Example: two strains tested under the same condition.\n";
	print "Note: For the same strain (genome), use compGenes.pl\n";

	print "\nUSAGE:\n";
	print "perl compStrains.pl -c conversion.csv -d inputs/\n";

	print "\nREQUIRED:\n";
	print " -d\tDirectory containing all input files (files from\n";
	print " \taggregate fitness script)\n";
	print " \tOR\n";
	print " \tIn the command line (without a flag), input the name(s) of\n";
	print " \ttwo files containing aggregate gene fitness values.\n\n";
	print " -c\tConversion file: two columns with homologs for both organisms\n";

	print "\nOPTIONAL:\n";
	print " -h\tPrints usage and exits program\n";
	print " -o\tOutput file for comparison data. Default: comp.label1label2.csv\n";
	print " -s\tSort output by this index of the file (indices begin at 0).\n";
	print " \tDefault: by mean difference\n";
	print " -r\tRound final output numbers to this number of decimals\n";
	print " -l1, -l2\tLabels for input files 1 and 2. Default: filenames\n";
	print " \t(i.e. -l1 expt1 -l2 expt2). Order should match file order.\n";
	print " \n~~~~Always check that file paths are correctly specified~~~~\n";
	print "\n##################################################################\n";
}
if ($h){
	print_usage();
	exit;
}
if (!$indir and (scalar @ARGV==0)){
	print "\nERROR: Please correctly specify input files or directory\n";
	print_usage();
	print "\n";
	exit;
}
if (!$cfile){
	print "\nERROR: Please correctly specify the required conversion file\n";
	print_usage();
	print "\n";
	exit;
}

#THE @files ARRAY WILL CONTAIN INPUT FILE NAMES, EXTRACTED FROM A DIRECTORY (-d) OR FROM @ARGV
my @files;
if ($indir){
	my $directory="$indir";
	opendir(DIR, $directory) or (print "Couldn't open $directory: $!\n" and print_usage() and exit);
	my @direct= readdir DIR;
	my $tail=".csv";
	foreach (@direct){
		if (index($_, $tail) != -1){
			$_=$indir.$_;
			push (@files,$_);
		}
	}
	closedir DIR;
}
else{
	@files=@ARGV;
}

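#NOTE: with -d, every .csv file in the directory is collected in readdir order and
#only the first two entries ($files[0] and $files[1]) are opened below.
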
#GET LABELS: USE (-l1,-l2) OR USE FILENAMES AS LABELS FOR COLUMNS IN OUTPUT FILE

my @labels;
if ($l1 and $l2){
	@labels=($l1,$l2);
}
else{
	foreach (@files){
		my $filename=basename($_);
		my @temp=split('\\.',$filename);
		my $colName=$temp[0];
		push (@labels,$colName);
	}
}

#CHECK IF REQ. VARIABLES WERE DEFINED USING FLAGS. IF NOT THEN USE DEFAULT VALUES

if (!$out) {$out="comp.".$labels[0].$labels[1].".csv"}
if (!$round){$round='%.4f'}
else{$round='%.'.$round.'f'}
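#NOTE: -r takes an integer number of decimal places; the lines above turn it into the
#matching sprintf format (e.g. -r 2 becomes '%.2f'), defaulting to 4 decimals.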

#OPEN INPUT AGGREGATE GENE FILES AND STORE THEIR CONTENTS INTO TWO HASHES
#FILE1 GOES INTO HASH %one AND FILE2 GOES INTO HASH %two.

#FILE1 OPENING ---> %one WHERE KEY:VALUE IS GENE_ID:(GENE_ID,INSERTIONS,MEAN,ETC.)
my @header;
my %one;

open (F1,'<',$files[0]);

#STORE COLUMN NAMES (FIRST LINE OF FILE1) FOR HEADER AND APPEND LABELS
my $head=<F1>; #the header in the file
my @cols=split(',',$head);
@cols=@cols[0,1,2,3,4,5,6]; #keep only the first 7 columns; drop the blank columns
for (my $j=0;$j<scalar @cols;$j++){
	$cols[$j]=$cols[$j].'-'.$labels[0]; #mark each column name with the file it comes from
}
push (@header,@cols);

while (my $line=<F1>){
	chomp $line;
	my @info=split(",",$line);
	#Only keep the first 7 columns (the columns about blanks aren't needed for comparisons)
	@info=@info[0,1,2,3,4,5,6];
	#Genes that don't have a gene name can be blank in this field, so fill it with NA
	if (!$info[5]){
		$info[5]="NA";
	}
	#If there are no insertions in the "total" column, then make it 0 rather than blank
	if (!$info[6]){
		$info[6]=0;
	}
	$one{$info[0]}=\@info;
}
close F1;

#FILE2 OPENING ---> %two WHERE KEY:VALUE IS GENE_ID:(GENE_ID,INSERTIONS,MEAN,ETC.)

my %two;
open (F2,'<',$files[1]);

#STORE COLUMN NAMES (FIRST LINE OF FILE2) FOR HEADER AND APPEND LABELS
$head=<F2>; #the header in the file
@cols=split(',',$head);
@cols=@cols[0,1,2,3,4,5,6]; #keep only the first 7 columns; drop the blank columns
for (my $j=0;$j<scalar @cols;$j++){
	$cols[$j]=$cols[$j].'-'.$labels[1]; #mark each column name with the file it comes from
}
push (@header,@cols);

while (my $line=<F2>){
	chomp $line;
	my @info=split(",",$line);
	@info=@info[0,1,2,3,4,5,6];
	if (!$info[5]){
		$info[5]="NA";
	}
	if (!$info[6]){
		$info[6]=0;
	}
	$two{$info[0]}=\@info;
}
close F2;


#READ THE CONVERSION FILE INTO AN ARRAY OF MATCHED HOMOLOGS.
#The conversion file must have strain 1 (for file 1) in column 1 (index 0) and
#strain 2 (for file 2) in column 2 (index 1).
#The conversion file must be tab-delimited with no NA fields.
#If homologs exist, then take their info from the hashes (%one and %two) by looking up the gene_id KEY.

my @all; #store all matched homologs in this array
open (CONV,'<',$cfile);
while (my $line=<CONV>){
	chomp $line;
	my @genes=split("\t",$line); #Array @genes will contain two genes (SP_0000,SPT_0000)
	if (scalar @genes==2 and $genes[0] ne "" and $genes[1] ne ""){
		my @info;
		my @oneArray=@{$one{$genes[0]}};
		my @twoArray=@{$two{$genes[1]}};
		push (@info,@oneArray,@twoArray);
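		#@info now holds the 7 retained columns from each file: indices 0-6 come from
		#file 1 and indices 7-13 from file 2. As used below, index 1/8 = mean fitness,
		#3/10 = standard deviation, 4/11 = standard error, 6/13 = total insertions.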
		my $diff=sprintf("$round",($info[1]-$info[8]));
		my $total1=$info[6];
		my $total2=$info[13];
		my $sd1=$info[3];
		my $se1=$info[4];
		my $sd2=$info[10];
		my $se2=$info[11];
		my $df=$total1+$total2-2;
		my $tdist;
		my $pval;
		#TDIST and PVAL calculations will fail if the standard deviation, standard error, or counts
		#are not real numbers, or if 0 ends up in a denominator
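		#t statistic (two-sample t-test with unpooled variances):
		#  t = |mean1 - mean2| / sqrt(sd1^2/n1 + sd2^2/n2), with df = n1 + n2 - 2;
		#Statistics::Distributions::tprob($df,$t) then returns the upper-tail probability of t.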
		if ($se1 eq "X" or $se2 eq "X" or $sd1 eq "X" or $sd2 eq "X" or $total1==0 or $total2==0 or $sd1==0 or $sd2==0){
			($tdist,$pval)=("NA","NA");
		}
		else{
			$tdist=sqrt((($diff)/(sqrt((($sd1**2)/$total1)+(($sd2**2)/$total2))))**2);
			$pval=Statistics::Distributions::tprob($df,$tdist);
		}
		push (@info,$diff,$df,$tdist,$pval);
		push (@all,\@info);
	}
}
close CONV;

#SORT THE HOMOLOGS BY THE SORTKEY, OR BY DEFAULT BY THE DIFFERENCE IN MEAN FITNESS
if (!$sortkey){
	$sortkey=14; #index of the mean difference column
}
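#In each @all row, indices 0-6 are from file 1, 7-13 from file 2, and 14-17 are the
#MeanDiff, DOF, TDIST, and PVALUE appended above, so $sortkey=14 sorts by mean difference.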
my @sorted = sort { $b->[$sortkey] <=> $a->[$sortkey] } @all;

#FINISH THE HEADER BY ADDING COLUMN NAMES FOR MEAN-DIFF, DOF, TDIST, AND PVALUE
my $field="MeanDiff(".$labels[0].'.'.$labels[1].")";
push (@header,$field,"DOF","TDIST","PVALUE");

#PRINT MATCHED HOMOLOG INFORMATION INTO A SINGLE OUTPUT FILE
open OUT, '>',"$out";
print OUT (join(',',@header),"\n");
foreach (@sorted){
	my @woo=@{$_};
	print OUT join(',',@woo),"\n";
}

close OUT;