Mercurial > repos > rnateam > graphclust_preprocessing
changeset 7:07ad2d77f28a draft
planemo upload for repository https://github.com/eteriSokhoyan/galaxytools/tree/branchForIterations/tools/GraphClust commit 4379e712f76f2bb12ee2cc270dd8a0e806df2cd6
author   | rnateam
date     | Mon, 22 May 2017 12:45:22 -0400
parents  | dff6a5a17221
children | a04e93fdb40a
files    | preprocessing.xml splitSHAPE.py
diffstat | 2 files changed, 89 insertions(+), 29 deletions(-)
--- a/preprocessing.xml Sat Mar 25 16:53:38 2017 -0400
+++ b/preprocessing.xml Mon May 22 12:45:22 2017 -0400
@@ -1,4 +1,4 @@
-<tool id="preproc" name="Preprocessing" version="0.1">
+<tool id="preproc" name="Preprocessing" version="0.2">
     <requirements>
         <requirement type="package" version="0.1.12">graphclust-wrappers</requirement>
     </requirements>
@@ -7,41 +7,47 @@
     </stdio>
     <command>
     <![CDATA[
+        preprocessing.pl
+        '$fastaFile'
+        $max_length
+        $in_winShift
+        $min_seq_length
-        'preprocessing.pl'
-        '$fastaFile' $max_length $in_winShift $min_seq_length
-
+        #if $SHAPEdata:
+            &&
+            python '$__tool_directory__/splitSHAPE.py'
+            '$SHAPEdata'
+            $max_length
+        #end if
     ]]>
     </command>
     <inputs>
         <param type="data" name="fastaFile" format="fasta" />
+        <param type="data" name="SHAPEdata" format="txt" optional="true" label="SHAPE data"/>
         <param name="max_length" type="integer" value="10000" size="5" label="window size"/>
         <param name="in_winShift" type="integer" value="100" size="5" label="window shift in percent"/>
         <param name="min_seq_length" type="integer" value="5" size="5" label="minimum sequence length"/>
     </inputs>
-
     <outputs>
         <data name="data.fasta" format="fasta" from_work_dir="FASTA/data.fasta" label="data.fasta"/>
         <data name="data.map" format="txt" from_work_dir="FASTA/data.map" label="data.map"/>
         <data name="data.names" format="txt" from_work_dir="FASTA/data.names" label="data.names"/>
         <data name="data.fasta.scan" format="fasta" from_work_dir="FASTA/data.fasta.scan" label="data.fasta.scan"/>
         <data name="FASTA" format="zip" from_work_dir="FASTA.zip" label="FASTA.ZIP"/>
+        <data name="shape_data_split" format="txt" from_work_dir="shape_data_split.react" label="SHAPE data splited"/>
     </outputs>
-
-
     <tests>
-    <test>
-        <param name="fastaFile" value="input.fa"/>
-        <param name="max_length" value="10000"/>
-        <param name="in_winShift" value="100"/>
-        <param name="min_seq_length" value="5"/>
-        <output name="data.fasta" file="FASTA/data.fasta"/>
-        <output name="data.map" file="FASTA/data.map" />
-        <output name="data.names" file="FASTA/data.names"/>
-        <output name="data.fasta.scan" file="FASTA/data.fasta.scan" />
-    </test>
-</tests>
-
+        <test>
+            <param name="fastaFile" value="input.fa"/>
+            <param name="max_length" value="10000"/>
+            <param name="in_winShift" value="100"/>
+            <param name="min_seq_length" value="5"/>
+            <output name="data.fasta" file="FASTA/data.fasta"/>
+            <output name="data.map" file="FASTA/data.map" />
+            <output name="data.names" file="FASTA/data.names"/>
+            <output name="data.fasta.scan" file="FASTA/data.fasta.scan" />
+        </test>
+    </tests>
     <help>
 <![CDATA[
@@ -57,8 +63,6 @@
 Slightly larger windows are usually ok. Too small windows can
 disturb existing signals.
-
-
 + **window shift in percent** : Relative window size in % for window shift during input
 preprocessing. Please note that a small shift results in much more fragments for clustering.
 The benefit is that RNA motifs/structures are not destroyed by arbitrary split points. Smaller
@@ -67,19 +71,11 @@
 no other occurences in the dataset can be found.
-
-
-
 + **minimum sequence length** : Minimal length of input sequences. Every input sequence
 below that length is ignored completely during clustering.
-
 ]]></help>
-
-
     <citations>
         <citation type="doi">10.1093/bioinformatics/bts224</citation>
     </citations>
-
-
 </tool>
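The substance of this wrapper change is that the SHAPE reactivity file is an optional second input: when it is supplied, the job appends a call to splitSHAPE.py after preprocessing.pl. The following is a minimal Python sketch of that command-building logic, mirroring the Cheetah "#if $SHAPEdata:" block; it is illustrative only, and the value "shape.react" is an invented placeholder (the FASTA name and the numeric defaults are taken from the wrapper's test and parameter defaults).

# Sketch of the command the updated <command> block assembles; not the wrapper's actual code.
def build_command(fasta, max_length, win_shift, min_len, shape=None):
    cmd = "preprocessing.pl '{}' {} {} {}".format(fasta, max_length, win_shift, min_len)
    if shape:  # only when a SHAPE dataset was selected, as in the "#if $SHAPEdata:" branch
        cmd += " && python splitSHAPE.py '{}' {}".format(shape, max_length)
    return cmd

print(build_command("input.fa", 10000, 100, 5))                       # without SHAPE data
print(build_command("input.fa", 10000, 100, 5, shape="shape.react"))  # with SHAPE data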
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/splitSHAPE.py Mon May 22 12:45:22 2017 -0400
@@ -0,0 +1,64 @@
+import os
+import re
+import sys
+
+shape_file = sys.argv[1]
+win_size = int(sys.argv[2])
+
+pattern = re.compile("^>.*$")
+toWrite = ""
+
+count_for_id = 1
+seq_counter = 0
+new_id = ""
+
+seq_id = []
+seq_string = []
+orig_id = []
+name_file = "FASTA/data.names"
+array_all_chunks = []
+with open(name_file, 'r') as f:
+    content = f.read()
+    lines = content.split('\n')[:-1]
+    for line in lines:
+        seq_id.append(int(line.split()[0]))
+        seq_string.append(line.split()[1])
+        orig_id_srt = line.split()[3]
+        orig_id_srt = orig_id_srt.rsplit('_',1)[0]
+        orig_id.append(orig_id_srt)
+
+
+react_dict = {}
+react_arr = []
+
+with open(shape_file, 'r') as shape:
+    content = shape.read()
+    lines = content.split('\n')
+    for line in lines:
+        if pattern.match(line):
+            line = line.replace('>','').strip()
+            react_arr=[]
+            react_dict[line] = react_arr
+            continue
+        else:
+            react_arr.append(line)
+
+toWrite = ""
+chunks = []
+for i in range(len(orig_id)):
+    if not orig_id[i] in react_dict:
+        raise RuntimeError('Error key {} not found'.format(orig_id))
+
+    react_val = react_dict[orig_id[i]]
+    toWrite += '>' + str(seq_id[i]) + " " + seq_string[i] + "\n"
+    chunks = re.findall(r'\d+', seq_string[i])
+
+    for j in react_val[int(chunks[1])-1:int(chunks[2])]:
+        id_s = int(j.split()[0])
+
+        if id_s > win_size:
+            id_s = id_s - int(chunks[1])
+        toWrite += str(id_s) + '\t' + j.split()[1] + "\n"
+
+with open("shape_data_split.react", 'w') as out:
+    out.write(toWrite)
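To make the new script easier to follow: splitSHAPE.py pairs each preprocessing fragment listed in FASTA/data.names with the reactivity block of its originating sequence in the SHAPE file (blocks are keyed by the '>' headers), keeps only the "position, reactivity" lines that fall inside the fragment's window, and re-bases positions that exceed the window size before writing shape_data_split.react. The snippet below is a standalone sketch of that inner slicing loop with invented example values; it is not part of the changeset, and the start/end/win_size numbers are made up for illustration.

# Standalone illustration of the per-fragment slicing inside splitSHAPE.py.
# react_val imitates the "position<TAB>reactivity" lines read for one sequence;
# start/end stand in for the fragment coordinates taken from data.names, and
# win_size corresponds to the tool's window size ($max_length).
react_val = ["{}\t{:.2f}".format(pos, 0.1 * pos) for pos in range(1, 21)]
start, end = 6, 15
win_size = 10

out_lines = []
for entry in react_val[start - 1:end]:      # same slice as react_val[int(chunks[1])-1:int(chunks[2])]
    pos, reactivity = entry.split()
    pos = int(pos)
    if pos > win_size:                      # positions past the window size are shifted back by start
        pos -= start
    out_lines.append("{}\t{}".format(pos, reactivity))

print("\n".join(out_lines))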