0
|
1 '''
|
|
2 Created on 31 dec. 2014
|
|
3
|
|
4 @author: lukas007
|
|
5 '''
|
|
6 import shutil
|
|
7 import subprocess
|
|
8 import csv
|
16
|
9 import os
|
|
10 import stat
|
0
|
11 from collections import OrderedDict
|
|
12
|
|
def copy_dir(src, dst):
    """Recursively copy the directory tree rooted at *src* to *dst*.

    Thin wrapper around shutil.copytree; *dst* must not already exist.
    """
    shutil.copytree(src, dst)
|
|
15
|
16
|
16 def _del_rw(action, name, exc):
|
|
17 '''
|
|
18 ensures the read only files are set to read/write
|
|
19 and then deletes them
|
|
20 '''
|
|
21 os.chmod(name, stat.S_IWRITE)
|
|
22 os.remove(name)
|
|
23
|
|
def remove_dir(src):
    """Recursively delete the directory tree *src*, including read-only files.

    Delegates failed removals to _del_rw, which chmods the entry writable
    and deletes it.
    """
    shutil.rmtree(src, onerror=_del_rw)
|
|
26
|
|
27
|
9
|
def log_message(log_file, log_message):
    """Append *log_message* as one line (newline-terminated) to *log_file*.

    The file is created if it does not exist yet.
    """
    with open(log_file, "a") as handle:
        handle.write(log_message + "\n")
|
|
31
|
0
|
def copy_file(src, dst):
    """Copy file *src* to *dst* (file or directory target), preserving mode bits.

    Thin wrapper around shutil.copy.
    """
    shutil.copy(src, dst)
|
|
34
|
|
def get_process_list():
    """Return the output of `ps -A` as a list of lines, one per process.

    NOTE(review): under Python 3 the lines are bytes objects (raw pipe
    output is not decoded) — callers must account for that.
    """
    proc = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)
    stdout, _stderr = proc.communicate()
    return stdout.splitlines()
|
|
39
|
|
def get_process_pid(process_name):
    """Return the pid of a process whose `ps -A` line contains *process_name*.

    Scans the whole listing; if several processes match, the pid of the LAST
    matching line is returned (original behavior, preserved). Returns -1 when
    no process matches.

    @param process_name: substring to look for in each `ps -A` output line
    """
    pid = -1
    for line in get_process_list():
        # Under Python 3 subprocess pipes yield bytes; decode so the
        # substring test works for a str process_name as well (the original
        # raised TypeError there).
        if isinstance(line, bytes):
            line = line.decode('utf-8', errors='replace')
        if process_name in line:
            # First whitespace-separated token of a `ps -A` line is the pid.
            pid = int(line.split(None, 1)[0])
    return pid
|
|
46
|
|
47
|
|
def get_as_dict(in_tsv):
    '''
    Generic method to parse a tab-separated file returning a dictionary with named columns.

    The first row is used as the header; each header name maps to the list of
    values found in that column (all values are kept as strings).

    @param in_tsv: input filename to be parsed
    '''
    # Context manager closes the handle deterministically (the original
    # leaked it); plain 'r' replaces the removed-in-3.11 'rU' mode — text
    # mode already gives universal newlines.
    with open(in_tsv, 'r') as handle:
        data = list(csv.reader(handle, delimiter='\t'))
    header = data.pop(0)
    # Create dictionary with column name as key
    # (dict comprehension replaces the Python-2-only xrange loop).
    return {name: [row[index] for row in data]
            for index, name in enumerate(header)}
|
|
60
|
|
def save_dict_as_tsv(dict, out_tsv):
    '''
    Writes tab-separated data to file.

    @param dict: dictionary mapping column name -> list of column values;
                 all columns are assumed to have the same length
    @param out_tsv: output tsv file
    '''
    # NOTE(review): the parameter shadows the builtin `dict`; the name is
    # kept for backward compatibility with keyword callers, but aliased
    # locally so the builtin stays usable.
    columns = dict

    # newline='' lets the csv module control line endings itself; the
    # context manager guarantees the handle is flushed and closed (the
    # original opened in 'wb' and never closed it, which breaks under
    # Python 3 and risks truncated output).
    with open(out_tsv, 'w', newline='') as out_file:
        output_writer = csv.writer(out_file, delimiter="\t")

        # Write headers
        output_writer.writerow(list(columns.keys()))

        # Write one row per record index; zip(*values) walks the columns in
        # lockstep in key order, and handles an empty dict gracefully (the
        # original indexed keys()[0] and crashed on empty input).
        for row in zip(*columns.values()):
            output_writer.writerow(list(row))
|
|
79
|
|
80
|
|
81
|
|
82
|
|
def get_nist_out_as_dict(nist_result_file):
    '''
    Method to parse NIST specific output into a dictionary.

    Each 'Unknown: <id> ...' title line is remembered and its id attached to
    every following 'Hit ...' line; each Hit line contributes one entry to
    every column list.

    @param nist_result_file: result file as produced by NIST nistms$.exe
    @raise Exception: on an unexpected field, or when a Hit line does not
                      contain all six expected fields (MF/RMF/Prob/CAS/Mw/Id)
    '''
    # Create dictionary with column name as key; OrderedDict keeps the
    # columns in a stable, predictable order.
    output = OrderedDict()
    output['id'] = []
    output['compound_name'] = []
    output['formula'] = []
    output['lib_name'] = []
    output['id_in_lib'] = []
    output['mf'] = []
    output['rmf'] = []
    output['prob'] = []
    output['cas'] = []
    output['mw'] = []

    title_row = None
    # Context manager closes the handle (the original leaked it).
    with open(nist_result_file) as handle:
        for line in handle:
            row = line.split('<<')
            if row[0].startswith('Unknown'):
                # Title line: remember it for the Hit lines that follow.
                title_row = row[0]
                continue
            elif row[0].startswith('Hit'):
                hit = row

                # Title looks like 'Unknown: <id> ...' -> take the id token.
                output['id'].append(title_row.split(': ')[1].split(' ')[0])
                # The compound name may contain non-ASCII characters; only
                # decode when we actually received bytes (Python 2 str) --
                # under Python 3 the line is already text and has no
                # .decode() method (the original crashed there).
                # see http://blog.webforefront.com/archives/2011/02/python_ascii_co.html
                name_field = hit[1].split('>>')[0]
                if isinstance(name_field, bytes):
                    name_field = name_field.decode('utf-8', errors='replace')
                output['compound_name'].append(name_field)
                output['formula'].append(hit[2].split('>>')[0])
                output['lib_name'].append(hit[3].split('>>')[0])

                # The remaining ';'-separated fields are spread over the
                # tails of the formula and library sections.
                other_fields_list = (hit[2].split('>>')[1] + hit[3].split('>>')[1]).split(';')
                count = 0
                for field in other_fields_list:
                    if field.startswith(' MF: '):
                        count += 1
                        output['mf'].append(field.split('MF: ')[1])
                    elif field.startswith(' RMF: '):
                        count += 1
                        output['rmf'].append(field.split('RMF: ')[1])
                    elif field.startswith(' Prob: '):
                        count += 1
                        output['prob'].append(field.split('Prob: ')[1])
                    elif field.startswith(' CAS:'):
                        count += 1
                        output['cas'].append(field.split('CAS:')[1])
                    elif field.startswith(' Mw: '):
                        count += 1
                        output['mw'].append(field.split('Mw: ')[1])
                    elif field.startswith(' Id: '):
                        count += 1
                        # the [0:-2] is to avoid the last 2 characters, namely a '.' and a \n
                        output['id_in_lib'].append(field.split('Id: ')[1][0:-2])
                    elif field != '' and field != ' Lib: ':
                        raise Exception('Error: unexpected field in NIST output: ' + field)

                if count != 6:
                    raise Exception('Error: did not find all expected fields in NIST output')

    return output
|
|
143
|
|
def get_spectra_file_as_dict(spectrum_file):
    '''
    Method to parse spectra file in NIST MSP input format into a dictionary.
    The idea is to parse the following :

    Name: spectrum1
    DB#: 1
    Num Peaks: 87
    14 8; 15 15; 27 18; 28 15; 29 15;
    30 11; 32 19; 39 32; 40 12; 41 68;

    into:

    dict['spectrum1'] = "14 8; 15 15; 27 18; 28 15; 29 15;30 11; 32 19; 39 32; 40 12; 41 68;"

    (peak lines are concatenated with their newlines stripped; no extra
    separator is inserted between them).

    @param spectrum_file: spectra file in MSP format (e.g. also the format returned by MsClust)
    '''
    output = OrderedDict()
    name = ''
    spectrum = ''
    # Context manager closes the handle (the original leaked it).
    with open(spectrum_file) as handle:
        for line in handle:
            if line.startswith('Name: '):
                if name != '':
                    # store the previously collected spectrum:
                    output[name] = spectrum
                name = line.split('Name: ')[1].replace('\n', '')
                spectrum = ''
            elif line[0].isdigit():
                # peak line: accumulate it (newline stripped):
                spectrum += line.replace('\n', '')

    # store also last spectrum -- but only if a 'Name:' line was ever seen
    # (the original produced a bogus {'': ''} entry for an empty file):
    if name != '':
        output[name] = spectrum

    return output
|
|
180 |