proteore / proteore_id_converter :: id_converter.py @ 16:b6607b7e683f (draft)

planemo upload commit f2b3d1ff6bea930b2ce32c009e4d3de39a17edfb-dirty

author:   proteore
date:     Mon, 28 Jan 2019 11:08:47 -0500
parents:
children: 1e45ea50f145
comparison: 15:b50d913ec067 vs 16:b6607b7e683f

import sys, os, argparse, re, csv

def get_args() :
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--ref_file", help="path to reference file: <species>_id_mapping.tsv", required=True)
    parser.add_argument("--input_type", help="type of input ('list' or 'file')", required=True)
    parser.add_argument("-t", "--id_type", help="type of input IDs", required=True)
    parser.add_argument("-i", "--input", help="list of IDs (text or filename)", required=True)
    parser.add_argument("-c", "--column_number", help="column containing the IDs to convert (e.g. c1)")
    parser.add_argument("--header", help="true/false if your file contains a header")
    parser.add_argument("--target_ids", help="target IDs to map to", required=True)
    parser.add_argument("-o", "--output", help="output filename", required=True)
    args = parser.parse_args()
    return args
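
# Example invocation (illustrative only; the file names and ID-type labels below are
# hypothetical and must match the column names of the reference <species>_id_mapping.tsv):
#   python id_converter.py -d human_id_mapping.tsv --input_type file -t UniProt-AC \
#       -i proteins.tsv -c c1 --header true --target_ids "GeneID,RefSeq" -o converted.tsv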

#return list of (unique) ids from string
def get_input_ids_from_string(input) :
    ids_list = list(set(re.split(r'\s+',input.replace("\r","").replace("\n"," ").replace("\t"," "))))
    if "" in ids_list : ids_list.remove("")
    #if "NA" in ids_list : ids_list.remove("NA")
    return ids_list
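
# e.g. get_input_ids_from_string("P31946 P62258\nP62258") -> ['P31946', 'P62258']
# (duplicates are removed and order is not guaranteed, since a set is used)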

#return input_file and list of unique ids from input file path
def get_input_ids_from_file(input,nb_col,header) :
    with open(input, "r") as csv_file :
        input_file = list(csv.reader(csv_file, delimiter='\t'))

    input_file, ids_list = one_id_one_line(input_file,nb_col,header)
    if "" in ids_list : ids_list.remove("")
    #if "NA" in ids_list : ids_list.remove("NA")

    return input_file, ids_list

#return the input file with one id per line (rows holding several ';'-separated ids are duplicated)
def one_id_one_line(input_file,nb_col,header) :

    if header :
        new_file = [input_file[0]]
        input_file = input_file[1:]
    else :
        new_file=[]
    ids_list=[]

    for line in input_file :
        if line != [] and set(line) != {''}:
            line[nb_col] = re.sub(r"\s+","",line[nb_col])
            if ";" in line[nb_col] :
                ids = line[nb_col].split(";")
                for id in ids :
                    new_file.append(line[:nb_col]+[id]+line[nb_col+1:])
                    ids_list.append(id)
            else :
                new_file.append(line)
                ids_list.append(line[nb_col])

    ids_list = list(set(ids_list))

    return new_file, ids_list
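
# e.g. with nb_col=0, the row ['P10809;P25705', 'foo'] is expanded into two rows,
# ['P10809', 'foo'] and ['P25705', 'foo'], and both ids are added to ids_list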

#return the column number in int format
def nb_col_to_int(nb_col):
    try :
        nb_col = int(nb_col.replace("c", "")) - 1
        return nb_col
    except (ValueError, AttributeError) :
        sys.exit("Please specify the column containing the IDs to convert in a valid format (e.g. c1)")
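
# e.g. nb_col_to_int("c2") -> 1 (0-based column index)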

#replace all blank cells with NA
def blank_to_NA(csv_file) :
    tmp=[]
    for line in csv_file :
        line = ["NA" if cell=="" or cell==" " or cell=="NaN" else cell for cell in line]
        tmp.append(line)

    return tmp
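
# e.g. blank_to_NA([['P31946', '', 'NaN']]) -> [['P31946', 'NA', 'NA']]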

def str2bool(v):
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')

#return result dictionary
def map_to_dictionary(ids,ids_dictionary,id_in,id_out) :

    result_dict = {}
    for id in ids :
        for target_id in id_out :
            if id in ids_dictionary :
                res = ";".join(ids_dictionary[id][target_id])
            else :
                res=""

            if id in result_dict :
                result_dict[id].append(res)
            else :
                result_dict[id]=[res]

    return result_dict
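
# Note: result_dict maps each input id to one ';'-joined string per requested target id type,
# in the same order as id_out; ids absent from the reference dictionary map to empty strings.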

#create an empty ids dictionary and an index of the reference table columns
def create_ids_dictionary(ids_list) :
    ids_dictionary = {}
    ids_dictionary_index={}
    for i,id in enumerate(ids_list) :
        ids_dictionary_index[i]=id

    return(ids_dictionary,ids_dictionary_index)
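
# Note: ids_dictionary is returned empty here and only filled later in main();
# ids_dictionary_index maps each position in ids_list (the reference file header) to its column name.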

def main():

    #Get args from command line
    args = get_args()
    target_ids = args.target_ids.split(",")
    header=False
    if args.id_type in target_ids : target_ids.remove(args.id_type)
    if args.input_type=="file" :
        args.column_number = nb_col_to_int(args.column_number)
        header = str2bool(args.header)

    #Get ref file to build dictionary
    csv.field_size_limit(sys.maxsize) # to handle big files
    with open(args.ref_file, "r") as csv_file :
        tab = csv.reader(csv_file, delimiter='\t')
        tab = [line for line in tab]

    ids_list=tab[0]

    #create empty dictionary and dictionary index
    ids_dictionary, ids_dictionary_index = create_ids_dictionary(ids_list)

    #fill dictionary and sub dictionaries with ids
    id_index = ids_list.index(args.id_type)
    for line in tab[1:] :
        ref_ids=line[id_index]
        other_id_type_index = [accession_id for accession_id in ids_dictionary_index.keys() if accession_id!=id_index]
        for id in ref_ids.replace(" ","").split(";") : #if there's more than one id, one key per id (example : GO)
            if id not in ids_dictionary : #if the key is not created yet
                ids_dictionary[id]={}
            for other_id_type in other_id_type_index :
                if ids_dictionary_index[other_id_type] not in ids_dictionary[id] :
                    ids_dictionary[id][ids_dictionary_index[other_id_type]] = set(line[other_id_type].replace(" ","").split(";"))
                else :
                    ids_dictionary[id][ids_dictionary_index[other_id_type]] |= set(line[other_id_type].replace(" ","").split(";"))
                if len(ids_dictionary[id][ids_dictionary_index[other_id_type]]) > 1 and '' in ids_dictionary[id][ids_dictionary_index[other_id_type]] :
                    ids_dictionary[id][ids_dictionary_index[other_id_type]].remove('')
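
    # At this point ids_dictionary maps each id of the chosen source type to a dict of
    # {target column name: set of matching ids}, merged over every row of the reference file.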

    #Get file and/or ids from input
    if args.input_type == "list" :
        ids = get_input_ids_from_string(args.input)
    elif args.input_type == "file" :
        input_file, ids = get_input_ids_from_file(args.input,args.column_number,header)

    #Mapping ids
    result_dict = map_to_dictionary(ids,ids_dictionary,args.id_type,target_ids)

    #creating output file
    if header :
        output_file=[input_file[0]+target_ids]
        input_file = input_file[1:]
    else :
        output_file=[[args.id_type]+target_ids]

    if args.input_type=="file" :
        for line in input_file :
            output_file.append(line+result_dict[line[args.column_number]])
    elif args.input_type=="list" :
        for id in ids :
            output_file.append([id]+result_dict[id])

    #convert blank to NA
    output_file = blank_to_NA(output_file)

    #write output file
    with open(args.output,"w") as output :
        writer = csv.writer(output,delimiter="\t")
        writer.writerows(output_file)

if __name__ == "__main__":
    main()