comparison COBRAxy/ras_to_bounds.py @ 489:97eea560a10f draft default tip

Uploaded
author francesco_lapi
date Mon, 29 Sep 2025 10:33:26 +0000
parents 1e7a8da6c47a
children
comparison of 488:e0bcc61b2feb and 489:97eea560a10f
1 """
2 Apply RAS-based scaling to reaction bounds and optionally save updated models.
3
4 Workflow:
5 - Read one or more RAS matrices (patients/samples x reactions)
6 - Normalize and merge them, optionally adding class suffixes to sample IDs
7 - Build a COBRA model from a tabular CSV
8 - Run FVA to initialize bounds, then scale per-sample based on RAS values
9 - Save bounds per sample and optionally export updated models in chosen formats
10 """
1 import argparse 11 import argparse
2 import utils.general_utils as utils 12 import utils.general_utils as utils
3 from typing import Optional, List 13 from typing import Optional, Dict, Set, List, Tuple, Union
4 import os 14 import os
5 import numpy as np 15 import numpy as np
6 import pandas as pd 16 import pandas as pd
7 import cobra 17 import cobra
18 from cobra import Model
8 import sys 19 import sys
9 import csv
10 from joblib import Parallel, delayed, cpu_count 20 from joblib import Parallel, delayed, cpu_count
21 import utils.model_utils as modelUtils
11 22
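Before the argument parsing below, a note on the RAS handling sketched in the module docstring: each RAS matrix arrives as reactions x samples, is transposed to samples x reactions, optionally gets a class suffix appended to each sample ID, and is normalized per reaction by the column maximum. A minimal sketch under those assumptions (sample and reaction names are made up for illustration):

import pandas as pd

# Toy RAS matrix as read from CSV: reactions on rows, samples on columns.
ras = pd.DataFrame({"Reactions": ["R1", "R2"],
                    "patientA": [2.0, 0.5],
                    "patientB": [4.0, 1.0]})
ras.set_index("Reactions", inplace=True)
ras = ras.T.astype(float)                            # samples x reactions
ras.index = [f"{idx}_classA" for idx in ras.index]   # optional class suffix
ras = ras.div(ras.max(axis=0))                       # normalize each reaction by its column max
print(ras)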
12 ################################# process args ############################### 23 ################################# process args ###############################
13 def process_args(args :List[str] = None) -> argparse.Namespace: 24 def process_args(args :List[str] = None) -> argparse.Namespace:
14 """ 25 """
15 Processes command-line arguments. 26 Processes command-line arguments.
21 Namespace: An object containing parsed arguments. 32 Namespace: An object containing parsed arguments.
22 """ 33 """
23 parser = argparse.ArgumentParser(usage = '%(prog)s [options]', 34 parser = argparse.ArgumentParser(usage = '%(prog)s [options]',
24 description = 'process some value\'s') 35 description = 'process some value\'s')
25 36
26 parser.add_argument( 37
27 '-ms', '--model_selector', 38 parser.add_argument("-mo", "--model_upload", type = str,
28 type = utils.Model, default = utils.Model.ENGRO2, choices = [utils.Model.ENGRO2, utils.Model.Custom],
29 help = 'chose which type of model you want use')
30
31 parser.add_argument("-mo", "--model", type = str,
32 help = "path to input file with custom rules, if provided") 39 help = "path to input file with custom rules, if provided")
33
34 parser.add_argument("-mn", "--model_name", type = str, help = "custom mode name")
35
36 parser.add_argument(
37 '-mes', '--medium_selector',
38 default = "allOpen",
39 help = 'chose which type of medium you want use')
40
41 parser.add_argument("-meo", "--medium", type = str,
42 help = "path to input file with custom medium, if provided")
43 40
44 parser.add_argument('-ol', '--out_log', 41 parser.add_argument('-ol', '--out_log',
45 help = "Output log") 42 help = "Output log")
46 43
47 parser.add_argument('-td', '--tool_dir', 44 parser.add_argument('-td', '--tool_dir',
55 help = 'input ras') 52 help = 'input ras')
56 53
57 parser.add_argument('-rn', '--name', 54 parser.add_argument('-rn', '--name',
58 type=str, 55 type=str,
59 help = 'ras class names') 56 help = 'ras class names')
60
61 parser.add_argument('-rs', '--ras_selector',
62 required = True,
63 type=utils.Bool("using_RAS"),
64 help = 'ras selector')
65 57
66 parser.add_argument('-cc', '--cell_class', 58 parser.add_argument('-cc', '--cell_class',
67 type = str, 59 type = str,
68 help = 'output of cell class') 60 help = 'output of cell class')
69 parser.add_argument( 61 parser.add_argument(
70 '-idop', '--output_path', 62 '-idop', '--output_path',
71 type = str, 63 type = str,
72 default='ras_to_bounds/', 64 default='ras_to_bounds/',
73 help = 'output path for maps') 65 help = 'output path for maps')
74 66
67 parser.add_argument('-sm', '--save_models',
68 type=utils.Bool("save_models"),
69 default=False,
70 help = 'whether to save models with applied bounds')
71
72 parser.add_argument('-smp', '--save_models_path',
73 type = str,
74 default='saved_models/',
75 help = 'output path for saved models')
76
77 parser.add_argument('-smf', '--save_models_format',
78 type = str,
79 default='csv',
80 help = 'format for saved models (csv, xml, json, mat, yaml, tabular)')
81
75 82
76 ARGS = parser.parse_args(args) 83 ARGS = parser.parse_args(args)
77 return ARGS 84 return ARGS
78 85
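For orientation, process_args can also be exercised directly with a list of strings. A hedged sketch using only the flags visible above (the RAS-input flag is defined in a part of the file not shown here and is assumed not to be required for this illustration; file names are placeholders):

args = process_args([
    "-mo", "ENGRO2_rules.csv",        # hypothetical tabular model file
    "-rn", "classA.csv,classB.csv",   # RAS class names, comma separated
    "-idop", "ras_to_bounds/",
    "-smf", "json",
])
print(args.model_upload, args.save_models_format)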
79 ########################### warning ########################################### 86 ########################### warning ###########################################
85 s (str): The warning message to be logged and printed. 92 s (str): The warning message to be logged and printed.
86 93
87 Returns: 94 Returns:
88 None 95 None
89 """ 96 """
90 with open(ARGS.out_log, 'a') as log: 97 if ARGS.out_log:
91 log.write(s + "\n\n") 98 with open(ARGS.out_log, 'a') as log:
99 log.write(s + "\n\n")
92 print(s) 100 print(s)
93 101
94 ############################ dataset input #################################### 102 ############################ dataset input ####################################
95 def read_dataset(data :str, name :str) -> pd.DataFrame: 103 def read_dataset(data :str, name :str) -> pd.DataFrame:
96 """ 104 """
141 if upper_bound!=0 and lower_bound!=0: 149 if upper_bound!=0 and lower_bound!=0:
142 new_bounds.loc[reaction, "lower_bound"] = valMin 150 new_bounds.loc[reaction, "lower_bound"] = valMin
143 new_bounds.loc[reaction, "upper_bound"] = valMax 151 new_bounds.loc[reaction, "upper_bound"] = valMax
144 return new_bounds 152 return new_bounds
145 153
146 def process_ras_cell(cellName, ras_row, model, rxns_ids, output_folder): 154
155 def save_model(model, filename, output_folder, file_format='csv'):
156 """
157 Save a COBRA model to file in the specified format.
158
159 Args:
160 model (cobra.Model): The model to save.
161 filename (str): Base filename (without extension).
162 output_folder (str): Output directory.
163 file_format (str): File format ('xml', 'json', 'mat', 'yaml', 'tabular', 'csv').
164
165 Returns:
166 None
167 """
168 if not os.path.exists(output_folder):
169 os.makedirs(output_folder)
170
171 try:
172 if file_format == 'tabular' or file_format == 'csv':
173 # Special handling for tabular format using utils functions
174 filepath = os.path.join(output_folder, f"{filename}.csv")
175
176 rules = modelUtils.generate_rules(model, asParsed = False)
177 reactions = modelUtils.generate_reactions(model, asParsed = False)
178 bounds = modelUtils.generate_bounds(model)
179 medium = modelUtils.get_medium(model)
180
181 try:
182 compartments = modelUtils.generate_compartments(model)
183 except:
184 compartments = None
185
186 df_rules = pd.DataFrame(list(rules.items()), columns = ["ReactionID", "Rule"])
187 df_reactions = pd.DataFrame(list(reactions.items()), columns = ["ReactionID", "Reaction"])
188 df_bounds = bounds.reset_index().rename(columns = {"index": "ReactionID"})
189 df_medium = medium.rename(columns = {"reaction": "ReactionID"})
190 df_medium["InMedium"] = True
191
192 merged = df_reactions.merge(df_rules, on = "ReactionID", how = "outer")
193 merged = merged.merge(df_bounds, on = "ReactionID", how = "outer")
194
195 # Add compartments only if they exist and model name is ENGRO2
196 if compartments is not None and hasattr(ARGS, 'name') and ARGS.name == "ENGRO2":
197 merged = merged.merge(compartments, on = "ReactionID", how = "outer")
198
199 merged = merged.merge(df_medium, on = "ReactionID", how = "left")
200 merged["InMedium"] = merged["InMedium"].fillna(False)
201 merged = merged.sort_values(by = "InMedium", ascending = False)
202
203 merged.to_csv(filepath, sep="\t", index=False)
204
205 else:
206 # Standard COBRA formats
207 filepath = os.path.join(output_folder, f"{filename}.{file_format}")
208
209 if file_format == 'xml':
210 cobra.io.write_sbml_model(model, filepath)
211 elif file_format == 'json':
212 cobra.io.save_json_model(model, filepath)
213 elif file_format == 'mat':
214 cobra.io.save_matlab_model(model, filepath)
215 elif file_format == 'yaml':
216 cobra.io.save_yaml_model(model, filepath)
217 else:
218 raise ValueError(f"Unsupported format: {file_format}")
219
220 print(f"Model saved: {filepath}")
221
222 except Exception as e:
223 warning(f"Error saving model {filename}: {str(e)}")
224
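A quick usage sketch for save_model; model stands for any cobra.Model already in memory and the paths are illustrative:

# Export the same model twice; the output folder is created if it does not exist.
save_model(model, "patientA_classA", "saved_models/", file_format="json")
save_model(model, "patientA_classA", "saved_models/", file_format="xml")
# file_format="csv" (or "tabular") would instead write the tab-separated table built via modelUtils.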
225 def apply_bounds_to_model(model, bounds):
226 """
227 Apply bounds from a DataFrame to a COBRA model.
228
229 Args:
230 model (cobra.Model): The metabolic model to modify.
231 bounds (pd.DataFrame): DataFrame with reaction bounds.
232
233 Returns:
234 cobra.Model: Modified model with new bounds.
235 """
236 model_copy = model.copy()
237 for reaction_id in bounds.index:
238 try:
239 reaction = model_copy.reactions.get_by_id(reaction_id)
240 reaction.lower_bound = bounds.loc[reaction_id, "lower_bound"]
241 reaction.upper_bound = bounds.loc[reaction_id, "upper_bound"]
242 except KeyError:
243 # Reaction not found in model, skip
244 continue
245 return model_copy
246
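A minimal sketch of apply_bounds_to_model on a hand-built bounds table; the reaction IDs are hypothetical, and IDs absent from the model are simply skipped, as the function above shows:

import pandas as pd

bounds = pd.DataFrame({"lower_bound": [-5.0, 0.0],
                       "upper_bound": [5.0, 8.0]},
                      index=["EX_glc__D_e", "PGI"])    # hypothetical reaction IDs
constrained = apply_bounds_to_model(model, bounds)     # the original model is not modified
print(constrained.reactions.get_by_id("PGI").bounds)   # assumes "PGI" exists in the model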
247 def process_ras_cell(cellName, ras_row, model, rxns_ids, output_folder, save_models=False, save_models_path='saved_models/', save_models_format='csv'):
147 """ 248 """
148 Process a single RAS cell, apply bounds, and save the bounds to a CSV file. 249 Process a single RAS cell, apply bounds, and save the bounds to a CSV file.
149 250
150 Args: 251 Args:
151 cellName (str): The name of the RAS cell (used for naming the output file). 252 cellName (str): The name of the RAS cell (used for naming the output file).
152 ras_row (pd.Series): A row from a RAS DataFrame containing scaling factors for reaction bounds. 253 ras_row (pd.Series): A row from a RAS DataFrame containing scaling factors for reaction bounds.
153 model (cobra.Model): The metabolic model to be modified. 254 model (cobra.Model): The metabolic model to be modified.
154 rxns_ids (list of str): List of reaction IDs to which the scaling factors will be applied. 255 rxns_ids (list of str): List of reaction IDs to which the scaling factors will be applied.
155 output_folder (str): Folder path where the output CSV file will be saved. 256 output_folder (str): Folder path where the output CSV file will be saved.
257 save_models (bool): Whether to save models with applied bounds.
258 save_models_path (str): Path where to save models.
259 save_models_format (str): Format for saved models.
156 260
157 Returns: 261 Returns:
158 None 262 None
159 """ 263 """
160 bounds = pd.DataFrame([(rxn.lower_bound, rxn.upper_bound) for rxn in model.reactions], index=rxns_ids, columns=["lower_bound", "upper_bound"]) 264 bounds = pd.DataFrame([(rxn.lower_bound, rxn.upper_bound) for rxn in model.reactions], index=rxns_ids, columns=["lower_bound", "upper_bound"])
161 new_bounds = apply_ras_bounds(bounds, ras_row) 265 new_bounds = apply_ras_bounds(bounds, ras_row)
162 new_bounds.to_csv(output_folder + cellName + ".csv", sep='\t', index=True) 266 new_bounds.to_csv(output_folder + cellName + ".csv", sep='\t', index=True)
163 pass 267
164 268 # Save model if requested
165 def generate_bounds(model: cobra.Model, medium: dict, ras=None, output_folder='output/') -> pd.DataFrame: 269 if save_models:
270 modified_model = apply_bounds_to_model(model, new_bounds)
271 save_model(modified_model, cellName, save_models_path, save_models_format)
272
273 return
274
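Each call to process_ras_cell writes one tab-separated bounds table named after the sample; reading one back is straightforward (the file name below is illustrative):

import pandas as pd

sample_bounds = pd.read_csv("ras_to_bounds/patientA_classA.csv", sep="\t", index_col=0)
print(sample_bounds[["lower_bound", "upper_bound"]].head())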
275 def generate_bounds_model(model: cobra.Model, ras=None, output_folder='output/', save_models=False, save_models_path='saved_models/', save_models_format='csv') -> pd.DataFrame:
166 """ 276 """
167 Generate reaction bounds for a metabolic model based on medium conditions and optional RAS adjustments. 277 Generate reaction bounds for a metabolic model based on medium conditions and optional RAS adjustments.
168 278
169 Args: 279 Args:
170 model (cobra.Model): The metabolic model for which bounds will be generated. 280 model (cobra.Model): The metabolic model for which bounds will be generated.
171 medium (dict): A dictionary where keys are reaction IDs and values are the medium conditions.
172 ras (pd.DataFrame, optional): RAS pandas dataframe. Defaults to None. 281 ras (pd.DataFrame, optional): RAS pandas dataframe. Defaults to None.
173 output_folder (str, optional): Folder path where output CSV files will be saved. Defaults to 'output/'. 282 output_folder (str, optional): Folder path where output CSV files will be saved. Defaults to 'output/'.
283 save_models (bool): Whether to save models with applied bounds.
284 save_models_path (str): Path where to save models.
285 save_models_format (str): Format for saved models.
174 286
175 Returns: 287 Returns:
176 pd.DataFrame: DataFrame containing the bounds of reactions in the model. 288 pd.DataFrame: DataFrame containing the bounds of reactions in the model.
177 """ 289 """
178 rxns_ids = [rxn.id for rxn in model.reactions] 290 rxns_ids = [rxn.id for rxn in model.reactions]
179
180 # Set all reactions to zero in the medium
181 for rxn_id, _ in model.medium.items():
182 model.reactions.get_by_id(rxn_id).lower_bound = float(0.0)
183
184 # Set medium conditions
185 for reaction, value in medium.items():
186 if value is not None:
187 model.reactions.get_by_id(reaction).lower_bound = -float(value)
188
189 291
190 # Perform Flux Variability Analysis (FVA) on this medium 292 # Perform Flux Variability Analysis (FVA) on this medium
191 df_FVA = cobra.flux_analysis.flux_variability_analysis(model, fraction_of_optimum=0, processes=1).round(8) 293 df_FVA = cobra.flux_analysis.flux_variability_analysis(model, fraction_of_optimum=0, processes=1).round(8)
192 294
193 # Set FVA bounds 295 # Set FVA bounds
194 for reaction in rxns_ids: 296 for reaction in rxns_ids:
195 model.reactions.get_by_id(reaction).lower_bound = float(df_FVA.loc[reaction, "minimum"]) 297 model.reactions.get_by_id(reaction).lower_bound = float(df_FVA.loc[reaction, "minimum"])
196 model.reactions.get_by_id(reaction).upper_bound = float(df_FVA.loc[reaction, "maximum"]) 298 model.reactions.get_by_id(reaction).upper_bound = float(df_FVA.loc[reaction, "maximum"])
197 299
198 if ras is not None: 300 if ras is not None:
199 Parallel(n_jobs=cpu_count())(delayed(process_ras_cell)(cellName, ras_row, model, rxns_ids, output_folder) for cellName, ras_row in ras.iterrows()) 301 Parallel(n_jobs=cpu_count())(delayed(process_ras_cell)(
302 cellName, ras_row, model, rxns_ids, output_folder,
303 save_models, save_models_path, save_models_format
304 ) for cellName, ras_row in ras.iterrows())
200 else: 305 else:
201 bounds = pd.DataFrame([(rxn.lower_bound, rxn.upper_bound) for rxn in model.reactions], index=rxns_ids, columns=["lower_bound", "upper_bound"]) 306 raise ValueError("RAS DataFrame is None. Cannot generate bounds without RAS data.")
202 newBounds = apply_ras_bounds(bounds, pd.Series([1]*len(rxns_ids), index=rxns_ids)) 307 return
203 newBounds.to_csv(output_folder + "bounds.csv", sep='\t', index=True)
204 pass
205
206
207 308
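Mirroring the call made in main() below, generate_bounds_model is driven with an already-normalized samples x reactions RAS DataFrame; a brief sketch:

# model: a cobra.Model built from the tabular input; ras_combined: normalized RAS matrix.
generate_bounds_model(model,
                      ras=ras_combined,
                      output_folder="ras_to_bounds/",
                      save_models=True,
                      save_models_path="saved_models/",
                      save_models_format="csv")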
208 ############################# main ########################################### 309 ############################# main ###########################################
209 def main(args:List[str] = None) -> None: 310 def main(args:List[str] = None) -> None:
210 """ 311 """
211 Initializes everything and sets the program in motion based on the fronted input arguments. 312 Initialize and execute RAS-to-bounds pipeline based on the frontend input arguments.
212 313
213 Returns: 314 Returns:
214 None 315 None
215 """ 316 """
216 if not os.path.exists('ras_to_bounds'): 317 if not os.path.exists('ras_to_bounds'):
217 os.makedirs('ras_to_bounds') 318 os.makedirs('ras_to_bounds')
218 319
219
220 global ARGS 320 global ARGS
221 ARGS = process_args(args) 321 ARGS = process_args(args)
222 322
223 if(ARGS.ras_selector == True): 323
224 ras_file_list = ARGS.input_ras.split(",") 324 ras_file_list = ARGS.input_ras.split(",")
225 ras_file_names = ARGS.name.split(",") 325 ras_file_names = ARGS.name.split(",")
226 if len(ras_file_names) != len(set(ras_file_names)): 326 if len(ras_file_names) != len(set(ras_file_names)):
227 error_message = "Duplicated file names in the uploaded RAS matrices." 327 error_message = "Duplicated file names in the uploaded RAS matrices."
228 warning(error_message) 328 warning(error_message)
229 raise ValueError(error_message) 329 raise ValueError(error_message)
230 pass 330
231 ras_class_names = [] 331 ras_class_names = []
232 for file in ras_file_names: 332 for file in ras_file_names:
233 ras_class_names.append(file.rsplit(".", 1)[0]) 333 ras_class_names.append(file.rsplit(".", 1)[0])
234 ras_list = [] 334 ras_list = []
235 class_assignments = pd.DataFrame(columns=["Patient_ID", "Class"]) 335 class_assignments = pd.DataFrame(columns=["Patient_ID", "Class"])
236 for ras_matrix, ras_class_name in zip(ras_file_list, ras_class_names): 336 for ras_matrix, ras_class_name in zip(ras_file_list, ras_class_names):
237 ras = read_dataset(ras_matrix, "ras dataset") 337 ras = read_dataset(ras_matrix, "ras dataset")
238 ras.replace("None", None, inplace=True) 338 ras.replace("None", None, inplace=True)
239 ras.set_index("Reactions", drop=True, inplace=True) 339 ras.set_index("Reactions", drop=True, inplace=True)
240 ras = ras.T 340 ras = ras.T
241 ras = ras.astype(float) 341 ras = ras.astype(float)
242 if(len(ras_file_list)>1): 342 if(len(ras_file_list)>1):
243 #append class name to patient id (dataframe index) 343 # Append class name to patient id (DataFrame index)
244 ras.index = [f"{idx}_{ras_class_name}" for idx in ras.index] 344 ras.index = [f"{idx}_{ras_class_name}" for idx in ras.index]
245 else: 345 else:
246 ras.index = [f"{idx}" for idx in ras.index] 346 ras.index = [f"{idx}" for idx in ras.index]
247 ras_list.append(ras) 347 ras_list.append(ras)
248 for patient_id in ras.index: 348 for patient_id in ras.index:
249 class_assignments.loc[class_assignments.shape[0]] = [patient_id, ras_class_name] 349 class_assignments.loc[class_assignments.shape[0]] = [patient_id, ras_class_name]
250 350
251 351
252 # Concatenate all ras DataFrames into a single DataFrame 352 # Concatenate all RAS DataFrames into a single DataFrame
253 ras_combined = pd.concat(ras_list, axis=0) 353 ras_combined = pd.concat(ras_list, axis=0)
254 # Normalize the RAS values by max RAS 354 # Normalize RAS values column-wise by max RAS
255 ras_combined = ras_combined.div(ras_combined.max(axis=0)) 355 ras_combined = ras_combined.div(ras_combined.max(axis=0))
256 ras_combined.dropna(axis=1, how='all', inplace=True) 356 ras_combined.dropna(axis=1, how='all', inplace=True)
257 357
258 358 model = modelUtils.build_cobra_model_from_csv(ARGS.model_upload)
259 359
260 model_type :utils.Model = ARGS.model_selector 360 validation = modelUtils.validate_model(model)
261 if model_type is utils.Model.Custom: 361
262 model = model_type.getCOBRAmodel(customPath = utils.FilePath.fromStrPath(ARGS.model), customExtension = utils.FilePath.fromStrPath(ARGS.model_name).ext) 362 print("\n=== MODEL VALIDATION ===")
263 else: 363 for key, value in validation.items():
264 model = model_type.getCOBRAmodel(toolDir=ARGS.tool_dir) 364 print(f"{key}: {value}")
265 365
266 if(ARGS.medium_selector == "Custom"): 366
267 medium = read_dataset(ARGS.medium, "medium dataset") 367 generate_bounds_model(model, ras=ras_combined, output_folder=ARGS.output_path,
268 medium.set_index(medium.columns[0], inplace=True) 368 save_models=ARGS.save_models, save_models_path=ARGS.save_models_path,
269 medium = medium.astype(float) 369 save_models_format=ARGS.save_models_format)
270 medium = medium[medium.columns[0]].to_dict() 370 class_assignments.to_csv(ARGS.cell_class, sep='\t', index=False)
271 else: 371
272 df_mediums = pd.read_csv(ARGS.tool_dir + "/local/medium/medium.csv", index_col = 0) 372
273 ARGS.medium_selector = ARGS.medium_selector.replace("_", " ") 373 return
274 medium = df_mediums[[ARGS.medium_selector]]
275 medium = medium[ARGS.medium_selector].to_dict()
276
277 if(ARGS.ras_selector == True):
278 generate_bounds(model, medium, ras = ras_combined, output_folder=ARGS.output_path)
279 class_assignments.to_csv(ARGS.cell_class, sep = '\t', index = False)
280 else:
281 generate_bounds(model, medium, output_folder=ARGS.output_path)
282
283 pass
284 374
285 ############################################################################## 375 ##############################################################################
286 if __name__ == "__main__": 376 if __name__ == "__main__":
287 main() 377 main()