diff COBRAxy/flux_simulation.py @ 489:97eea560a10f draft

Uploaded
author francesco_lapi
date Mon, 29 Sep 2025 10:33:26 +0000
parents c561c060a55f
--- a/COBRAxy/flux_simulation.py	Tue Sep 23 13:48:24 2025 +0000
+++ b/COBRAxy/flux_simulation.py	Mon Sep 29 10:33:26 2025 +0000
@@ -1,101 +1,124 @@
+"""
+Flux sampling and analysis utilities for COBRA models.
+
+This script supports two modes:
+- Mode 1 (model_and_bounds=True): load a base model and apply bounds from
+    separate files before sampling.
+- Mode 2 (model_and_bounds=False): load complete models and sample directly.
+
+Sampling algorithms supported: OPTGP and CBS. Outputs include flux samples
+and optional analyses (pFBA, FVA, sensitivity), saved as tabular files.
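+
+Example invocation (illustrative; file names and parameter values are
+placeholders, not defaults):
+
+    python flux_simulation.py -td /path/to/tool_dir -mab True -mo model.csv \
+        -in cellA_bounds.csv,cellB_bounds.csv -ni cellA,cellB -ens true \
+        -a CBS -ns 1000 -th 100 -nb 1 -sd 0 -ot mean,median,quantiles \
+        -ota pFBA,FVA -ol sampling.log -idop flux_simulation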
+"""
+
 import argparse
 import utils.general_utils as utils
-from typing import Optional, List
+from typing import List
 import os
+import pandas as pd
 import numpy as np
-import pandas as pd
 import cobra
 import utils.CBS_backend as CBS_backend
 from joblib import Parallel, delayed, cpu_count
 from cobra.sampling import OptGPSampler
 import sys
+import utils.model_utils as model_utils
+
 
 ################################# process args ###############################
-def process_args(args :List[str] = None) -> argparse.Namespace:
+def process_args(args: List[str] = None) -> argparse.Namespace:
     """
     Processes command-line arguments.
-
+    
     Args:
         args (list): List of command-line arguments.
-
+    
     Returns:
         Namespace: An object containing parsed arguments.
     """
-    parser = argparse.ArgumentParser(usage = '%(prog)s [options]',
-                                     description = 'process some value\'s')
-
-    parser.add_argument('-ol', '--out_log', 
-                        help = "Output log")
+    parser = argparse.ArgumentParser(usage='%(prog)s [options]',
+                                     description='Flux sampling and analysis for COBRA models')
+    
+    parser.add_argument("-mo", "--model_upload", type=str,
+        help="path to input file with custom rules, if provided")
+    
+    parser.add_argument("-mab", "--model_and_bounds", type=str,
+        choices=['True', 'False'],
+        required=True,
+        help="upload mode: True for model+bounds, False for complete models")
+    
+    parser.add_argument("-ens", "--sampling_enabled", type=str,
+        choices=['true', 'false'],
+        required=True,
+        help="enable sampling: 'true' for sampling, 'false' for no sampling")
+    
+    parser.add_argument('-ol', '--out_log',
+                        help="Output log")
     
     parser.add_argument('-td', '--tool_dir',
-                        type = str,
-                        required = True,
-                        help = 'your tool directory')
+                        type=str,
+                        required=True,
+                        help='your tool directory')
     
     parser.add_argument('-in', '--input',
-                        required = True,
-                        type=str,
-                        help = 'inputs bounds')
-    
-    parser.add_argument('-ni', '--names',
-                        required = True,
+                        required=True,
                         type=str,
-                        help = 'cell names')
- 
-    parser.add_argument(
-        '-ms', '--model_selector', 
-        type = utils.Model, default = utils.Model.ENGRO2, choices = [utils.Model.ENGRO2, utils.Model.Custom],
-        help = 'chose which type of model you want use')
+                        help='input bounds files or complete model files')
     
-    parser.add_argument("-mo", "--model", type = str)
-    
-    parser.add_argument("-mn", "--model_name", type = str, help = "custom mode name")
+    parser.add_argument('-ni', '--name',
+                        required=True,
+                        type=str,
+                        help='cell names')
     
     parser.add_argument('-a', '--algorithm',
-                        type = str,
-                        choices = ['OPTGP', 'CBS'],
-                        required = True,
-                        help = 'choose sampling algorithm')
+                        type=str,
+                        choices=['OPTGP', 'CBS'],
+                        required=True,
+                        help='choose sampling algorithm')
     
-    parser.add_argument('-th', '--thinning', 
-                        type = int,
-                        default= 100,
-                        required=False,
-                        help = 'choose thinning')
+    parser.add_argument('-th', '--thinning',
+                        type=int,
+                        default=100,
+                        required=True,
+                        help='choose thinning')
     
-    parser.add_argument('-ns', '--n_samples', 
-                        type = int,
-                        required = True,
-                        help = 'choose how many samples')
+    parser.add_argument('-ns', '--n_samples',
+                        type=int,
+                        required=True,
+                        help='choose how many samples (set to 0 for optimization only)')
+    
+    parser.add_argument('-sd', '--seed',
+                        type=int,
+                        required=True,
+                        help='seed for random number generation')
     
-    parser.add_argument('-sd', '--seed', 
-                        type = int,
-                        required = True,
-                        help = 'seed')
+    parser.add_argument('-nb', '--n_batches',
+                        type=int,
+                        required=True,
+                        help='choose how many batches')
     
-    parser.add_argument('-nb', '--n_batches', 
-                        type = int,
-                        required = True,
-                        help = 'choose how many batches')
+    parser.add_argument('-opt', '--perc_opt',
+                        type=float,
+                        default=0.9,
+                        required=False,
+                        help='choose the fraction of optimality for FVA (0-1)')
     
-    parser.add_argument('-ot', '--output_type', 
-                        type = str,
-                        required = True,
-                        help = 'output type')
+    parser.add_argument('-ot', '--output_type',
+                        type=str,
+                        required=True,
+                        help='output type for sampling results')
     
-    parser.add_argument('-ota', '--output_type_analysis', 
-                        type = str,
-                        required = False,
-                        help = 'output type analysis')
+    parser.add_argument('-ota', '--output_type_analysis',
+                        type=str,
+                        required=False,
+                        help='output type analysis (optimization methods)')
     
-    parser.add_argument('-idop', '--output_path', 
-                        type = str,
+    parser.add_argument('-idop', '--output_path',
+                        type=str,
                         default='flux_simulation',
-                        help = 'output path for maps')
+                        help='output path for maps')
     
     ARGS = parser.parse_args(args)
     return ARGS
-
 ########################### warning ###########################################
 def warning(s :str) -> None:
     """
@@ -113,6 +136,17 @@
 
 
 def write_to_file(dataset: pd.DataFrame, name: str, keep_index:bool=False)->None:
+    """
+    Write a DataFrame to a TSV file under ARGS.output_path with a given base name.
+
+    Args:
+        dataset: The DataFrame to write.
+        name: Base file name (without extension).
+        keep_index: Whether to keep the DataFrame index in the file.
+
+    Returns:
+        None
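+
+    Note:
+        The DataFrame index is renamed to 'Reactions', and the output is
+        tab-separated even though the file carries a .csv extension.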
+    """
     dataset.index.name = 'Reactions'
     dataset.to_csv(ARGS.output_path + "/" + name + ".csv", sep = '\t', index = keep_index)
 
@@ -142,10 +176,10 @@
 
 
 
-def OPTGP_sampler(model:cobra.Model, model_name:str, n_samples:int=1000, thinning:int=100, n_batches:int=1, seed:int=0)-> None:
+def OPTGP_sampler(model: cobra.Model, model_name: str, n_samples: int = 1000, thinning: int = 100, n_batches: int = 1, seed: int = 0) -> None:
     """
     Samples the model using the OptGP sampler and saves the results to CSV files.
-
+    
     Args:
         model (cobra.Model): The COBRA model to sample from.
         model_name (str): The name of the model, used in naming output files.
@@ -153,75 +187,131 @@
         thinning (int, optional): Thinning parameter for the sampler. Default is 100.
         n_batches (int, optional): Number of batches to run. Default is 1.
         seed (int, optional): Random seed for reproducibility. Default is 0.
-    
+        
     Returns:
         None
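+
+    Note:
+        Each batch is staged as a temporary .npy file
+        ("<output_path>/<model_name>_<i>_OPTGP.npy"); batches are then merged,
+        written out via write_to_file, and the temporary files are deleted.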
     """
-
-    for i in range(0, n_batches):
+    
+    # Get reaction IDs for consistent column ordering
+    reaction_ids = [rxn.id for rxn in model.reactions]
+    
+    # Sample and save each batch as numpy file
+    for i in range(n_batches):
         optgp = OptGPSampler(model, thinning, seed)
         samples = optgp.sample(n_samples)
-        samples.to_csv(ARGS.output_path + "/" +  model_name + '_'+ str(i)+'_OPTGP.csv', index=False)
-        seed+=1
-    samplesTotal = pd.DataFrame()
-    for i in range(0, n_batches):
-        samples_batch = pd.read_csv(ARGS.output_path + "/"  +  model_name + '_'+ str(i)+'_OPTGP.csv')
-        samplesTotal = pd.concat([samplesTotal, samples_batch], ignore_index = True)
-
+        
+        # Save as numpy array (more memory efficient)
+        batch_filename = f"{ARGS.output_path}/{model_name}_{i}_OPTGP.npy"
+        np.save(batch_filename, samples.to_numpy())
+        
+        seed += 1
+    
+    # Merge all batches into a single DataFrame
+    all_samples = []
+    
+    for i in range(n_batches):
+        batch_filename = f"{ARGS.output_path}/{model_name}_{i}_OPTGP.npy"
+        batch_data = np.load(batch_filename, allow_pickle=True)
+        all_samples.append(batch_data)
+    
+    # Concatenate all batches
+    samplesTotal_array = np.vstack(all_samples)
+    
+    # Convert back to DataFrame with proper column names
+    samplesTotal = pd.DataFrame(samplesTotal_array, columns=reaction_ids)
+    
+    # Save the final merged result as CSV
     write_to_file(samplesTotal.T, model_name, True)
-
-    for i in range(0, n_batches):
-        os.remove(ARGS.output_path + "/" +   model_name + '_'+ str(i)+'_OPTGP.csv')
-    pass
+    
+    # Clean up temporary numpy files
+    for i in range(n_batches):
+        batch_filename = f"{ARGS.output_path}/{model_name}_{i}_OPTGP.npy"
+        if os.path.exists(batch_filename):
+            os.remove(batch_filename)
 
 
-def CBS_sampler(model:cobra.Model, model_name:str, n_samples:int=1000, n_batches:int=1, seed:int=0)-> None:
+def CBS_sampler(model: cobra.Model, model_name: str, n_samples: int = 1000, n_batches: int = 1, seed: int = 0) -> None:
     """
     Samples using the CBS algorithm (random objective function sampling) and saves the results to CSV files.
-
+    
     Args:
         model (cobra.Model): The COBRA model to sample from.
         model_name (str): The name of the model, used in naming output files.
         n_samples (int, optional): Number of samples per batch. Default is 1000.
         n_batches (int, optional): Number of batches to run. Default is 1.
         seed (int, optional): Random seed for reproducibility. Default is 0.
-    
+        
     Returns:
         None
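+
+    Note:
+        Batches are staged as temporary .npy files
+        ("<output_path>/<model_name>_<i>_CBS.npy"). If the GLPK backend fails
+        for a batch, sampling falls back to the COBRApy interface.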
     """
-
-    df_FVA = cobra.flux_analysis.flux_variability_analysis(model,fraction_of_optimum=0).round(6)
+    
+    # Get reaction IDs for consistent column ordering
+    reaction_ids = [reaction.id for reaction in model.reactions]
+    
+    # Perform FVA analysis once for all batches
+    df_FVA = cobra.flux_analysis.flux_variability_analysis(model, fraction_of_optimum=0).round(6)
+    
+    # Generate random objective functions for all samples across all batches
+    df_coefficients = CBS_backend.randomObjectiveFunction(model, n_samples * n_batches, df_FVA, seed=seed)
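+    # Batch i consumes the column slice [i * n_samples, (i + 1) * n_samples)
+    # of df_coefficients, so every sample uses a distinct random objective.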
     
-    df_coefficients = CBS_backend.randomObjectiveFunction(model, n_samples*n_batches, df_FVA, seed=seed)
-
-    for i in range(0, n_batches):
-        samples = pd.DataFrame(columns =[reaction.id for reaction in model.reactions], index = range(n_samples))
+    # Sample and save each batch as numpy file
+    for i in range(n_batches):
+        samples = pd.DataFrame(columns=reaction_ids, index=range(n_samples))
+        
         try:
-            CBS_backend.randomObjectiveFunctionSampling(model, n_samples, df_coefficients.iloc[:,i*n_samples:(i+1)*n_samples], samples)
+            CBS_backend.randomObjectiveFunctionSampling(
+                model, 
+                n_samples, 
+                df_coefficients.iloc[:, i * n_samples:(i + 1) * n_samples], 
+                samples
+            )
         except Exception as e:
             utils.logWarning(
-            "Warning: GLPK solver has failed for " + model_name + ". Trying with COBRA interface. Error:" + str(e),
-            ARGS.out_log)
-            CBS_backend.randomObjectiveFunctionSampling_cobrapy(model, n_samples, df_coefficients.iloc[:,i*n_samples:(i+1)*n_samples], 
-                                                    samples)
-        utils.logWarning(ARGS.output_path + "/" +  model_name + '_'+ str(i)+'_CBS.csv', ARGS.out_log)
-        samples.to_csv(ARGS.output_path + "/" +  model_name + '_'+ str(i)+'_CBS.csv', index=False)
-
-    samplesTotal = pd.DataFrame()
-    for i in range(0, n_batches):
-        samples_batch = pd.read_csv(ARGS.output_path + "/"  +  model_name + '_'+ str(i)+'_CBS.csv')
-        samplesTotal = pd.concat([samplesTotal, samples_batch], ignore_index = True)
-
+                f"Warning: GLPK solver has failed for {model_name}. Trying with COBRA interface. Error: {str(e)}",
+                ARGS.out_log
+            )
+            CBS_backend.randomObjectiveFunctionSampling_cobrapy(
+                model, 
+                n_samples, 
+                df_coefficients.iloc[:, i * n_samples:(i + 1) * n_samples],
+                samples
+            )
+        
+        # Save as numpy array (more memory efficient)
+        batch_filename = f"{ARGS.output_path}/{model_name}_{i}_CBS.npy"
+        utils.logWarning(batch_filename, ARGS.out_log)
+        np.save(batch_filename, samples.to_numpy())
+    
+    # Merge all batches into a single DataFrame
+    all_samples = []
+    
+    for i in range(n_batches):
+        batch_filename = f"{ARGS.output_path}/{model_name}_{i}_CBS.npy"
+        batch_data = np.load(batch_filename, allow_pickle=True)
+        all_samples.append(batch_data)
+    
+    # Concatenate all batches
+    samplesTotal_array = np.vstack(all_samples)
+    
+    # Convert back to DataFrame with proper column names
+    samplesTotal = pd.DataFrame(samplesTotal_array, columns=reaction_ids)
+    
+    # Save the final merged result as CSV
     write_to_file(samplesTotal.T, model_name, True)
-
-    for i in range(0, n_batches):
-        os.remove(ARGS.output_path + "/" + model_name + '_'+ str(i)+'_CBS.csv')
-    pass
+    
+    # Clean up temporary numpy files
+    for i in range(n_batches):
+        batch_filename = f"{ARGS.output_path}/{model_name}_{i}_CBS.npy"
+        if os.path.exists(batch_filename):
+            os.remove(batch_filename)
 
 
-def model_sampler(model_input_original:cobra.Model, bounds_path:str, cell_name:str)-> List[pd.DataFrame]:
+
+def model_sampler_with_bounds(model_input_original: cobra.Model, bounds_path: str, cell_name: str) -> List[pd.DataFrame]:
     """
-    Prepares the model with bounds from the dataset and performs sampling and analysis based on the selected algorithm.
+    MODE 1: Prepares the model with bounds from a separate bounds file and performs sampling.
 
     Args:
         model_input_original (cobra.Model): The original COBRA model.
@@ -234,26 +324,45 @@
 
     model_input = model_input_original.copy()
     bounds_df = read_dataset(bounds_path, "bounds dataset")
-    for rxn_index, row in bounds_df.iterrows():
-        model_input.reactions.get_by_id(rxn_index).lower_bound = row.lower_bound
-        model_input.reactions.get_by_id(rxn_index).upper_bound = row.upper_bound
     
+    # Apply bounds to model
+    for rxn_index, row in bounds_df.iterrows():
+        try:
+            model_input.reactions.get_by_id(rxn_index).lower_bound = row.lower_bound
+            model_input.reactions.get_by_id(rxn_index).upper_bound = row.upper_bound
+        except KeyError:
+            warning(f"Warning: Reaction {rxn_index} not found in model. Skipping.")
     
-    if ARGS.algorithm == 'OPTGP':
-        OPTGP_sampler(model_input, cell_name, ARGS.n_samples, ARGS.thinning, ARGS.n_batches, ARGS.seed)
+    return perform_sampling_and_analysis(model_input, cell_name)
+
 
-    elif ARGS.algorithm == 'CBS':
-        CBS_sampler(model_input,  cell_name, ARGS.n_samples, ARGS.n_batches, ARGS.seed)
+def perform_sampling_and_analysis(model_input: cobra.Model, cell_name: str) -> List[pd.DataFrame]:
+    """
+    Common function to perform sampling and analysis on a prepared model.
 
-    df_mean, df_median, df_quantiles = fluxes_statistics(cell_name, ARGS.output_types)
+    Args:
+        model_input (cobra.Model): The prepared COBRA model with bounds applied.
+        cell_name (str): Name of the cell, used to generate filenames for output.
 
-    if("fluxes" not in ARGS.output_types):
-        os.remove(ARGS.output_path + "/"  +  cell_name + '.csv')
+    Returns:
+        List[pd.DataFrame]: A list of DataFrames containing statistics and analysis results.
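+        When sampling is enabled, the list starts with [mean, median, quantiles];
+        the analyses requested via output_type_analysis follow, in the order
+        pFBA, FVA, sensitivity. main() relies on this ordering when unpacking.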
+    """
 
     returnList = []
-    returnList.append(df_mean)
-    returnList.append(df_median)
-    returnList.append(df_quantiles)
+
+    if ARGS.sampling_enabled == "true":
+    
+        if ARGS.algorithm == 'OPTGP':
+            OPTGP_sampler(model_input, cell_name, ARGS.n_samples, ARGS.thinning, ARGS.n_batches, ARGS.seed)
+        elif ARGS.algorithm == 'CBS':
+            CBS_sampler(model_input, cell_name, ARGS.n_samples, ARGS.n_batches, ARGS.seed)
+
+        df_mean, df_median, df_quantiles = fluxes_statistics(cell_name, ARGS.output_types)
+
+        if("fluxes" not in ARGS.output_types):
+            os.remove(ARGS.output_path + "/" + cell_name + '.csv')
+
+        returnList = [df_mean, df_median, df_quantiles]
 
     df_pFBA, df_FVA, df_sensitivity = fluxes_analysis(model_input, cell_name, ARGS.output_type_analysis)
 
@@ -316,7 +425,8 @@
 
 def fluxes_analysis(model:cobra.Model,  model_name:str, output_types:List)-> List[pd.DataFrame]:
     """
-    Performs flux analysis including pFBA, FVA, and sensitivity analysis.
+    Performs flux analysis including pFBA, FVA, and sensitivity analysis. The objective function
+    is assumed to be already set in the model.
 
     Args:
         model (cobra.Model): The COBRA model to analyze.
@@ -333,15 +443,14 @@
 
     for output_type in output_types:
         if(output_type == "pFBA"):
-            model.objective = "Biomass"
             solution = cobra.flux_analysis.pfba(model)
             fluxes = solution.fluxes
-            df_pFBA.loc[0,[rxn._id for rxn in model.reactions]] = fluxes.tolist()
+            df_pFBA.loc[0,[rxn.id for rxn in model.reactions]] = fluxes.tolist()
             df_pFBA = df_pFBA.reset_index(drop=True)
             df_pFBA.index = [model_name]
             df_pFBA = df_pFBA.astype(float).round(6)
         elif(output_type == "FVA"):
-            fva = cobra.flux_analysis.flux_variability_analysis(model, fraction_of_optimum=0, processes=1).round(8)
+            fva = cobra.flux_analysis.flux_variability_analysis(model, fraction_of_optimum=ARGS.perc_opt, processes=1).round(8)
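+            # Constrain FVA to the user-requested fraction of optimality (--perc_opt).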
             columns = []
             for rxn in fva.index.to_list():
                 columns.append(rxn + "_min")
@@ -354,7 +463,6 @@
             df_FVA.index = [model_name]
             df_FVA = df_FVA.astype(float).round(6)
         elif(output_type == "sensitivity"):
-            model.objective = "Biomass"
             solution_original = model.optimize().objective_value
             reactions = model.reactions
             single = cobra.flux_analysis.single_reaction_deletion(model)
@@ -367,75 +475,130 @@
     return df_pFBA, df_FVA, df_sensitivity
 
 ############################# main ###########################################
-def main(args :List[str] = None) -> None:
+def main(args: List[str] = None) -> None:
     """
-    Initializes everything and sets the program in motion based on the fronted input arguments.
+    Initialize and run sampling/analysis based on the frontend input arguments.
 
     Returns:
         None
     """
 
-    num_processors = cpu_count()
+    num_processors = max(1, cpu_count() - 1)
 
     global ARGS
     ARGS = process_args(args)
 
     if not os.path.exists(ARGS.output_path):
         os.makedirs(ARGS.output_path)
-    
-    model_type :utils.Model = ARGS.model_selector
-    if model_type is utils.Model.Custom:
-        model = model_type.getCOBRAmodel(customPath = utils.FilePath.fromStrPath(ARGS.model), customExtension = utils.FilePath.fromStrPath(ARGS.model_name).ext)
+
+    # --- Normalize inputs (the tool may pass comma-separated --input and either --name or --names) ---
+    ARGS.input_files = ARGS.input.split(",") if ARGS.input else []
+    ARGS.file_names = ARGS.name.split(",")
+    # output types (required) -> list
+    ARGS.output_types = ARGS.output_type.split(",") if ARGS.output_type else []
+    # optional analysis output types -> list or empty
+    ARGS.output_type_analysis = ARGS.output_type_analysis.split(",") if ARGS.output_type_analysis else []
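+    # e.g. --input "cellA.csv,cellB.csv" --name "cellA,cellB" yields paired
+    # (file, name) jobs below; both lists must have the same length.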
+
+    # Determine if sampling should be performed
+    if ARGS.sampling_enabled == "true":
+        perform_sampling = True
     else:
-        model = model_type.getCOBRAmodel(toolDir=ARGS.tool_dir)
+        perform_sampling = False
 
-    #Set solver verbosity to 1 to see warning and error messages only.
-    model.solver.configuration.verbosity = 1
+    print("=== INPUT FILES ===")
+    print(f"{ARGS.input_files}")
+    print(f"{ARGS.file_names}")
+    print(f"{ARGS.output_type}")
+    print(f"{ARGS.output_types}")
+    print(f"{ARGS.output_type_analysis}")
+    print(f"Sampling enabled: {perform_sampling} (n_samples: {ARGS.n_samples})")
     
-    ARGS.bounds = ARGS.input.split(",")
-    ARGS.bounds_name = ARGS.names.split(",")
-    ARGS.output_types = ARGS.output_type.split(",")
-    ARGS.output_type_analysis = ARGS.output_type_analysis.split(",")
+    if ARGS.model_and_bounds == "True":
+        # MODE 1: Model + bounds (separate files)
+        print("=== MODE 1: Model + Bounds (separate files) ===")
+        
+        # Load base model
+        if not ARGS.model_upload:
+            sys.exit("Error: model_upload is required for Mode 1")
 
+        base_model = model_utils.build_cobra_model_from_csv(ARGS.model_upload)
 
-    results = Parallel(n_jobs=num_processors)(delayed(model_sampler)(model, bounds_path, cell_name) for bounds_path, cell_name in zip(ARGS.bounds, ARGS.bounds_name))
+        validation = model_utils.validate_model(base_model)
+
+        print("\n=== MODEL VALIDATION ===")
+        for key, value in validation.items():
+            print(f"{key}: {value}")
+
+        # Set solver verbosity to 1 to see warning and error messages only.
+        base_model.solver.configuration.verbosity = 1
 
-    all_mean = pd.concat([result[0] for result in results], ignore_index=False)
-    all_median = pd.concat([result[1] for result in results], ignore_index=False)
-    all_quantiles = pd.concat([result[2] for result in results], ignore_index=False)
+        # Process each bounds file with the base model
+        results = Parallel(n_jobs=num_processors)(
+            delayed(model_sampler_with_bounds)(base_model, bounds_file, cell_name) 
+            for bounds_file, cell_name in zip(ARGS.input_files, ARGS.file_names)
+        )
 
-    if("mean" in ARGS.output_types):
-        all_mean = all_mean.fillna(0.0)
-        all_mean = all_mean.sort_index()
-        write_to_file(all_mean.T, "mean", True)
+    else:
+        # MODE 2: Multiple complete models
+        print("=== MODE 2: Multiple complete models ===")
+        
+        # Process each complete model file
+        results = Parallel(n_jobs=num_processors)(
+            delayed(perform_sampling_and_analysis)(model_utils.build_cobra_model_from_csv(model_file), cell_name) 
+            for model_file, cell_name in zip(ARGS.input_files, ARGS.file_names)
+        )
+
+    # Handle sampling outputs (only if sampling was performed)
+    if perform_sampling:
+        print("=== PROCESSING SAMPLING RESULTS ===")
+        
+        all_mean = pd.concat([result[0] for result in results], ignore_index=False)
+        all_median = pd.concat([result[1] for result in results], ignore_index=False)
+        all_quantiles = pd.concat([result[2] for result in results], ignore_index=False)
 
-    if("median" in ARGS.output_types):
-        all_median = all_median.fillna(0.0)
-        all_median = all_median.sort_index()
-        write_to_file(all_median.T, "median", True)
+        if "mean" in ARGS.output_types:
+            all_mean = all_mean.fillna(0.0)
+            all_mean = all_mean.sort_index()
+            write_to_file(all_mean.T, "mean", True)
+
+        if "median" in ARGS.output_types:
+            all_median = all_median.fillna(0.0)
+            all_median = all_median.sort_index()
+            write_to_file(all_median.T, "median", True)
+        
+        if "quantiles" in ARGS.output_types:
+            all_quantiles = all_quantiles.fillna(0.0)
+            all_quantiles = all_quantiles.sort_index()
+            write_to_file(all_quantiles.T, "quantiles", True)
+    else:
+        print("=== SAMPLING SKIPPED (n_samples = 0 or sampling disabled) ===")
+
+    # Handle optimization analysis outputs (always available)
+    print("=== PROCESSING OPTIMIZATION RESULTS ===")
     
-    if("quantiles" in ARGS.output_types):
-        all_quantiles = all_quantiles.fillna(0.0)
-        all_quantiles = all_quantiles.sort_index()
-        write_to_file(all_quantiles.T, "quantiles", True)
-
-    index_result = 3
-    if("pFBA" in ARGS.output_type_analysis):
+    # Determine the starting index for optimization results
+    # If sampling was performed, optimization results start at index 3
+    # If no sampling, optimization results start at index 0
+    index_result = 3 if perform_sampling else 0
+    
+    if "pFBA" in ARGS.output_type_analysis:
         all_pFBA = pd.concat([result[index_result] for result in results], ignore_index=False)
         all_pFBA = all_pFBA.sort_index()
         write_to_file(all_pFBA.T, "pFBA", True)
-        index_result+=1
-    if("FVA" in ARGS.output_type_analysis):
-        all_FVA= pd.concat([result[index_result] for result in results], ignore_index=False)
+        index_result += 1
+        
+    if "FVA" in ARGS.output_type_analysis:
+        all_FVA = pd.concat([result[index_result] for result in results], ignore_index=False)
         all_FVA = all_FVA.sort_index()
         write_to_file(all_FVA.T, "FVA", True)
-        index_result+=1
-    if("sensitivity" in ARGS.output_type_analysis):
+        index_result += 1
+        
+    if "sensitivity" in ARGS.output_type_analysis:
         all_sensitivity = pd.concat([result[index_result] for result in results], ignore_index=False)
         all_sensitivity = all_sensitivity.sort_index()
         write_to_file(all_sensitivity.T, "sensitivity", True)
 
-    pass
+    return
         
 ##############################################################################
 if __name__ == "__main__":