changeset 491:7a413a5ec566 draft default tip

Uploaded
author francesco_lapi
date Mon, 29 Sep 2025 15:34:59 +0000
parents c6ea189ea7e9
children
files COBRAxy/fromCSVtoCOBRA.py COBRAxy/fromCSVtoCOBRA.xml COBRAxy/metabolicModel2Tabular.py COBRAxy/metabolicModel2Tabular.xml COBRAxy/metabolic_model_setting.py COBRAxy/metabolic_model_setting.xml COBRAxy/tabular2MetabolicModel.py COBRAxy/tabular2MetabolicModel.xml
diffstat 8 files changed, 670 insertions(+), 670 deletions(-) [+]
line wrap: on
line diff
--- a/COBRAxy/fromCSVtoCOBRA.py	Mon Sep 29 15:13:21 2025 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,112 +0,0 @@
-"""
-Convert a tabular (CSV/TSV/Tabular) description of a COBRA model into a COBRA file.
-
-Supported output formats: SBML, JSON, MATLAB (.mat), YAML.
-The script logs to a user-provided file for easier debugging in Galaxy.
-"""
-
-import os
-import cobra
-import argparse
-from typing import List
-import logging
-import utils.model_utils as modelUtils
-
-ARGS : argparse.Namespace
-def process_args(args: List[str] = None) -> argparse.Namespace:
-    """
-    Parse command-line arguments for the CSV-to-COBRA conversion tool.
-
-    Returns:
-        argparse.Namespace: Parsed arguments.
-    """
-    parser = argparse.ArgumentParser(
-    usage="%(prog)s [options]",
-    description="Convert a tabular/CSV file to a COBRA model"
-    )
-
-
-    parser.add_argument("--out_log", type=str, required=True,
-    help="Output log file")
-
-
-    parser.add_argument("--input", type=str, required=True,
-    help="Input tabular file (CSV/TSV)")
-
-
-    parser.add_argument("--format", type=str, required=True, choices=["sbml", "json", "mat", "yaml"],
-    help="Model format (SBML, JSON, MATLAB, YAML)")
-
-
-    parser.add_argument("--output", type=str, required=True,
-    help="Output model file path")
-
-
-    parser.add_argument("--tool_dir", type=str, default=os.path.dirname(__file__),
-    help="Tool directory (passed from Galaxy as $__tool_directory__)")
-
-
-    return parser.parse_args(args)
-
-
-###############################- ENTRY POINT -################################
-
-def main(args: List[str] = None) -> None:
-    """
-    Entry point: parse arguments, build the COBRA model from a CSV/TSV file,
-    and save it in the requested format.
-
-    Returns:
-        None
-    """
-    global ARGS
-    ARGS = process_args(args)
-
-    # configure logging to the requested log file (overwrite each run)
-    logging.basicConfig(filename=ARGS.out_log,
-                        level=logging.DEBUG,
-                        format='%(asctime)s %(levelname)s: %(message)s',
-                        filemode='w')
-
-    logging.info('Starting fromCSVtoCOBRA tool')
-    logging.debug('Args: input=%s format=%s output=%s tool_dir=%s', ARGS.input, ARGS.format, ARGS.output, ARGS.tool_dir)
-
-    try:
-        # Basic sanity checks
-        if not os.path.exists(ARGS.input):
-            logging.error('Input file not found: %s', ARGS.input)
-
-        out_dir = os.path.dirname(os.path.abspath(ARGS.output))
-        
-        if out_dir and not os.path.isdir(out_dir):
-            try:
-                os.makedirs(out_dir, exist_ok=True)
-                logging.info('Created missing output directory: %s', out_dir)
-            except Exception as e:
-                logging.exception('Cannot create output directory: %s', out_dir)
-
-        model = modelUtils.build_cobra_model_from_csv(ARGS.input)
-
-        # Save model in requested format
-        if ARGS.format == "sbml":
-            cobra.io.write_sbml_model(model, ARGS.output)
-        elif ARGS.format == "json":
-            cobra.io.save_json_model(model, ARGS.output)
-        elif ARGS.format == "mat":
-            cobra.io.save_matlab_model(model, ARGS.output)
-        elif ARGS.format == "yaml":
-            cobra.io.save_yaml_model(model, ARGS.output)
-        else:
-            logging.error('Unknown format requested: %s', ARGS.format)
-            print(f"ERROR: Unknown format: {ARGS.format}")
-
-
-        logging.info('Model successfully written to %s (format=%s)', ARGS.output, ARGS.format)
-
-    except Exception:
-        # Log full traceback to the out_log so Galaxy users/admins can see what happened
-        logging.exception('Unhandled exception in fromCSVtoCOBRA')
-
-
-if __name__ == '__main__':
-    main()
--- a/COBRAxy/fromCSVtoCOBRA.xml	Mon Sep 29 15:13:21 2025 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-<tool id="fromCSVtoCOBRA" name="fromCSVtoCOBRA" version="1.0.0">
-    <description>Convert a tabular dataset to a COBRA model</description>
-
-    <!-- Python dependencies required for COBRApy -->
-    <requirements>
-        <requirement type="package" version="0.29.0">cobra</requirement>
-        <requirement type="package" version="1.24.4">numpy</requirement>
-        <requirement type="package" version="2.0.3">pandas</requirement>
-        <requirement type="package" version="5.2.2">lxml</requirement>
-    </requirements>
-
-    <!-- Import shared macros if available -->
-    <macros>
-        <import>marea_macros.xml</import>
-    </macros>
-
-    <!-- Command to run the Python script -->
-    <command detect_errors="exit_code"><![CDATA[
-        python $__tool_directory__/fromCSVtoCOBRA.py 
-            --tool_dir $__tool_directory__
-            --input $input 
-            --format $format 
-            --output $output
-            --out_log $log
-    ]]></command>
-
-    <!-- Tool inputs -->
-    <inputs>
-        <param name="input" type="data" format="tabular,csv,tsv" label="Input table"/>
-        <param name="format" type="select" label="Output COBRA model format">
-            <option value="sbml" selected="true">SBML (.xml)</option>
-            <option value="json">JSON (.json)</option>
-            <option value="mat">MATLAB (.mat)</option>
-            <option value="yaml">YAML (.yml)</option>
-        </param>
-    </inputs>
-
-    <!-- Tool outputs -->
-    <outputs>
-        <data name="log" format="txt" label="fromcsvtocobra - Log" />
-        <data name="output" format="xml" label="COBRA model">
-            <change_format>
-                <when input="format" value="sbml" format="xml"/>
-                <when input="format" value="json" format="json"/>
-                <when input="format" value="mat" format="mat"/>
-                <when input="format" value="yaml" format="yaml"/>
-            </change_format>
-        </data>
-    </outputs>
-
-    <!-- Help section -->
-    <help><![CDATA[
-This tool converts a tabular dataset into a COBRA model using COBRApy.
-
-**Input**
-- A tabular/CSV/TSV file describing reactions, metabolites, or stoichiometry.
-
-**Output**
-- A COBRA model in the chosen format:  
-  - SBML (.xml)  
-  - JSON (.json)  
-  - MATLAB (.mat)  
-  - YAML (.yml)  
-
-**Notes**
-- The exact table structure (columns required) depends on how you want to encode reactions and metabolites.  
-- You can extend the Python script to parse specific column formats.  
-    ]]></help>
-</tool>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/metabolicModel2Tabular.py	Mon Sep 29 15:34:59 2025 +0000
@@ -0,0 +1,368 @@
+"""
+Script to generate a tabular file of a metabolic model (built-in or custom).
+
+This script loads a COBRA model (built-in or custom), optionally applies
+medium and gene nomenclature settings, derives reaction-related metadata
+(GPR rules, formulas, bounds, objective coefficients, medium membership,
+and compartments for ENGRO2), and writes a tabular summary.
+"""
+
+import os
+import csv
+import cobra
+import argparse
+import pandas as pd
+import utils.general_utils as utils
+from typing import Optional, Tuple, List
+import utils.model_utils as modelUtils
+import logging
+from pathlib import Path
+
+
+ARGS : argparse.Namespace
+def process_args(args: List[str] = None) -> argparse.Namespace:
+    """
+    Parse command-line arguments for metabolicModel2Tabular.
+    """
+
+    parser = argparse.ArgumentParser(
+        usage="%(prog)s [options]",
+        description="Generate custom data from a given model"
+    )
+
+    parser.add_argument("--out_log", type=str, required=True,
+                        help="Output log file")
+
+    parser.add_argument("--model", type=str,
+                        help="Built-in model identifier (e.g., ENGRO2, Recon, HMRcore)")
+    parser.add_argument("--input", type=str,
+                        help="Custom model file (JSON or XML)")
+    parser.add_argument("--name", type=str, required=True,
+                        help="Model name (default or custom)")
+    
+    parser.add_argument("--medium_selector", type=str, required=True,
+                        help="Medium selection option")
+
+    parser.add_argument("--gene_format", type=str, default="Default",
+                        help="Gene nomenclature format: Default (original), ENSG, HGNC_symbol, HGNC_ID, entrez_id")
+    
+    parser.add_argument("--out_tabular", type=str,
+                        help="Output file for the merged dataset (CSV or XLSX)")
+    
+    parser.add_argument("--tool_dir", type=str, default=os.path.dirname(__file__),
+                        help="Tool directory (passed from Galaxy as $__tool_directory__)")
+
+
+    return parser.parse_args(args)
+
+################################- INPUT DATA LOADING -################################
+def load_custom_model(file_path :utils.FilePath, ext :Optional[utils.FileFormat] = None) -> cobra.Model:
+    """
+    Loads a custom model from a file, either in JSON, XML, MAT, or YML format.
+
+    Args:
+        file_path : The path to the file containing the custom model.
+        ext : explicit file extension. Necessary for standard use in galaxy because of its weird behaviour.
+
+    Raises:
+        DataErr : if the file is in an invalid format or cannot be opened for whatever reason.    
+    
+    Returns:
+        cobra.Model : the model, if successfully opened.
+    """
+    ext = ext if ext else file_path.ext
+    try:
+        if ext is utils.FileFormat.XML:
+            return cobra.io.read_sbml_model(file_path.show())
+        
+        if ext is utils.FileFormat.JSON:
+            return cobra.io.load_json_model(file_path.show())
+
+        if ext is utils.FileFormat.MAT:
+            return cobra.io.load_matlab_model(file_path.show())
+
+        if ext is utils.FileFormat.YML:
+            return cobra.io.load_yaml_model(file_path.show())
+
+    except Exception as e: raise utils.DataErr(file_path, e.__str__())
+    raise utils.DataErr(
+        file_path,
+        f"Unrecognized format '{file_path.ext}'. Only JSON, XML, MAT, YML are supported."
+    )
+
+
+###############################- FILE SAVING -################################
+def save_as_csv_filePath(data :dict, file_path :utils.FilePath, fieldNames :Tuple[str, str]) -> None:
+    """
+    Saves any dictionary-shaped data in a .csv file created at the given file_path as FilePath.
+
+    Args:
+        data : the data to be written to the file.
+        file_path : the path to the .csv file.
+        fieldNames : the names of the fields (columns) in the .csv file.
+    
+    Returns:
+        None
+    """
+    with open(file_path.show(), 'w', newline='') as csvfile:
+        writer = csv.DictWriter(csvfile, fieldnames = fieldNames, dialect="excel-tab")
+        writer.writeheader()
+
+        for key, value in data.items():
+            writer.writerow({ fieldNames[0] : key, fieldNames[1] : value })
+
+def save_as_csv(data :dict, file_path :str, fieldNames :Tuple[str, str]) -> None:
+    """
+    Saves any dictionary-shaped data in a .csv file created at the given file_path as string.
+
+    Args:
+        data : the data to be written to the file.
+        file_path : the path to the .csv file.
+        fieldNames : the names of the fields (columns) in the .csv file.
+    
+    Returns:
+        None
+    """
+    with open(file_path, 'w', newline='') as csvfile:
+        writer = csv.DictWriter(csvfile, fieldnames = fieldNames, dialect="excel-tab")
+        writer.writeheader()
+
+        for key, value in data.items():
+            writer.writerow({ fieldNames[0] : key, fieldNames[1] : value })
+
+def save_as_tabular_df(df: pd.DataFrame, path: str) -> None:
+    """
+    Save a pandas DataFrame as a tab-separated file, creating directories as needed.
+
+    Args:
+        df: The DataFrame to write.
+        path: Destination file path (will be written as TSV).
+
+    Raises:
+        DataErr: If writing the output fails for any reason.
+
+    Returns:
+        None
+    """
+    try:
+        os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
+        df.to_csv(path, sep="\t", index=False)
+    except Exception as e:
+        raise utils.DataErr(path, f"failed writing tabular output: {e}")
+    
+def is_placeholder(gid) -> bool:
+    """Return True if the gene id looks like a placeholder (e.g., 0/NA/NAN/empty)."""
+    if gid is None:
+        return True
+    s = str(gid).strip().lower()
+    return s in {"0", "", "na", "nan"}  # lowercase for simple matching
+
+def sample_valid_gene_ids(genes, limit=10):
+    """Return up to `limit` valid gene IDs, skipping placeholders (e.g., the first 0 in RECON)."""
+    out = []
+    for g in genes:
+        gid = getattr(g, "id", getattr(g, "gene_id", g))
+        if not is_placeholder(gid):
+            out.append(str(gid))
+            if len(out) >= limit:
+                break
+    return out
+
+
+###############################- ENTRY POINT -################################
+def main(args:List[str] = None) -> None:
+    """
+    Initialize and generate custom data based on the frontend input arguments.
+    
+    Returns:
+        None
+    """
+    # Parse args from frontend (Galaxy XML)
+    global ARGS
+    ARGS = process_args(args)
+
+
+    if ARGS.input:
+        # Load a custom model from file
+        model = load_custom_model(
+            utils.FilePath.fromStrPath(ARGS.input), utils.FilePath.fromStrPath(ARGS.name).ext)
+    else:
+        # Load a built-in model
+
+        try:
+            model_enum = utils.Model[ARGS.model]  # e.g., Model['ENGRO2']
+        except KeyError:
+            raise utils.ArgsErr("model", "one of Recon/ENGRO2/HMRcore/Custom_model", ARGS.model)
+
+        # Load built-in model (Model.getCOBRAmodel uses tool_dir to locate local models)
+        try:
+            model = model_enum.getCOBRAmodel(toolDir=ARGS.tool_dir)
+        except Exception as e:
+            # Wrap/normalize load errors as DataErr for consistency
+            raise utils.DataErr(ARGS.model, f"failed loading built-in model: {e}")
+
+    # Determine final model name: explicit --name overrides, otherwise use the model id
+    
+    model_name = ARGS.name if ARGS.name else ARGS.model
+    
+    if ARGS.name == "ENGRO2" and ARGS.medium_selector != "Default":
+        df_mediums = pd.read_csv(ARGS.tool_dir + "/local/medium/medium.csv", index_col = 0)
+        ARGS.medium_selector = ARGS.medium_selector.replace("_", " ")
+        medium = df_mediums[[ARGS.medium_selector]]
+        medium = medium[ARGS.medium_selector].to_dict()
+
+        # Reset all medium reactions lower bound to zero
+        for rxn_id, _ in model.medium.items():
+            model.reactions.get_by_id(rxn_id).lower_bound = float(0.0)
+        
+        # Apply selected medium uptake bounds (negative for uptake)
+        for reaction, value in medium.items():
+            if value is not None:
+                model.reactions.get_by_id(reaction).lower_bound = -float(value)
+
+    # Initialize translation_issues dictionary
+    translation_issues = {}
+    
+    if (ARGS.name == "Recon" or ARGS.name == "ENGRO2") and ARGS.gene_format != "Default":
+        logging.basicConfig(level=logging.INFO)
+        logger = logging.getLogger(__name__)
+
+        model, translation_issues = modelUtils.translate_model_genes(
+            model=model,
+            mapping_df= pd.read_csv(ARGS.tool_dir + "/local/mappings/genes_human.csv", dtype={'entrez_id': str}),
+            target_nomenclature=ARGS.gene_format,
+            source_nomenclature='HGNC_symbol',
+            logger=logger
+        )
+
+    if ARGS.name == "Custom_model" and ARGS.gene_format != "Default":
+        logging.basicConfig(level=logging.INFO)
+        logger = logging.getLogger(__name__)
+
+        tmp_check = []
+        for g in model.genes[1:5]:  # sample genes at indices 1-4 (index 0 may be a placeholder)
+            tmp_check.append(modelUtils.gene_type(g.id, "Custom_model"))
+        
+        if len(set(tmp_check)) > 1:
+            raise utils.DataErr("Custom_model", "The custom model contains genes with mixed or unrecognized nomenclature. Please ensure all genes use the same recognized nomenclature before applying gene_format conversion.")
+        else:
+            source_nomenclature = tmp_check[0]
+
+        if source_nomenclature != ARGS.gene_format:
+            model, translation_issues = modelUtils.translate_model_genes(
+                model=model,
+                mapping_df= pd.read_csv(ARGS.tool_dir + "/local/mappings/genes_human.csv", dtype={'entrez_id': str}),
+                target_nomenclature=ARGS.gene_format,
+                source_nomenclature=source_nomenclature,
+                logger=logger
+            )
+
+
+
+
+    if ARGS.name == "Custom_model" and ARGS.gene_format != "Default":
+        logger = logging.getLogger(__name__)
+
+        # Take a small, clean sample of gene IDs (skipping placeholders like 0)
+        ids_sample = sample_valid_gene_ids(model.genes, limit=10)
+        if not ids_sample:
+            raise utils.DataErr(
+                "Custom_model",
+                "No valid gene IDs found (many may be placeholders like 0)."
+            )
+
+        # Detect source nomenclature on the sample
+        types = []
+        for gid in ids_sample:
+            try:
+                t = modelUtils.gene_type(gid, "Custom_model")
+            except Exception as e:
+                # Keep it simple: skip problematic IDs
+                logger.debug(f"gene_type failed for {gid}: {e}")
+                t = None
+            if t:
+                types.append(t)
+
+        if not types:
+            raise utils.DataErr(
+                "Custom_model",
+                "Could not detect a known gene nomenclature from the sample."
+            )
+
+        unique_types = set(types)
+        if len(unique_types) > 1:
+            raise utils.DataErr(
+                "Custom_model",
+                "Mixed or inconsistent gene nomenclatures detected. "
+                "Please unify them before converting."
+            )
+
+        source_nomenclature = types[0]
+
+        # Convert only if needed
+        if source_nomenclature != ARGS.gene_format:
+            model, translation_issues = modelUtils.translate_model_genes(
+                model=model,
+                mapping_df= pd.read_csv(ARGS.tool_dir + "/local/mappings/genes_human.csv", dtype={'entrez_id': str}),
+                target_nomenclature=ARGS.gene_format,
+                source_nomenclature=source_nomenclature,
+                logger=logger
+            )
+
+    # generate data
+    rules = modelUtils.generate_rules(model, asParsed = False)
+    reactions = modelUtils.generate_reactions(model, asParsed = False)
+    bounds = modelUtils.generate_bounds(model)
+    medium = modelUtils.get_medium(model)
+    objective_function = modelUtils.extract_objective_coefficients(model)
+    
+    if ARGS.name == "ENGRO2":
+        compartments = modelUtils.generate_compartments(model)
+
+    df_rules = pd.DataFrame(list(rules.items()), columns = ["ReactionID", "GPR"])
+    df_reactions = pd.DataFrame(list(reactions.items()), columns = ["ReactionID", "Formula"])
+
+    # Create DataFrame for translation issues
+    df_translation_issues = pd.DataFrame([
+        {"ReactionID": rxn_id, "TranslationIssues": issues}
+        for rxn_id, issues in translation_issues.items()
+    ])
+    
+    df_bounds = bounds.reset_index().rename(columns = {"index": "ReactionID"})
+    df_medium = medium.rename(columns = {"reaction": "ReactionID"})
+    df_medium["InMedium"] = True
+
+    merged = df_reactions.merge(df_rules, on = "ReactionID", how = "outer")
+    merged = merged.merge(df_bounds, on = "ReactionID", how = "outer")
+    merged = merged.merge(objective_function, on = "ReactionID", how = "outer")
+    if ARGS.name == "ENGRO2": 
+        merged = merged.merge(compartments, on = "ReactionID", how = "outer")
+    merged = merged.merge(df_medium, on = "ReactionID", how = "left")
+    
+    # Add translation issues column
+    if not df_translation_issues.empty:
+        merged = merged.merge(df_translation_issues, on = "ReactionID", how = "left")
+        merged["TranslationIssues"] = merged["TranslationIssues"].fillna("")
+    else:
+        # Add empty TranslationIssues column if no issues found
+        #merged["TranslationIssues"] = ""
+        pass
+
+    merged["InMedium"] = merged["InMedium"].fillna(False)
+
+    merged = merged.sort_values(by = "InMedium", ascending = False)
+
+    if not ARGS.out_tabular:
+        raise utils.ArgsErr("out_tabular", "output path (--out_tabular) is required when output_format == tabular", ARGS.out_tabular)
+    save_as_tabular_df(merged, ARGS.out_tabular)
+    expected = ARGS.out_tabular
+
+    # verify output exists and non-empty
+    if not expected or not os.path.exists(expected) or os.path.getsize(expected) == 0:
+        raise utils.DataErr(expected, "Output not created or empty")
+
+    print("Metabolic_model_setting: completed successfully")
+
+if __name__ == '__main__':
+
+    main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/metabolicModel2Tabular.xml	Mon Sep 29 15:34:59 2025 +0000
@@ -0,0 +1,121 @@
+<tool id="metabolicModel2Tabular" name="metabolicModel2Tabular" version="2.0.0">
+
+	<requirements>
+        <requirement type="package" version="1.24.4">numpy</requirement>
+        <requirement type="package" version="2.0.3">pandas</requirement>
+		<requirement type="package" version="0.29.0">cobra</requirement>
+        <requirement type="package" version="5.2.2">lxml</requirement>
+	</requirements>
+
+    <macros>
+        <import>marea_macros.xml</import>
+    </macros>
+
+    <command detect_errors="exit_code">
+        <![CDATA[
+      	python $__tool_directory__/metabolicModel2Tabular.py
+        --tool_dir $__tool_directory__
+        --medium_selector $cond_model.cond_medium.medium_selector
+        #if $cond_model.model_selector == 'Custom_model'
+            --input $cond_model.input
+            --name $cond_model.input.element_identifier
+        #else
+            --model $cond_model.model_selector
+            --name $cond_model.model_selector
+        #end if
+
+        --gene_format $cond_model.gene_format
+        
+        --out_log $log
+        --out_tabular $out_tabular
+        ]]>
+    </command>
+    <inputs>
+        <conditional name="cond_model">
+            <expand macro="options_model"/>
+            
+            <!-- ENGRO2 -->
+            <when value="ENGRO2">
+                <param name="name" argument="--name" type="text" value="ENGRO2" hidden="true" />
+                <conditional name="cond_medium">
+                    <expand macro="options_ras_to_bounds_medium"/>
+                </conditional>
+
+                <param name="gene_format" argument="--gene_format" type="select" label="Gene nomenclature format:">
+                    <option value="Default" selected="true">Keep original gene nomenclature (HGNC Symbol)</option>
+                    <option value="ENSG">ENSG (Ensembl Gene ID)</option>
+                    <option value="HGNC_ID">HGNC ID</option>
+                    <option value="entrez_id">Entrez Gene ID</option>
+                </param>
+            </when>
+
+            <!-- Recon -->
+            <when value="Recon">
+                <param name="name" argument="--name" type="text" value="Recon" hidden="true" />
+                <conditional name="cond_medium">
+                    <param name="medium_selector" argument="--medium_selector" type="select" label="Medium">
+                        <option value="Default" selected="true">Default (Recon built-in medium)</option>
+                    </param>
+                    <when value="Default">
+                        <!-- No additional parameters required -->
+                    </when>
+                </conditional>
+                <param name="gene_format" argument="--gene_format" type="select" label="Gene nomenclature format:">
+                    <option value="Default" selected="true">Keep original gene nomenclature (HGNC Symbol)</option>
+                    <option value="ENSG">ENSG (Ensembl Gene ID)</option>
+                    <option value="HGNC_ID">HGNC ID</option>
+                    <option value="entrez_id">Entrez Gene ID</option>
+                </param>
+            </when>
+
+            <!-- Custom model -->
+            <when value="Custom_model">
+                <param name="input" argument="--input" type="data" format="json,xml" label="Custom model file:" />
+                <conditional name="cond_medium">
+                    <param name="medium_selector" argument="--medium_selector" type="select" label="Medium">
+                        <option value="Default" selected="true">Don't use a separate medium file (use model defaults)</option>
+                    </param>
+                    <when value="Default">
+                        <!-- No additional parameters required -->
+                    </when>
+                </conditional>
+                <param name="gene_format" argument="--gene_format" type="select" label="Gene nomenclature format:">
+                    <option value="Default" selected="true">Keep original gene nomenclature</option>
+                    <option value="HGNC_symbol">HGNC Symbol</option>
+                    <option value="ENSG">ENSG (Ensembl Gene ID)</option>
+                    <option value="HGNC_ID">HGNC ID</option>
+                    <option value="entrez_id">Entrez Gene ID</option>
+                </param>
+            </when>
+        </conditional>
+
+    </inputs>
+
+    <outputs>
+        <data name="log" format="txt" label="MetabolicModelSetting - Log" />
+        <data name="out_tabular" format="tabular" label="${cond_model.model_selector}_model_tabular" optional="true"/>
+    </outputs>
+
+    <help>
+    <![CDATA[
+What it does
+-------------
+This tool generates one file containing the main information of the metabolic model, starting from a custom model. 
+This file can be used as input for the RAS generator, RPS generator, and flux simulator tools.
+
+Accepted files:
+    - A model: JSON, XML, MAT or YAML (.yml) file reporting reactions and rules contained in the model. Supported compressed formats: .zip, .gz and .bz2. Filename must follow the pattern: {model_name}.{extension}.[zip|gz|bz2]
+
+Output:
+-------------
+
+The tool generates:
+	- a tabular file (.tabular) containing reaction IDs, reaction formula, GPR rules, reaction bounds, the objective function coefficients, the pathways in which the reaction is involved and which reactions are part of the medium.
+    - a log file (.txt).
+    ]]>
+    </help>
+    <expand macro="citations" />
+
+</tool>
+
+
--- a/COBRAxy/metabolic_model_setting.py	Mon Sep 29 15:13:21 2025 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,368 +0,0 @@
-"""
-Scripts to generate a tabular file of a metabolic model (built-in or custom).
-
-This script loads a COBRA model (built-in or custom), optionally applies
-medium and gene nomenclature settings, derives reaction-related metadata
-(GPR rules, formulas, bounds, objective coefficients, medium membership,
-and compartments for ENGRO2), and writes a tabular summary.
-"""
-
-import os
-import csv
-import cobra
-import argparse
-import pandas as pd
-import utils.general_utils as utils
-from typing import Optional, Tuple, List
-import utils.model_utils as modelUtils
-import logging
-from pathlib import Path
-
-
-ARGS : argparse.Namespace
-def process_args(args: List[str] = None) -> argparse.Namespace:
-    """
-    Parse command-line arguments for metabolic_model_setting.
-    """
-
-    parser = argparse.ArgumentParser(
-        usage="%(prog)s [options]",
-        description="Generate custom data from a given model"
-    )
-
-    parser.add_argument("--out_log", type=str, required=True,
-                        help="Output log file")
-
-    parser.add_argument("--model", type=str,
-                        help="Built-in model identifier (e.g., ENGRO2, Recon, HMRcore)")
-    parser.add_argument("--input", type=str,
-                        help="Custom model file (JSON or XML)")
-    parser.add_argument("--name", type=str, required=True,
-                        help="Model name (default or custom)")
-    
-    parser.add_argument("--medium_selector", type=str, required=True,
-                        help="Medium selection option")
-
-    parser.add_argument("--gene_format", type=str, default="Default",
-                        help="Gene nomenclature format: Default (original), ENSNG, HGNC_SYMBOL, HGNC_ID, ENTREZ")
-    
-    parser.add_argument("--out_tabular", type=str,
-                        help="Output file for the merged dataset (CSV or XLSX)")
-    
-    parser.add_argument("--tool_dir", type=str, default=os.path.dirname(__file__),
-                        help="Tool directory (passed from Galaxy as $__tool_directory__)")
-
-
-    return parser.parse_args(args)
-
-################################- INPUT DATA LOADING -################################
-def load_custom_model(file_path :utils.FilePath, ext :Optional[utils.FileFormat] = None) -> cobra.Model:
-    """
-    Loads a custom model from a file, either in JSON, XML, MAT, or YML format.
-
-    Args:
-        file_path : The path to the file containing the custom model.
-        ext : explicit file extension. Necessary for standard use in galaxy because of its weird behaviour.
-
-    Raises:
-        DataErr : if the file is in an invalid format or cannot be opened for whatever reason.    
-    
-    Returns:
-        cobra.Model : the model, if successfully opened.
-    """
-    ext = ext if ext else file_path.ext
-    try:
-        if ext is utils.FileFormat.XML:
-            return cobra.io.read_sbml_model(file_path.show())
-        
-        if ext is utils.FileFormat.JSON:
-            return cobra.io.load_json_model(file_path.show())
-
-        if ext is utils.FileFormat.MAT:
-            return cobra.io.load_matlab_model(file_path.show())
-
-        if ext is utils.FileFormat.YML:
-            return cobra.io.load_yaml_model(file_path.show())
-
-    except Exception as e: raise utils.DataErr(file_path, e.__str__())
-    raise utils.DataErr(
-        file_path,
-        f"Unrecognized format '{file_path.ext}'. Only JSON, XML, MAT, YML are supported."
-    )
-
-
-###############################- FILE SAVING -################################
-def save_as_csv_filePath(data :dict, file_path :utils.FilePath, fieldNames :Tuple[str, str]) -> None:
-    """
-    Saves any dictionary-shaped data in a .csv file created at the given file_path as FilePath.
-
-    Args:
-        data : the data to be written to the file.
-        file_path : the path to the .csv file.
-        fieldNames : the names of the fields (columns) in the .csv file.
-    
-    Returns:
-        None
-    """
-    with open(file_path.show(), 'w', newline='') as csvfile:
-        writer = csv.DictWriter(csvfile, fieldnames = fieldNames, dialect="excel-tab")
-        writer.writeheader()
-
-        for key, value in data.items():
-            writer.writerow({ fieldNames[0] : key, fieldNames[1] : value })
-
-def save_as_csv(data :dict, file_path :str, fieldNames :Tuple[str, str]) -> None:
-    """
-    Saves any dictionary-shaped data in a .csv file created at the given file_path as string.
-
-    Args:
-        data : the data to be written to the file.
-        file_path : the path to the .csv file.
-        fieldNames : the names of the fields (columns) in the .csv file.
-    
-    Returns:
-        None
-    """
-    with open(file_path, 'w', newline='') as csvfile:
-        writer = csv.DictWriter(csvfile, fieldnames = fieldNames, dialect="excel-tab")
-        writer.writeheader()
-
-        for key, value in data.items():
-            writer.writerow({ fieldNames[0] : key, fieldNames[1] : value })
-
-def save_as_tabular_df(df: pd.DataFrame, path: str) -> None:
-    """
-    Save a pandas DataFrame as a tab-separated file, creating directories as needed.
-
-    Args:
-        df: The DataFrame to write.
-        path: Destination file path (will be written as TSV).
-
-    Raises:
-        DataErr: If writing the output fails for any reason.
-
-    Returns:
-        None
-    """
-    try:
-        os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
-        df.to_csv(path, sep="\t", index=False)
-    except Exception as e:
-        raise utils.DataErr(path, f"failed writing tabular output: {e}")
-    
-def is_placeholder(gid) -> bool:
-    """Return True if the gene id looks like a placeholder (e.g., 0/NA/NAN/empty)."""
-    if gid is None:
-        return True
-    s = str(gid).strip().lower()
-    return s in {"0", "", "na", "nan"}  # lowercase for simple matching
-
-def sample_valid_gene_ids(genes, limit=10):
-    """Yield up to `limit` valid gene IDs, skipping placeholders (e.g., the first 0 in RECON)."""
-    out = []
-    for g in genes:
-        gid = getattr(g, "id", getattr(g, "gene_id", g))
-        if not is_placeholder(gid):
-            out.append(str(gid))
-            if len(out) >= limit:
-                break
-    return out
-
-
-###############################- ENTRY POINT -################################
-def main(args:List[str] = None) -> None:
-    """
-    Initialize and generate custom data based on the frontend input arguments.
-    
-    Returns:
-        None
-    """
-    # Parse args from frontend (Galaxy XML)
-    global ARGS
-    ARGS = process_args(args)
-
-
-    if ARGS.input:
-        # Load a custom model from file
-        model = load_custom_model(
-            utils.FilePath.fromStrPath(ARGS.input), utils.FilePath.fromStrPath(ARGS.name).ext)
-    else:
-        # Load a built-in model
-
-        try:
-            model_enum = utils.Model[ARGS.model]  # e.g., Model['ENGRO2']
-        except KeyError:
-            raise utils.ArgsErr("model", "one of Recon/ENGRO2/HMRcore/Custom_model", ARGS.model)
-
-        # Load built-in model (Model.getCOBRAmodel uses tool_dir to locate local models)
-        try:
-            model = model_enum.getCOBRAmodel(toolDir=ARGS.tool_dir)
-        except Exception as e:
-            # Wrap/normalize load errors as DataErr for consistency
-            raise utils.DataErr(ARGS.model, f"failed loading built-in model: {e}")
-
-    # Determine final model name: explicit --name overrides, otherwise use the model id
-    
-    model_name = ARGS.name if ARGS.name else ARGS.model
-    
-    if ARGS.name == "ENGRO2" and ARGS.medium_selector != "Default":
-        df_mediums = pd.read_csv(ARGS.tool_dir + "/local/medium/medium.csv", index_col = 0)
-        ARGS.medium_selector = ARGS.medium_selector.replace("_", " ")
-        medium = df_mediums[[ARGS.medium_selector]]
-        medium = medium[ARGS.medium_selector].to_dict()
-
-        # Reset all medium reactions lower bound to zero
-        for rxn_id, _ in model.medium.items():
-            model.reactions.get_by_id(rxn_id).lower_bound = float(0.0)
-        
-        # Apply selected medium uptake bounds (negative for uptake)
-        for reaction, value in medium.items():
-            if value is not None:
-                model.reactions.get_by_id(reaction).lower_bound = -float(value)
-
-    # Initialize translation_issues dictionary
-    translation_issues = {}
-    
-    if (ARGS.name == "Recon" or ARGS.name == "ENGRO2") and ARGS.gene_format != "Default":
-        logging.basicConfig(level=logging.INFO)
-        logger = logging.getLogger(__name__)
-
-        model, translation_issues = modelUtils.translate_model_genes(
-            model=model,
-            mapping_df= pd.read_csv(ARGS.tool_dir + "/local/mappings/genes_human.csv", dtype={'entrez_id': str}),
-            target_nomenclature=ARGS.gene_format,
-            source_nomenclature='HGNC_symbol',
-            logger=logger
-        )
-
-    if ARGS.name == "Custom_model" and ARGS.gene_format != "Default":
-        logging.basicConfig(level=logging.INFO)
-        logger = logging.getLogger(__name__)
-
-        tmp_check = []
-        for g in model.genes[1:5]:  # check first 3 genes only
-            tmp_check.append(modelUtils.gene_type(g.id, "Custom_model"))
-        
-        if len(set(tmp_check)) > 1:
-            raise utils.DataErr("Custom_model", "The custom model contains genes with mixed or unrecognized nomenclature. Please ensure all genes use the same recognized nomenclature before applying gene_format conversion.")
-        else:
-            source_nomenclature = tmp_check[0]
-
-        if source_nomenclature != ARGS.gene_format:
-            model, translation_issues = modelUtils.translate_model_genes(
-                model=model,
-                mapping_df= pd.read_csv(ARGS.tool_dir + "/local/mappings/genes_human.csv", dtype={'entrez_id': str}),
-                target_nomenclature=ARGS.gene_format,
-                source_nomenclature=source_nomenclature,
-                logger=logger
-            )
-
-
-
-
-    if ARGS.name == "Custom_model" and ARGS.gene_format != "Default":
-        logger = logging.getLogger(__name__)
-
-        # Take a small, clean sample of gene IDs (skipping placeholders like 0)
-        ids_sample = sample_valid_gene_ids(model.genes, limit=10)
-        if not ids_sample:
-            raise utils.DataErr(
-                "Custom_model",
-                "No valid gene IDs found (many may be placeholders like 0)."
-            )
-
-        # Detect source nomenclature on the sample
-        types = []
-        for gid in ids_sample:
-            try:
-                t = modelUtils.gene_type(gid, "Custom_model")
-            except Exception as e:
-                # Keep it simple: skip problematic IDs
-                logger.debug(f"gene_type failed for {gid}: {e}")
-                t = None
-            if t:
-                types.append(t)
-
-        if not types:
-            raise utils.DataErr(
-                "Custom_model",
-                "Could not detect a known gene nomenclature from the sample."
-            )
-
-        unique_types = set(types)
-        if len(unique_types) > 1:
-            raise utils.DataErr(
-                "Custom_model",
-                "Mixed or inconsistent gene nomenclatures detected. "
-                "Please unify them before converting."
-            )
-
-        source_nomenclature = types[0]
-
-        # Convert only if needed
-        if source_nomenclature != ARGS.gene_format:
-            model, translation_issues = modelUtils.translate_model_genes(
-                model=model,
-                mapping_df= pd.read_csv(ARGS.tool_dir + "/local/mappings/genes_human.csv", dtype={'entrez_id': str}),
-                target_nomenclature=ARGS.gene_format,
-                source_nomenclature=source_nomenclature,
-                logger=logger
-            )
-
-    # generate data
-    rules = modelUtils.generate_rules(model, asParsed = False)
-    reactions = modelUtils.generate_reactions(model, asParsed = False)
-    bounds = modelUtils.generate_bounds(model)
-    medium = modelUtils.get_medium(model)
-    objective_function = modelUtils.extract_objective_coefficients(model)
-    
-    if ARGS.name == "ENGRO2":
-        compartments = modelUtils.generate_compartments(model)
-
-    df_rules = pd.DataFrame(list(rules.items()), columns = ["ReactionID", "GPR"])
-    df_reactions = pd.DataFrame(list(reactions.items()), columns = ["ReactionID", "Formula"])
-
-    # Create DataFrame for translation issues
-    df_translation_issues = pd.DataFrame([
-        {"ReactionID": rxn_id, "TranslationIssues": issues}
-        for rxn_id, issues in translation_issues.items()
-    ])
-    
-    df_bounds = bounds.reset_index().rename(columns = {"index": "ReactionID"})
-    df_medium = medium.rename(columns = {"reaction": "ReactionID"})
-    df_medium["InMedium"] = True
-
-    merged = df_reactions.merge(df_rules, on = "ReactionID", how = "outer")
-    merged = merged.merge(df_bounds, on = "ReactionID", how = "outer")
-    merged = merged.merge(objective_function, on = "ReactionID", how = "outer")
-    if ARGS.name == "ENGRO2": 
-        merged = merged.merge(compartments, on = "ReactionID", how = "outer")
-    merged = merged.merge(df_medium, on = "ReactionID", how = "left")
-    
-    # Add translation issues column
-    if not df_translation_issues.empty:
-        merged = merged.merge(df_translation_issues, on = "ReactionID", how = "left")
-        merged["TranslationIssues"] = merged["TranslationIssues"].fillna("")
-    else:
-        # Add empty TranslationIssues column if no issues found
-        #merged["TranslationIssues"] = ""
-        pass
-
-    merged["InMedium"] = merged["InMedium"].fillna(False)
-
-    merged = merged.sort_values(by = "InMedium", ascending = False)
-
-    if not ARGS.out_tabular:
-        raise utils.ArgsErr("out_tabular", "output path (--out_tabular) is required when output_format == tabular", ARGS.out_tabular)
-    save_as_tabular_df(merged, ARGS.out_tabular)
-    expected = ARGS.out_tabular
-
-    # verify output exists and non-empty
-    if not expected or not os.path.exists(expected) or os.path.getsize(expected) == 0:
-        raise utils.DataErr(expected, "Output not created or empty")
-
-    print("Metabolic_model_setting: completed successfully")
-
-if __name__ == '__main__':
-
-    main()
--- a/COBRAxy/metabolic_model_setting.xml	Mon Sep 29 15:13:21 2025 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,121 +0,0 @@
-<tool id="Metabolic_model_setting" name="Metabolic model setting" version="2.0.0">
-
-	<requirements>
-        <requirement type="package" version="1.24.4">numpy</requirement>
-        <requirement type="package" version="2.0.3">pandas</requirement>
-		<requirement type="package" version="0.29.0">cobra</requirement>
-        <requirement type="package" version="5.2.2">lxml</requirement>
-	</requirements>
-
-    <macros>
-        <import>marea_macros.xml</import>
-    </macros>
-
-    <command detect_errors="exit_code">
-        <![CDATA[
-      	python $__tool_directory__/metabolic_model_setting.py
-        --tool_dir $__tool_directory__
-        --medium_selector $cond_model.cond_medium.medium_selector
-        #if $cond_model.model_selector == 'Custom_model'
-            --input $cond_model.input
-            --name $cond_model.input.element_identifier
-        #else
-            --model $cond_model.model_selector
-            --name $cond_model.model_selector
-        #end if
-
-        --gene_format $cond_model.gene_format
-        
-        --out_log $log
-        --out_tabular $out_tabular
-        ]]>
-    </command>
-    <inputs>
-        <conditional name="cond_model">
-            <expand macro="options_model"/>
-            
-            <!-- ENGRO2 -->
-            <when value="ENGRO2">
-                <param name="name" argument="--name" type="text" value="ENGRO2" hidden="true" />
-                <conditional name="cond_medium">
-                    <expand macro="options_ras_to_bounds_medium"/>
-                </conditional>
-
-                <param name="gene_format" argument="--gene_format" type="select" label="Gene nomenclature format:">
-                    <option value="Default" selected="true">Keep original gene nomenclature (HGNC Symbol)</option>
-                    <option value="ENSG">ENSNG (Ensembl Gene ID)</option>
-                    <option value="HGNC_ID">HGNC ID</option>
-                    <option value="entrez_id">Entrez Gene ID</option>
-                </param>
-            </when>
-
-            <!-- Recon -->
-            <when value="Recon">
-                <param name="name" argument="--name" type="text" value="Recon" hidden="true" />
-                <conditional name="cond_medium">
-                    <param name="medium_selector" argument="--medium_selector" type="select" label="Medium">
-                        <option value="Default" selected="true">Default (Recon built-in medium)</option>
-                    </param>
-                    <when value="Default">
-                        <!-- Nessun parametro aggiuntivo necessario -->
-                    </when>
-                </conditional>
-                <param name="gene_format" argument="--gene_format" type="select" label="Gene nomenclature format:">
-                    <option value="Default" selected="true">Keep original gene nomenclature (HGNC Symbol)</option>
-                    <option value="ENSG">ENSNG (Ensembl Gene ID)</option>
-                    <option value="HGNC_ID">HGNC ID</option>
-                    <option value="entrez_id">Entrez Gene ID</option>
-                </param>
-            </when>
-
-            <!-- Custom model -->
-            <when value="Custom_model">
-                <param name="input" argument="--input" type="data" format="json,xml" label="Custom model file:" />
-                <conditional name="cond_medium">
-                    <param name="medium_selector" argument="--medium_selector" type="select" label="Medium">
-                        <option value="Default" selected="true">Don't use a separate medium file (use model defaults)</option>
-                    </param>
-                    <when value="Default">
-                        <!-- Nessun parametro aggiuntivo necessario -->
-                    </when>
-                </conditional>
-                <param name="gene_format" argument="--gene_format" type="select" label="Gene nomenclature format:">
-                    <option value="Default" selected="true">Keep original gene nomenclature</option>
-                    <option value="HGNC_symbol">HGNC Symbol</option>
-                    <option value="ENSG">ENSNG (Ensembl Gene ID)</option>
-                    <option value="HGNC_ID">HGNC ID</option>
-                    <option value="entrez_id">Entrez Gene ID</option>
-                </param>
-            </when>
-        </conditional>
-
-    </inputs>
-
-    <outputs>
-        <data name="log" format="txt" label="MetabolicModelSetting - Log" />
-        <data name="out_tabular" format="tabular" label="${cond_model.model_selector}_model_tabular" optional="true"/>
-    </outputs>
-
-    <help>
-    <![CDATA[
-What it does
--------------
-This tool generates one file containing the main information of the metabolic model, starting from a custom model. 
-This file can be used as input for the RAS  generator tool, RPS generator tools, and flux simulator tool.
-
-Accepted files:
-    - A model: JSON, XML, MAT or YAML (.yml) file reporting reactions and rules contained in the model. Supported compressed formats: .zip, .gz and .bz2. Filename must follow the pattern: {model_name}.{extension}.[zip|gz|bz2]
-
-Output:
--------------
-
-The tool generates a tabular file containing:
-	- a tabular file (.tabular) containing reaction IDs, reaction formula, GPR rules, reaction bounds, the objective function coefficients, the pathways in which the reaction is involved and which reactions are part of the medium.
-    - a log file (.txt).
-    ]]>
-    </help>
-    <expand macro="citations" />
-
-</tool>
-
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/tabular2MetabolicModel.py	Mon Sep 29 15:34:59 2025 +0000
@@ -0,0 +1,112 @@
+"""
+Convert a tabular (CSV/TSV/Tabular) description of a COBRA model into a COBRA file.
+
+Supported output formats: SBML, JSON, MATLAB (.mat), YAML.
+The script logs to a user-provided file for easier debugging in Galaxy.
+"""
+
+import os
+import cobra
+import argparse
+from typing import List
+import logging
+import utils.model_utils as modelUtils
+
+ARGS : argparse.Namespace
+def process_args(args: List[str] = None) -> argparse.Namespace:
+    """
+    Parse command-line arguments for the CSV-to-COBRA conversion tool.
+
+    Returns:
+        argparse.Namespace: Parsed arguments.
+    """
+    parser = argparse.ArgumentParser(
+    usage="%(prog)s [options]",
+    description="Convert a tabular/CSV file to a COBRA model"
+    )
+
+
+    parser.add_argument("--out_log", type=str, required=True,
+    help="Output log file")
+
+
+    parser.add_argument("--input", type=str, required=True,
+    help="Input tabular file (CSV/TSV)")
+
+
+    parser.add_argument("--format", type=str, required=True, choices=["sbml", "json", "mat", "yaml"],
+    help="Model format (SBML, JSON, MATLAB, YAML)")
+
+
+    parser.add_argument("--output", type=str, required=True,
+    help="Output model file path")
+
+
+    parser.add_argument("--tool_dir", type=str, default=os.path.dirname(__file__),
+    help="Tool directory (passed from Galaxy as $__tool_directory__)")
+
+
+    return parser.parse_args(args)
+
+
+###############################- ENTRY POINT -################################
+
+def main(args: List[str] = None) -> None:
+    """
+    Entry point: parse arguments, build the COBRA model from a CSV/TSV file,
+    and save it in the requested format.
+
+    Returns:
+        None
+    """
+    global ARGS
+    ARGS = process_args(args)
+
+    # configure logging to the requested log file (overwrite each run)
+    logging.basicConfig(filename=ARGS.out_log,
+                        level=logging.DEBUG,
+                        format='%(asctime)s %(levelname)s: %(message)s',
+                        filemode='w')
+
+    logging.info('Starting fromCSVtoCOBRA tool')
+    logging.debug('Args: input=%s format=%s output=%s tool_dir=%s', ARGS.input, ARGS.format, ARGS.output, ARGS.tool_dir)
+
+    try:
+        # Basic sanity checks
+        if not os.path.exists(ARGS.input):
+            logging.error('Input file not found: %s', ARGS.input)
+
+        out_dir = os.path.dirname(os.path.abspath(ARGS.output))
+        
+        if out_dir and not os.path.isdir(out_dir):
+            try:
+                os.makedirs(out_dir, exist_ok=True)
+                logging.info('Created missing output directory: %s', out_dir)
+            except Exception as e:
+                logging.exception('Cannot create output directory: %s', out_dir)
+
+        model = modelUtils.build_cobra_model_from_csv(ARGS.input)
+
+        # Save model in requested format
+        if ARGS.format == "sbml":
+            cobra.io.write_sbml_model(model, ARGS.output)
+        elif ARGS.format == "json":
+            cobra.io.save_json_model(model, ARGS.output)
+        elif ARGS.format == "mat":
+            cobra.io.save_matlab_model(model, ARGS.output)
+        elif ARGS.format == "yaml":
+            cobra.io.save_yaml_model(model, ARGS.output)
+        else:
+            logging.error('Unknown format requested: %s', ARGS.format)
+            print(f"ERROR: Unknown format: {ARGS.format}")
+
+
+        logging.info('Model successfully written to %s (format=%s)', ARGS.output, ARGS.format)
+
+    except Exception:
+        # Log full traceback to the out_log so Galaxy users/admins can see what happened
+        logging.exception('Unhandled exception in fromCSVtoCOBRA')
+
+
+if __name__ == '__main__':
+    main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/tabular2MetabolicModel.xml	Mon Sep 29 15:34:59 2025 +0000
@@ -0,0 +1,69 @@
+<tool id="tabular2MetabolicModel" name="tabular2MetabolicModel" version="1.0.0">
+    <description>Convert a tabular dataset to a COBRA model</description>
+
+    <!-- Python dependencies required for COBRApy -->
+    <requirements>
+        <requirement type="package" version="0.29.0">cobra</requirement>
+        <requirement type="package" version="1.24.4">numpy</requirement>
+        <requirement type="package" version="2.0.3">pandas</requirement>
+        <requirement type="package" version="5.2.2">lxml</requirement>
+    </requirements>
+
+    <!-- Import shared macros if available -->
+    <macros>
+        <import>marea_macros.xml</import>
+    </macros>
+
+    <!-- Command to run the Python script -->
+    <command detect_errors="exit_code"><![CDATA[
+        python $__tool_directory__/tabular2MetabolicModel.py
+            --tool_dir $__tool_directory__
+            --input $input 
+            --format $format 
+            --output $output
+            --out_log $log
+    ]]></command>
+
+    <!-- Tool inputs -->
+    <inputs>
+        <param name="input" type="data" format="tabular,csv,tsv" label="Input table"/>
+        <param name="format" type="select" label="Output COBRA model format">
+            <option value="sbml" selected="true">SBML (.xml)</option>
+            <option value="json">JSON (.json)</option>
+            <option value="mat">MATLAB (.mat)</option>
+            <option value="yaml">YAML (.yml)</option>
+        </param>
+    </inputs>
+
+    <!-- Tool outputs -->
+    <outputs>
+        <data name="log" format="txt" label="tabular2MetabolicModel - Log" />
+        <data name="output" format="xml" label="COBRA model">
+            <change_format>
+                <when input="format" value="sbml" format="xml"/>
+                <when input="format" value="json" format="json"/>
+                <when input="format" value="mat" format="mat"/>
+                <when input="format" value="yaml" format="yaml"/>
+            </change_format>
+        </data>
+    </outputs>
+
+    <!-- Help section -->
+    <help><![CDATA[
+This tool converts a tabular dataset into a COBRA model using COBRApy.
+
+**Input**
+- A tabular/CSV/TSV file describing reactions, metabolites, or stoichiometry.
+
+**Output**
+- A COBRA model in the chosen format:  
+  - SBML (.xml)  
+  - JSON (.json)  
+  - MATLAB (.mat)  
+  - YAML (.yml)  
+
+**Notes**
+- The exact table structure (columns required) depends on how you want to encode reactions and metabolites.  
+- You can extend the Python script to parse specific column formats.  
+    ]]></help>
+</tool>