changeset 381:0a3ca20848f3 draft

Uploaded
author francesco_lapi
date Fri, 05 Sep 2025 09:18:26 +0000
parents 03a7ba63813f
children 15f05a694c94
files COBRAxy/custom_data_generator.py COBRAxy/custom_data_generator.xml COBRAxy/dist/cobraxy-0.1.0.tar.gz COBRAxy/dist/cobraxy/meta.yaml COBRAxy/dist/release/channeldata.json COBRAxy/dist/release/index.html COBRAxy/dist/release/noarch/.cache/cache.db COBRAxy/dist/release/noarch/cobraxy-0.1.0-py_0.conda COBRAxy/dist/release/noarch/current_repodata.json COBRAxy/dist/release/noarch/index.html COBRAxy/dist/release/noarch/repodata.json COBRAxy/dist/release/noarch/repodata_from_packages.json COBRAxy/dist/release/win-64/.cache/cache.db COBRAxy/dist/release/win-64/current_repodata.json COBRAxy/dist/release/win-64/index.html COBRAxy/dist/release/win-64/repodata.json COBRAxy/dist/release/win-64/repodata_from_packages.json COBRAxy/flux_simulation.xml COBRAxy/local/pickle files/HMRcore_genes.p COBRAxy/local/pickle files/HMRcore_rules.p COBRAxy/local/pickle files/Recon_genes.p COBRAxy/local/pickle files/Recon_rules.p COBRAxy/local/pickle files/synonyms.pickle COBRAxy/marea_cluster.py COBRAxy/marea_macros.xml COBRAxy/ras_generator.py COBRAxy/ras_to_bounds.xml COBRAxy/rps_generator.py COBRAxy/setup.py COBRAxy/utils/CBS_backend.py COBRAxy/utils/general_utils.py COBRAxy/utils/reaction_parsing.py
diffstat 27 files changed, 1340 insertions(+), 932 deletions(-) [+]
line wrap: on
line diff
--- a/COBRAxy/custom_data_generator.py	Fri Sep 05 08:27:04 2025 +0000
+++ b/COBRAxy/custom_data_generator.py	Fri Sep 05 09:18:26 2025 +0000
@@ -220,9 +220,6 @@
     global ARGS
     ARGS = process_args(args)
 
-    # this is the worst thing I've seen so far, congrats to the former MaREA devs for suggesting this!
-    #if os.path.isdir(ARGS.output_path) == False: 
-    #    os.makedirs(ARGS.output_path)
 
     if ARGS.input:
         # load custom model
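Note: the guarded directory creation removed above is normally collapsed into a single idiomatic call; a minimal standalone sketch (the function name is illustrative):

    import os

    def ensure_output_dir(output_path: str) -> None:
        # Equivalent to the removed isdir()/makedirs() pair, without the pre-check race.
        os.makedirs(output_path, exist_ok=True)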
--- a/COBRAxy/custom_data_generator.xml	Fri Sep 05 08:27:04 2025 +0000
+++ b/COBRAxy/custom_data_generator.xml	Fri Sep 05 09:18:26 2025 +0000
@@ -132,8 +132,7 @@
 Reactions and rules can be used as inputs for the RAS and RPS generator tools.
 
 Accepted files:
-    - A model: JSON or XML file reporting reactions and rules contained in the model. Supported compressed formats: .zip, .gz and .bz2 - Filename must end with .json.zip (.xml.zip), .json.gz (.xml.gz) or .json.bz2 (.xml.bz2) for JSON (XML) files.   
-
+    - A model: JSON, XML, MAT or YAML (.yml) file reporting reactions and rules contained in the model. Supported compressed formats: .zip, .gz and .bz2. Filename must follow the pattern: {model_name}.{extension}.[zip|gz|bz2]
 
 Output:
 -------------
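As an illustration of the accepted filename pattern above, a loader can dispatch on the base extension; a minimal sketch assuming cobrapy's standard readers and an uncompressed input (a .zip/.gz/.bz2 suffix would be stripped and the file decompressed first):

    import cobra

    READERS = {
        "json": cobra.io.load_json_model,
        "xml":  cobra.io.read_sbml_model,
        "mat":  cobra.io.load_matlab_model,
        "yml":  cobra.io.load_yaml_model,
    }

    def load_model(path: str) -> cobra.Model:
        # {model_name}.{extension}: dispatch on the final extension.
        ext = path.rsplit(".", 1)[-1].lower()
        return READERS[ext](path)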
Binary file COBRAxy/dist/cobraxy-0.1.0.tar.gz has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/dist/cobraxy/meta.yaml	Fri Sep 05 09:18:26 2025 +0000
@@ -0,0 +1,80 @@
+{% set name = "cobraxy" %}
+{% set version = "0.1.0" %}
+
+package:
+  name: {{ name|lower }}
+  version: {{ version }}
+
+source:
+  url: file:///C:/Users/f.lapi/Documents/GitHub/COBRAxy/dist/cobraxy-0.1.0.tar.gz
+  sha256: 9445b95e62cfe5233c29baabd5c02a7732d3d096cccdd99cfea13948d1509d9e
+
+build:
+  entry_points:
+    - custom_data_generator=custom_data_generator:main
+    - flux_simulation=flux_simulation:main
+    - flux_to_map=flux_to_map:main
+    - marea_cluster=marea_cluster:main
+    - marea=marea:main
+    - ras_generator=ras_generator:main
+    - ras_to_bounds=ras_to_bounds:main
+    - rps_generator=rps_generator:main
+  noarch: python
+  script: {{ PYTHON }} -m pip install . -vv --no-build-isolation
+  number: 0
+
+requirements:
+  host:
+    - python >=3.8.20,<3.12
+    - pip
+    - setuptools
+  run:
+    - python >=3.8.20,<3.12
+    - cairosvg ==2.7.1
+    - cobra ==0.29.0
+    - joblib ==1.4.2
+    - lxml ==5.2.2
+    - matplotlib-base ==3.7.3
+    - numpy ==1.24.4
+    - pandas ==2.0.3
+    - pyvips ==2.2.3 # [linux or osx]
+    - scikit-learn ==1.3.2
+    - scipy ==1.10.1
+    - seaborn ==0.13.0
+    - svglib ==1.5.1
+  pip:
+    - pyvips==2.2.3  # [win]
+
+test:
+  imports:
+    - utils
+    - custom_data_generator
+    - flux_simulation
+    - marea_cluster
+    - ras_generator
+    - ras_to_bounds
+    - rps_generator
+  commands:
+    - pip install pyvips==2.2.3
+    - python -c "import pyvips; print('pyvips version:', pyvips.__version__)"
+    - pip check
+    - custom_data_generator --help
+    - flux_simulation --help
+    - flux_to_map --help
+    - marea_cluster --help
+    - marea --help
+    - ras_generator --help
+    - ras_to_bounds --help
+    - rps_generator --help
+  requires:
+    - pip
+
+about:
+  home: https://github.com/CompBtBs/COBRAxy.git
+  summary: A collection of tools for metabolic flux analysis in Galaxy.
+  #license: ''
+  #license_file: PLEASE_ADD_LICENSE_FILE
+
+extra:
+  recipe-maintainers:
+    - Francesco2304
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/dist/release/channeldata.json	Fri Sep 05 09:18:26 2025 +0000
@@ -0,0 +1,1 @@
+{"channeldata_version":1,"packages":{"cobraxy":{"activate.d":false,"binary_prefix":false,"deactivate.d":false,"home":"https://github.com/CompBtBs/COBRAxy.git","post_link":false,"pre_link":false,"pre_unlink":false,"run_exports":{},"source_url":"file:///C:/Users/f.lapi/Documents/GitHub/COBRAxy/dist/cobraxy-0.1.0.tar.gz","subdirs":["noarch"],"summary":"A collection of tools for metabolic flux analysis in Galaxy.","text_prefix":false,"timestamp":1741366947,"version":"0.1.0"}},"subdirs":["noarch","win-64"]}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/dist/release/index.html	Fri Sep 05 09:18:26 2025 +0000
@@ -0,0 +1,90 @@
+<html>
+<head>
+  <title>release</title>
+  <style type="text/css">
+    a, a:active {
+      text-decoration: none; color: blue;
+    }
+    a:visited {
+      color: #48468F;
+    }
+    a:hover, a:focus {
+      text-decoration: underline; color: red;
+    }
+    body {
+      background-color: #F5F5F5;
+    }
+    h2 {
+      margin-bottom: 12px;
+    }
+    th, td {
+      font: 100% monospace; text-align: left;
+    }
+    th {
+      font-weight: bold; padding-right: 14px; padding-bottom: 3px;
+    }
+    th.tight {
+        padding-right: 6px;
+    }
+    td {
+      padding-right: 14px;
+    }
+    td.tight {
+        padding-right: 8px;
+    }
+    td.s, th.s {
+      text-align: right;
+    }
+    td.summary {
+      white-space: nowrap;
+      overflow: hidden;
+    }
+    td.packagename {
+      white-space: nowrap;
+      text-overflow: ellipsis;
+      overflow: hidden;
+      max-width: 180px;
+      padding-right: 8px;
+    }
+    td.version {
+      //white-space: nowrap;
+      overflow: hidden;
+      max-width: 90px;
+      padding-right: 8px;
+    }
+    table {
+      background-color: white;
+      border-top: 1px solid #646464;
+      border-bottom: 1px solid #646464;
+      padding-top: 10px;
+      padding-bottom: 14px;
+    }
+    address {
+      color: #787878;
+      padding-top: 10px;
+    }
+  </style>
+</head>
+<body>
+  <h2>release</h2>
+  <h3><a href="rss.xml">RSS Feed</a>&nbsp;&nbsp;&nbsp;<a href="channeldata.json">channeldata.json</a></h3>
+<a href="noarch">noarch</a>&nbsp;&nbsp;&nbsp;<a href="win-64">win-64</a>&nbsp;&nbsp;&nbsp;  <table>
+    <tr>
+      <th style="padding-right:18px;">Package</th>
+      <th>Latest Version</th>
+      <th>Doc</th>
+      <th>Dev</th>
+      <th>License</th>
+<th class="tight">win-64</th><th class="tight">noarch</th>      <th>Summary</th>
+    </tr>
+    <tr>
+      <td class="packagename"><a href="https://github.com/CompBtBs/COBRAxy.git">cobraxy</a></td>
+      <td class="version">0.1.0</td>
+      <td></td>
+      <td></td>
+      <td class="tight"></td>
+<td></td><td>X</td>      <td class="summary">A collection of tools for metabolic flux analysis in Galaxy.</td>
+    </tr>  </table>
+  <address>Updated: 2025-03-07 17:03:09 +0000 - Files: 1</address>
+</body>
+</html>
\ No newline at end of file
Binary file COBRAxy/dist/release/noarch/.cache/cache.db has changed
Binary file COBRAxy/dist/release/noarch/cobraxy-0.1.0-py_0.conda has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/dist/release/noarch/current_repodata.json	Fri Sep 05 09:18:26 2025 +0000
@@ -0,0 +1,1 @@
+{"info":{"subdir":"noarch"},"packages":{},"packages.conda":{"cobraxy-0.1.0-py_0.conda":{"build":"py_0","build_number":0,"depends":["cairosvg 2.7.1","cobra 0.29.0","joblib 1.4.2","lxml 5.2.2","matplotlib-base 3.7.3","numpy 1.24.4","pandas 2.0.3","python >=3.8.20,<3.12","scikit-learn 1.3.2","scipy 1.10.1","seaborn 0.13.0","svglib 1.5.1"],"legacy_bz2_md5":null,"md5":"3ce06fbcbd3360c12bac9adf7957ab85","name":"cobraxy","noarch":"python","sha256":"caba0a0729ba111c4a65a6c1450653275f6e0cfdeb052f7e7a8981cc2bbc447e","size":51008,"subdir":"noarch","timestamp":1741366947437,"version":"0.1.0"}},"removed":[],"repodata_version":1}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/dist/release/noarch/index.html	Fri Sep 05 09:18:26 2025 +0000
@@ -0,0 +1,76 @@
+<html>
+<head>
+  <title>release/noarch</title>
+  <style type="text/css">
+    a, a:active {
+      text-decoration: none; color: blue;
+    }
+    a:visited {
+      color: #48468F;
+    }
+    a:hover, a:focus {
+      text-decoration: underline; color: red;
+    }
+    body {
+      background-color: #F5F5F5;
+    }
+    h2 {
+      margin-bottom: 12px;
+    }
+    th, td {
+      font: 100% monospace; text-align: left;
+    }
+    th {
+      font-weight: bold; padding-right: 14px; padding-bottom: 3px;
+    }
+    td {
+      padding-right: 20px;
+    }
+    td.s, th.s {
+      text-align: right;
+    }
+    table {
+      background-color: white;
+      border-top: 1px solid #646464;
+      border-bottom: 1px solid #646464;
+      padding-top: 10px;
+      padding-bottom: 14px;
+    }
+    address {
+      color: #787878;
+      padding-top: 10px;
+    }
+  </style>
+</head>
+<body>
+  <h2>release/noarch</h2>
+  <table>
+    <tr>
+      <th>Filename</th>
+      <th>Size</th>
+      <th>Last Modified</th>
+      <th>SHA256</th>
+      <th>MD5</th>
+    </tr>
+    <tr>
+      <td><a href="repodata.json">repodata.json</a></td>
+      <td class="s">601 B</td>
+      <td>2025-03-07 17:02:28 +0000</td>
+      <td>f8cfc8b6d1f71e9b13db3152bbe3fdfac3d5ca94c748a279599dd6f6872a5ba2</td>
+      <td>21106deab69c56a7e13f5167091e9860</td>
+    </tr>    <tr>
+      <td><a href="repodata_from_packages.json">repodata_from_packages.json</a></td>
+      <td class="s">601 B</td>
+      <td>2025-03-07 17:02:28 +0000</td>
+      <td>f8cfc8b6d1f71e9b13db3152bbe3fdfac3d5ca94c748a279599dd6f6872a5ba2</td>
+      <td>21106deab69c56a7e13f5167091e9860</td>
+    </tr>    <tr>
+      <td><a href="cobraxy-0.1.0-py_0.conda">cobraxy-0.1.0-py_0.conda</a></td>
+      <td class="s">50 KB</td>
+      <td>2025-03-07 17:02:27 +0000</td>
+      <td>caba0a0729ba111c4a65a6c1450653275f6e0cfdeb052f7e7a8981cc2bbc447e</td>
+      <td>3ce06fbcbd3360c12bac9adf7957ab85</td>
+    </tr>  </table>
+  <address>Updated: 2025-03-07 17:03:09 +0000 - Files: 1</address>
+</body>
+</html>
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/dist/release/noarch/repodata.json	Fri Sep 05 09:18:26 2025 +0000
@@ -0,0 +1,1 @@
+{"info":{"subdir":"noarch"},"packages":{},"packages.conda":{"cobraxy-0.1.0-py_0.conda":{"build":"py_0","build_number":0,"depends":["cairosvg 2.7.1","cobra 0.29.0","joblib 1.4.2","lxml 5.2.2","matplotlib-base 3.7.3","numpy 1.24.4","pandas 2.0.3","python >=3.8.20,<3.12","scikit-learn 1.3.2","scipy 1.10.1","seaborn 0.13.0","svglib 1.5.1"],"md5":"3ce06fbcbd3360c12bac9adf7957ab85","name":"cobraxy","noarch":"python","sha256":"caba0a0729ba111c4a65a6c1450653275f6e0cfdeb052f7e7a8981cc2bbc447e","size":51008,"subdir":"noarch","timestamp":1741366947437,"version":"0.1.0"}},"removed":[],"repodata_version":1}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/dist/release/noarch/repodata_from_packages.json	Fri Sep 05 09:18:26 2025 +0000
@@ -0,0 +1,1 @@
+{"info":{"subdir":"noarch"},"packages":{},"packages.conda":{"cobraxy-0.1.0-py_0.conda":{"build":"py_0","build_number":0,"depends":["cairosvg 2.7.1","cobra 0.29.0","joblib 1.4.2","lxml 5.2.2","matplotlib-base 3.7.3","numpy 1.24.4","pandas 2.0.3","python >=3.8.20,<3.12","scikit-learn 1.3.2","scipy 1.10.1","seaborn 0.13.0","svglib 1.5.1"],"md5":"3ce06fbcbd3360c12bac9adf7957ab85","name":"cobraxy","noarch":"python","sha256":"caba0a0729ba111c4a65a6c1450653275f6e0cfdeb052f7e7a8981cc2bbc447e","size":51008,"subdir":"noarch","timestamp":1741366947437,"version":"0.1.0"}},"removed":[],"repodata_version":1}
\ No newline at end of file
Binary file COBRAxy/dist/release/win-64/.cache/cache.db has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/dist/release/win-64/current_repodata.json	Fri Sep 05 09:18:26 2025 +0000
@@ -0,0 +1,1 @@
+{"info":{"subdir":"win-64"},"packages":{},"packages.conda":{},"removed":[],"repodata_version":1}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/dist/release/win-64/index.html	Fri Sep 05 09:18:26 2025 +0000
@@ -0,0 +1,70 @@
+<html>
+<head>
+  <title>release/win-64</title>
+  <style type="text/css">
+    a, a:active {
+      text-decoration: none; color: blue;
+    }
+    a:visited {
+      color: #48468F;
+    }
+    a:hover, a:focus {
+      text-decoration: underline; color: red;
+    }
+    body {
+      background-color: #F5F5F5;
+    }
+    h2 {
+      margin-bottom: 12px;
+    }
+    th, td {
+      font: 100% monospace; text-align: left;
+    }
+    th {
+      font-weight: bold; padding-right: 14px; padding-bottom: 3px;
+    }
+    td {
+      padding-right: 20px;
+    }
+    td.s, th.s {
+      text-align: right;
+    }
+    table {
+      background-color: white;
+      border-top: 1px solid #646464;
+      border-bottom: 1px solid #646464;
+      padding-top: 10px;
+      padding-bottom: 14px;
+    }
+    address {
+      color: #787878;
+      padding-top: 10px;
+    }
+  </style>
+</head>
+<body>
+  <h2>release/win-64</h2>
+  <table>
+    <tr>
+      <th>Filename</th>
+      <th>Size</th>
+      <th>Last Modified</th>
+      <th>SHA256</th>
+      <th>MD5</th>
+    </tr>
+    <tr>
+      <td><a href="repodata.json">repodata.json</a></td>
+      <td class="s">96 B</td>
+      <td>2025-03-07 16:59:38 +0000</td>
+      <td>80f1769e75036891cfc10fc66eca8134a78b12e6e5cae86132db6148cf970bb1</td>
+      <td>fae3e0350ae934ae1572b2b0f28dcf74</td>
+    </tr>    <tr>
+      <td><a href="repodata_from_packages.json">repodata_from_packages.json</a></td>
+      <td class="s">96 B</td>
+      <td>2025-03-07 16:59:38 +0000</td>
+      <td>80f1769e75036891cfc10fc66eca8134a78b12e6e5cae86132db6148cf970bb1</td>
+      <td>fae3e0350ae934ae1572b2b0f28dcf74</td>
+    </tr>  </table>
+  <address>Updated: 2025-03-07 17:03:09 +0000 - Files: 0</address>
+</body>
+</html>
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/dist/release/win-64/repodata.json	Fri Sep 05 09:18:26 2025 +0000
@@ -0,0 +1,1 @@
+{"info":{"subdir":"win-64"},"packages":{},"packages.conda":{},"removed":[],"repodata_version":1}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/dist/release/win-64/repodata_from_packages.json	Fri Sep 05 09:18:26 2025 +0000
@@ -0,0 +1,1 @@
+{"info":{"subdir":"win-64"},"packages":{},"packages.conda":{},"removed":[],"repodata_version":1}
\ No newline at end of file
--- a/COBRAxy/flux_simulation.xml	Fri Sep 05 08:27:04 2025 +0000
+++ b/COBRAxy/flux_simulation.xml	Fri Sep 05 09:18:26 2025 +0000
@@ -114,7 +114,7 @@
   - Biomass sensitivity analysis (single-reaction knock-out). It is the ratio between the optimal Biomass flux computed by FBA after knocking out a reaction and the optimal Biomass flux of the complete model.
 
 Accepted files:
-   - A model: JSON or XML file reporting reactions and rules contained in the model. It can be ENGRO2 or a custom model. 
+   - A model: JSON, XML, MAT or YAML (.yml) file reporting reactions and rules contained in the model. Supported compressed formats: .zip, .gz and .bz2. Filename must follow the pattern: {model_name}.{extension}.[zip|gz|bz2]
    - Context-specific bounds: generated by RAS to Bounds tool. This can be a collection of bounds too (one bounds file per context).
 
 Output:
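The knock-out ratio described above can be reproduced with cobrapy directly; a minimal sketch, assuming the model's objective is already set to the Biomass reaction:

    import cobra

    def biomass_sensitivity(model: cobra.Model) -> dict:
        wild_type = model.optimize().objective_value  # optimum of the complete model
        ratios = {}
        for reaction in model.reactions:
            with model:                      # changes inside the block are reverted on exit
                reaction.knock_out()
                ko = model.optimize().objective_value or 0.0
            ratios[reaction.id] = ko / wild_type if wild_type else 0.0
        return ratios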
Binary file COBRAxy/local/pickle files/synonyms.pickle has changed
--- a/COBRAxy/marea_macros.xml	Fri Sep 05 08:27:04 2025 +0000
+++ b/COBRAxy/marea_macros.xml	Fri Sep 05 09:18:26 2025 +0000
@@ -27,7 +27,8 @@
 
     <xml name="options_ras_to_bounds_medium">
         <param name="medium_selector" argument="--medium_selector" type="select" label="Medium:">
-            <option value="allOpen"  selected="true">Open</option>
+            <option value="Default" selected="true">Default (ENGRO2 built-in medium)</option>
+            <option value="allOpen">Open</option>
             <option value="Custom">Custom medium</option>
             <option value="RPMI_1640">RPMI 1640</option>
             <option value="DMEM">DMEM</option>
--- a/COBRAxy/ras_generator.py	Fri Sep 05 08:27:04 2025 +0000
+++ b/COBRAxy/ras_generator.py	Fri Sep 05 09:18:26 2025 +0000
@@ -523,8 +523,8 @@
     """
     ras_values_by_cell_line = {}
     dataset.set_index(dataset.columns[0], inplace=True)
-    # Consider every column except the first, which holds the HUGO symbols and is therefore discarded
-    for cell_line_name in dataset.columns[1:]:
+    
+    for cell_line_name in dataset.columns: #[1:]:
         cell_line = dataset[cell_line_name].to_dict()
         ras_values_by_cell_line[cell_line_name]= get_ras_values(rules, cell_line)
     return ras_values_by_cell_line
@@ -650,8 +650,15 @@
      
     if filenamePath.ext is utils.FileFormat.PICKLE: return utils.readPickle(datFilePath)
 
+    dict_rule = {}
+    for line in utils.readCsv(datFilePath, delimiter = "\t"):
+        if line[2] == "":
+            dict_rule[line[0]] = ruleUtils.OpList([""])
+        else:
+            dict_rule[line[0]] = ruleUtils.parseRuleToNestedList(line[2])
+
     # csv rules need to be parsed, those in a pickle format are taken to be pre-parsed.
-    return { line[0] : ruleUtils.parseRuleToNestedList(line[1]) for line in utils.readCsv(datFilePath) }
+    return dict_rule
 
 def main(args:List[str] = None) -> None:
     """
@@ -692,7 +699,7 @@
     rules      = model.getRules(ARGS.tool_dir)
     genes      = data_gene(dataset, type_gene, name, None)
     ids, rules = load_id_rules(rules.get(type_gene))
-    
+
     resolve_rules, err = resolve(genes, rules, ids, ARGS.none, name)
     create_ras(resolve_rules, name, rules, ids, ARGS.ras_output)
     
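The reworked rule loading above reads a three-column tab-separated file (reaction id, an intermediate column, and the GPR rule) and keeps reactions whose rule column is empty; a standalone restatement of that pattern with the csv module, with None standing in for the project's OpList([""]) placeholder:

    import csv

    def load_rules(path: str) -> dict:
        rules = {}
        with open(path, newline="") as fd:
            for line in csv.reader(fd, delimiter="\t"):
                # Column 0: reaction id; column 2: GPR rule text (may be empty).
                rules[line[0]] = None if line[2] == "" else line[2]
        return rules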
--- a/COBRAxy/ras_to_bounds.xml	Fri Sep 05 08:27:04 2025 +0000
+++ b/COBRAxy/ras_to_bounds.xml	Fri Sep 05 09:18:26 2025 +0000
@@ -87,7 +87,7 @@
 By default, every reaction in model.medium that is not present in the medium file has its lower bound set to 0.0 rather than to the default model value.
 
 Accepted files:
-    - A model: JSON or XML file reporting reactions and rules contained in the model.   
+    - A model: JSON, XML, MAT or YAML (.yml) file reporting reactions and rules contained in the model. Supported compressed formats: .zip, .gz and .bz2. Filename must follow the pattern: {model_name}.{extension}.[zip|gz|bz2]
     - RAS matrix: tab-separated RAS file as returned by the RAS generator. Multiple RAS files with different file names can also be uploaded (e.g. one RAS matrix for normal cells and one for cancer cells). Note that if multiple RAS matrices are uploaded, the bounds are normalized across all cells.
     - Medium: tab-separated file containing lower and upper-bounds of medium reactions.
 
--- a/COBRAxy/rps_generator.py	Fri Sep 05 08:27:04 2025 +0000
+++ b/COBRAxy/rps_generator.py	Fri Sep 05 09:18:26 2025 +0000
@@ -121,7 +121,8 @@
     """
     name = clean_metabolite_name(name)
     for id, synonyms in syn_dict.items():
-        if name in synonyms: return id
+        if name in synonyms:
+            return id
     
     return ""
 
@@ -131,7 +132,8 @@
     Check for missing metabolites in the abundances dictionary compared to the reactions dictionary and update abundances accordingly.
 
     Parameters:
-        reactions (dict): A dictionary representing reactions where keys are reaction names and values are dictionaries containing metabolite names as keys and stoichiometric coefficients as values.
+        reactions (dict): A dictionary representing reactions where keys are reaction names and values are dictionaries containing metabolite names as keys and 
+                          stoichiometric coefficients as values.
         dataset_by_rows (dict): A dictionary representing abundances where keys are metabolite names and values are their corresponding abundances for all cell lines.
         cell_lines_amt : amount of cell lines, needed to add a new list of abundances for missing metabolites.
 
@@ -199,23 +201,27 @@
     Returns:
         None
     """
+
     cell_lines = dataset[0][1:]
     abundances_dict = {}
 
     translationIsApplied = ARGS.reaction_choice == "default"
     for row in dataset[1:]:
-        id = get_metabolite_id(row[0], syn_dict) if translationIsApplied else row[0]
-        if id: abundances_dict[id] = list(map(utils.Float(), row[1:]))
-    
+        id = get_metabolite_id(row[0], syn_dict) #if translationIsApplied else row[0]
+        if id: 
+            abundances_dict[id] = list(map(utils.Float(), row[1:]))
+
     missing_list = check_missing_metab(reactions, abundances_dict, len((cell_lines)))
-    
+
     rps_scores :Dict[str, Dict[str, float]] = {}
     for pos, cell_line_name in enumerate(cell_lines):
         abundances = { metab : abundances[pos] for metab, abundances in abundances_dict.items() }
+
         rps_scores[cell_line_name] = calculate_rps(reactions, abundances, black_list, missing_list, substrateFreqTable)
     
     df = pd.DataFrame.from_dict(rps_scores)
-    
+    df = df.loc[list(reactions.keys()),:]
+    print(df.head(10))
     df.index.name = 'Reactions'
     df.to_csv(ARGS.rps_output, sep='\t', na_rep='None', index=True)
 
@@ -238,19 +244,36 @@
         syn_dict = pk.load(sd)
 
     dataset = utils.readCsv(utils.FilePath.fromStrPath(ARGS.input), '\t', skipHeader = False)
-
+    tmp_dict = None
     if ARGS.reaction_choice == 'default':
         reactions = pk.load(open(ARGS.tool_dir + '/local/pickle files/reactions.pickle', 'rb'))
         substrateFreqTable = pk.load(open(ARGS.tool_dir + '/local/pickle files/substrate_frequencies.pickle', 'rb'))
     
     elif ARGS.reaction_choice == 'custom':
         reactions = reactionUtils.parse_custom_reactions(ARGS.custom)
+        for r, s in reactions.items():
+            tmp_list = list(s.keys())
+            for k in tmp_list:
+                if k[-2] == '_':
+                    s[k[:-2]] = s.pop(k)
         substrateFreqTable = {}
         for _, substrates in reactions.items():
             for substrateName, _ in substrates.items():
                 if substrateName not in substrateFreqTable: substrateFreqTable[substrateName] = 0
                 substrateFreqTable[substrateName] += 1
 
+        print(f"Reactions: {reactions}")
+        print(f"Substrate Frequencies: {substrateFreqTable}")
+        print(f"Synonyms: {syn_dict}")
+        tmp_dict = {}
+        for metabName, freq in substrateFreqTable.items():
+            tmp_metabName = clean_metabolite_name(metabName)
+            for syn_key, syn_list in syn_dict.items():
+                if tmp_metabName in syn_list or tmp_metabName == clean_metabolite_name(syn_key):
+                    print(f"Mapping {tmp_metabName} to {syn_key}")
+                    tmp_dict[syn_key] = syn_list
+                    tmp_dict[syn_key].append(tmp_metabName)
+
     rps_for_cell_lines(dataset, reactions, black_list, syn_dict, substrateFreqTable)
     print('Execution succeeded')
 
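The k[-2] == '_' check in the custom-reaction branch above strips a trailing single-letter compartment tag from metabolite ids; a standalone restatement:

    def strip_compartment(metabolite_id: str) -> str:
        # "atp_c" -> "atp"; ids without a "_x" suffix pass through unchanged.
        if len(metabolite_id) > 2 and metabolite_id[-2] == "_":
            return metabolite_id[:-2]
        return metabolite_id

    assert strip_compartment("atp_c") == "atp"
    assert strip_compartment("glucose") == "glucose"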
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/setup.py	Fri Sep 05 09:18:26 2025 +0000
@@ -0,0 +1,59 @@
+from setuptools import setup, find_packages
+
+setup(
+    name='cobraxy',
+    version='0.1.0',
+    description='A collection of tools for metabolic flux analysis in Galaxy.',
+    long_description=open('README.md').read(),
+    long_description_content_type='text/markdown',
+    author='',  
+    author_email='',
+    url='https://github.com/CompBtBs/COBRAxy.git',
+    license='',
+    packages=find_packages(include=["utils", "utils.*"]),  
+    py_modules=[
+        'custom_data_generator',
+        'flux_simulation',
+        'flux_to_map',
+        'marea_cluster',
+        'marea',
+        'ras_generator',
+        'ras_to_bounds',
+        'rps_generator'
+    ],
+    include_package_data=True, 
+    install_requires=[
+        'cairosvg==2.7.1',
+        'cobra==0.29.0',
+        'joblib==1.4.2',
+        'lxml==5.2.2',
+        'matplotlib==3.7.3',
+        'numpy==1.24.4',
+        'pandas==2.0.3',
+        'pyvips==2.2.3',
+        'scikit-learn==1.3.2',
+        'scipy==1.11',
+        'seaborn==0.13.0',
+        'svglib==1.5.1',
+        'anndata==0.8.0',
+        'pydeseq2==0.5.1'
+    ],
+    entry_points={
+        'console_scripts': [
+            'custom_data_generator=custom_data_generator:main',
+            'flux_simulation=flux_simulation:main',
+            'flux_to_map=flux_to_map:main',
+            'marea_cluster=marea_cluster:main',
+            'marea=marea:main',
+            'ras_generator=ras_generator:main',
+            'ras_to_bounds=ras_to_bounds:main',
+            'rps_generator=rps_generator:main'
+        ],
+    },
+    classifiers=[
+        'Programming Language :: Python :: 3',
+        'License :: OSI Approved :: MIT License',
+        'Operating System :: OS Independent',
+    ],
+    python_requires='>=3.8.20,<3.12',
+)
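Each console_scripts entry above follows the module:function convention; after pip install ., the generated command wrapper is roughly equivalent to this illustrative sketch:

    import importlib

    def run_entry_point(target: str = "ras_generator:main") -> None:
        # Left of ':' is the module, right of ':' the callable; it takes no
        # arguments and parses sys.argv itself, as the tools above do.
        module_name, func_name = target.split(":")
        module = importlib.import_module(module_name)
        getattr(module, func_name)()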
--- a/COBRAxy/utils/CBS_backend.py	Fri Sep 05 08:27:04 2025 +0000
+++ b/COBRAxy/utils/CBS_backend.py	Fri Sep 05 09:18:26 2025 +0000
@@ -1,210 +1,210 @@
-from swiglpk import *
-import random
-import pandas as pd
-import numpy as np
-import cobra as cb
-
-# Initialize LP problem
-def initialize_lp_problem(S):
-
-    len_vector=len(S.keys())
-    values=list(S.values())
-    indexes=list(S.keys())
-    ia = intArray(len_vector+1); 
-    ja = intArray(len_vector+1);
-    ar = doubleArray(len_vector+1);
-    
-    i=0
-    ind_row=[indexes[i][0]+1 for i in range(0, len(values) )]
-    ind_col=[indexes[i][1]+1 for i in range(0, len(values) )]
-    for i in range(1, len(values) + 1): 
-        ia[i]=ind_row[i-1]
-        ja[i]=ind_col[i-1]
-        ar[i] = values[i-1]
-    
-    nrows=S.shape[0]
-    ncol=S.shape[1]
-    
-    return len_vector, values, indexes, ia, ja, ar, nrows, ncol
-    
-    
-
-# Solve LP problem from the structure of the metabolic model
-def create_and_solve_lp_problem(lb,ub,nrows, ncol, len_vector, ia, ja, ar, 
-                                obj_coefs,reactions,return_lp=False):
-    
-    
-    lp = glp_create_prob();
-    glp_set_prob_name(lp, "sample");
-    glp_set_obj_dir(lp, GLP_MAX);
-    glp_add_rows(lp, nrows);
-    eps = 1e-16
-    for i in range(nrows):
-        glp_set_row_name(lp, i+1, "constrain_"+str(i+1));
-        glp_set_row_bnds(lp, i+1, GLP_FX, 0.0, 0.0);
-    glp_add_cols(lp, ncol);
-    for i in range(ncol):
-        glp_set_col_name(lp, i+1, "flux_"+str(i+1));
-        glp_set_col_bnds(lp, i+1, GLP_DB,lb[i]-eps,ub[i]+eps);
-    glp_load_matrix(lp, len_vector, ia, ja, ar);
-    
-    try:
-        fluxes,Z=solve_lp_problem(lp,obj_coefs,reactions)
-        if return_lp:
-            return fluxes,Z,lp
-        else:
-            glp_delete_prob(lp);
-            return fluxes,Z
-    except Exception as e:
-        glp_delete_prob(lp)
-        raise Exception(e)
-    
-    
-# Solve LP problem from the structure of the metabolic model
-def solve_lp_problem(lp,obj_coefs,reactions):
-   
-    # Set the coefficients of the objective function
-    i=1
-    for ind_coef in obj_coefs:
-        glp_set_obj_coef(lp, i, ind_coef);
-        i+=1
-
-    # Initialize the parameters    
-    params=glp_smcp()
-    params.presolve=GLP_ON
-    params.msg_lev = GLP_MSG_ERR
-    params.tm_lim=4000
-    glp_init_smcp(params)
-
-    glp_term_out(GLP_OFF)
-
-    try:
-    
-        # Solve the problem
-        glp_scale_prob(lp,GLP_SF_AUTO)
-        
-        value=glp_simplex(lp, params) 
-
-        Z = glp_get_obj_val(lp);
-
-        if value == 0:
-            fluxes = []
-            for i in range(len(reactions)): fluxes.append(glp_get_col_prim(lp, i+1))
-            return fluxes,Z
-        else:
-            raise Exception("error in LP problem. Problem:",str(value)) 
-    except Exception as e:
-        # Re-enable terminal output for error reporting
-        glp_term_out(GLP_ON)
-        raise Exception(e)
-    finally:
-        # Re-enable terminal output after solving
-        glp_term_out(GLP_ON)
-    
-# Create LP structure
-def create_lp_structure(model):
-    
-    reactions=[el.id for el in model.reactions]
-    coefs_obj=[reaction.objective_coefficient for reaction in model.reactions]
-    
-    # Lower and upper bounds
-    lb=[reaction.lower_bound for reaction in model.reactions]
-    ub=[reaction.upper_bound for reaction in model.reactions]
-    
-    # Create S matrix
-    S=cb.util.create_stoichiometric_matrix(model,array_type="dok")
-    
-    return S,lb,ub,coefs_obj,reactions
-
-# CBS sampling interface
-def randomObjectiveFunctionSampling(model, nsample, coefficients_df, df_sample):
-
-    S,lb,ub,coefs_obj,reactions = create_lp_structure(model)
-    len_vector, values, indexes, ia, ja, ar, nrow, ncol = initialize_lp_problem(S)
-    
-    for i in range(nsample):
-      
-        coefs_obj=coefficients_df.iloc[:,i].values
-            
-        if coefs_obj[-1]==1: #minimize
-            coefs_obj= coefs_obj[0:-1] * -1
-        else:
-            coefs_obj=coefs_obj[0:-1]
-
-        fluxes,Z = create_and_solve_lp_problem(lb,ub, nrow, ncol, len_vector, 
-                                                        ia, ja, ar, coefs_obj,reactions,return_lp=False)
-        df_sample.loc[i] = fluxes 
-    pass
-
-def randomObjectiveFunctionSampling_cobrapy(model, nsample, coefficients_df, df_sample):
-    
-    for i in range(nsample):
-
-        dict_coeff={}
-        if(coefficients_df.iloc[-1][i]==1):
-            type_problem = -1 #minimize
-        else:
-            type_problem = 1
-            
-        for rxn in [reaction.id for reaction in model.reactions]:
-            dict_coeff[model.reactions.get_by_id(rxn)] = coefficients_df.loc[rxn][i] * type_problem
-            
-        model.objective = dict_coeff
-        solution =  model.optimize().fluxes
-        for rxn, flux in solution.items():
-            df_sample.loc[i][rxn] = flux
-
-    pass
-
-# Create random coefficients for CBS
-def randomObjectiveFunction(model, n_samples, df_fva, seed=0):
-    
-
-        #reactions = model.reactions
-        reactions = [reaction.id for reaction in model.reactions]
-        cont=seed
-        list_ex=reactions.copy()
-        list_ex.append("type_of_problem")
-        coefficients_df = pd.DataFrame(index=list_ex,columns=[str(i) for i in range(n_samples)])
-
-        for i in range(0, n_samples):
-           
-            cont=cont+1
-            random.seed(cont)
-            
-            # Generate a random number between 0 and 1
-            threshold = random.random() # coefficient between 0 and 1
-            
-            for reaction in reactions:
-
-                cont=cont+1
-                random.seed(cont)
-                        
-                val=random.random()   
-                
-                if val>threshold:
-
-                    cont=cont+1
-                    random.seed(cont)                           
-                   
-                    c=2*random.random()-1 # coefficient between -1 and 1
-                    
-                    val_max = np.max([abs(df_fva.loc[reaction,"minimum"]), abs(df_fva.loc[reaction,"maximum"])])
-
-                    if val_max!=0: # only if the FVA range is non-zero
-                        coefficients_df.loc[reaction,str(i)] = c/val_max # divide by the FVA value
-                    else:
-                        coefficients_df.loc[reaction,str(i)] = 0
-
-                else:
-                    coefficients_df.loc[reaction,str(i)] = 0
-
-            cont=cont+1
-            random.seed(cont)
-                    
-            if random.random()<0.5:
-                coefficients_df.loc["type_of_problem",str(i)] = 0 #maximize
-            else:
-                coefficients_df.loc["type_of_problem",str(i)] = 1 #minimize
-                            
-        return coefficients_df
+from swiglpk import *
+import random
+import pandas as pd
+import numpy as np
+import cobra as cb
+
+# Initialize LP problem
+def initialize_lp_problem(S):
+
+    len_vector=len(S.keys())
+    values=list(S.values())
+    indexes=list(S.keys())
+    ia = intArray(len_vector+1); 
+    ja = intArray(len_vector+1);
+    ar = doubleArray(len_vector+1);
+    
+    i=0
+    ind_row=[indexes[i][0]+1 for i in range(0, len(values) )]
+    ind_col=[indexes[i][1]+1 for i in range(0, len(values) )]
+    for i in range(1, len(values) + 1): 
+        ia[i]=ind_row[i-1]
+        ja[i]=ind_col[i-1]
+        ar[i] = values[i-1]
+    
+    nrows=S.shape[0]
+    ncol=S.shape[1]
+    
+    return len_vector, values, indexes, ia, ja, ar, nrows, ncol
+    
+    
+
+# Solve LP problem from the structure of the metabolic model
+def create_and_solve_lp_problem(lb,ub,nrows, ncol, len_vector, ia, ja, ar, 
+                                obj_coefs,reactions,return_lp=False):
+    
+    
+    lp = glp_create_prob();
+    glp_set_prob_name(lp, "sample");
+    glp_set_obj_dir(lp, GLP_MAX);
+    glp_add_rows(lp, nrows);
+    eps = 1e-16
+    for i in range(nrows):
+        glp_set_row_name(lp, i+1, "constrain_"+str(i+1));
+        glp_set_row_bnds(lp, i+1, GLP_FX, 0.0, 0.0);
+    glp_add_cols(lp, ncol);
+    for i in range(ncol):
+        glp_set_col_name(lp, i+1, "flux_"+str(i+1));
+        glp_set_col_bnds(lp, i+1, GLP_DB,lb[i]-eps,ub[i]+eps);
+    glp_load_matrix(lp, len_vector, ia, ja, ar);
+    
+    try:
+        fluxes,Z=solve_lp_problem(lp,obj_coefs,reactions)
+        if return_lp:
+            return fluxes,Z,lp
+        else:
+            glp_delete_prob(lp);
+            return fluxes,Z
+    except Exception as e:
+        glp_delete_prob(lp)
+        raise Exception(e)
+    
+    
+# Solve LP problem from the structure of the metabolic model
+def solve_lp_problem(lp,obj_coefs,reactions):
+   
+    # Set the coefficients of the objective function
+    i=1
+    for ind_coef in obj_coefs:
+        glp_set_obj_coef(lp, i, ind_coef);
+        i+=1
+
+    # Initialize the parameters    
+    params=glp_smcp()
+    params.presolve=GLP_ON
+    params.msg_lev = GLP_MSG_ERR
+    params.tm_lim=4000
+    glp_init_smcp(params)
+
+    glp_term_out(GLP_OFF)
+
+    try:
+    
+        # Solve the problem
+        glp_scale_prob(lp,GLP_SF_AUTO)
+        
+        value=glp_simplex(lp, params) 
+
+        Z = glp_get_obj_val(lp);
+
+        if value == 0:
+            fluxes = []
+            for i in range(len(reactions)): fluxes.append(glp_get_col_prim(lp, i+1))
+            return fluxes,Z
+        else:
+            raise Exception("error in LP problem. Problem:",str(value)) 
+    except Exception as e:
+        # Re-enable terminal output for error reporting
+        glp_term_out(GLP_ON)
+        raise Exception(e)
+    finally:
+        # Re-enable terminal output after solving
+        glp_term_out(GLP_ON)
+    
+# Create LP structure
+def create_lp_structure(model):
+    
+    reactions=[el.id for el in model.reactions]
+    coefs_obj=[reaction.objective_coefficient for reaction in model.reactions]
+    
+    # Lower and upper bounds
+    lb=[reaction.lower_bound for reaction in model.reactions]
+    ub=[reaction.upper_bound for reaction in model.reactions]
+    
+    # Create S matrix
+    S=cb.util.create_stoichiometric_matrix(model,array_type="dok")
+    
+    return S,lb,ub,coefs_obj,reactions
+
+# CBS sampling interface
+def randomObjectiveFunctionSampling(model, nsample, coefficients_df, df_sample):
+
+    S,lb,ub,coefs_obj,reactions = create_lp_structure(model)
+    len_vector, values, indexes, ia, ja, ar, nrow, ncol = initialize_lp_problem(S)
+    
+    for i in range(nsample):
+      
+        coefs_obj=coefficients_df.iloc[:,i].values
+            
+        if coefs_obj[-1]==1: #minimize
+            coefs_obj= coefs_obj[0:-1] * -1
+        else:
+            coefs_obj=coefs_obj[0:-1]
+
+        fluxes,Z = create_and_solve_lp_problem(lb,ub, nrow, ncol, len_vector, 
+                                                        ia, ja, ar, coefs_obj,reactions,return_lp=False)
+        df_sample.loc[i] = fluxes 
+    pass
+
+def randomObjectiveFunctionSampling_cobrapy(model, nsample, coefficients_df, df_sample):
+    
+    for i in range(nsample):
+
+        dict_coeff={}
+        if(coefficients_df.iloc[-1][i]==1):
+            type_problem = -1 #minimize
+        else:
+            type_problem = 1
+            
+        for rxn in [reaction.id for reaction in model.reactions]:
+            dict_coeff[model.reactions.get_by_id(rxn)] = coefficients_df.loc[rxn][i] * type_problem
+            
+        model.objective = dict_coeff
+        solution =  model.optimize().fluxes
+        for rxn, flux in solution.items():
+            df_sample.loc[i][rxn] = flux
+
+    pass
+
+# Create random coefficients for CBS
+def randomObjectiveFunction(model, n_samples, df_fva, seed=0):
+    
+
+        #reactions = model.reactions
+        reactions = [reaction.id for reaction in model.reactions]
+        cont=seed
+        list_ex=reactions.copy()
+        list_ex.append("type_of_problem")
+        coefficients_df = pd.DataFrame(index=list_ex,columns=[str(i) for i in range(n_samples)])
+
+        for i in range(0, n_samples):
+           
+            cont=cont+1
+            random.seed(cont)
+            
+            # Generate a random number between 0 and 1
+            threshold = random.random() # coefficient between 0 and 1
+            
+            for reaction in reactions:
+
+                cont=cont+1
+                random.seed(cont)
+                        
+                val=random.random()   
+                
+                if val>threshold:
+
+                    cont=cont+1
+                    random.seed(cont)                           
+                   
+                    c=2*random.random()-1 # coefficient between -1 and 1
+                    
+                    val_max = np.max([abs(df_fva.loc[reaction,"minimum"]), abs(df_fva.loc[reaction,"maximum"])])
+
+                    if val_max!=0: # only if the FVA range is non-zero
+                        coefficients_df.loc[reaction,str(i)] = c/val_max # divide by the FVA value
+                    else:
+                        coefficients_df.loc[reaction,str(i)] = 0
+
+                else:
+                    coefficients_df.loc[reaction,str(i)] = 0
+
+            cont=cont+1
+            random.seed(cont)
+                    
+            if random.random()<0.5:
+                coefficients_df.loc["type_of_problem",str(i)] = 0 #maximize
+            else:
+                coefficients_df.loc["type_of_problem",str(i)] = 1 #minimize
+                            
+        return coefficients_df
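A possible driver for the helpers above, sketched under the assumption that a COBRA model is already loaded (e.g. via cobra.io.load_json_model) and that FVA bounds come from cobrapy; the sample count is illustrative:

    import pandas as pd
    from cobra.flux_analysis import flux_variability_analysis
    from utils import CBS_backend

    n_samples = 10  # illustrative
    df_fva = flux_variability_analysis(model, fraction_of_optimum=0)  # scales the random coefficients
    coefficients = CBS_backend.randomObjectiveFunction(model, n_samples, df_fva, seed=0)
    samples = pd.DataFrame(index=range(n_samples),
                           columns=[rxn.id for rxn in model.reactions])
    CBS_backend.randomObjectiveFunctionSampling(model, n_samples, coefficients, samples)
    # samples now holds one flux distribution per random objective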
--- a/COBRAxy/utils/general_utils.py	Fri Sep 05 08:27:04 2025 +0000
+++ b/COBRAxy/utils/general_utils.py	Fri Sep 05 09:18:26 2025 +0000
@@ -1,701 +1,701 @@
-import math
-import re
-import sys
-import csv
-import pickle
-import lxml.etree as ET
-
-from enum import Enum
-from itertools import count
-from typing import Any, Callable, Dict, Generic, List, Literal, Optional, TypeVar, Union
-
-import pandas as pd
-import cobra
-
-import zipfile
-import gzip
-import bz2
-from io import StringIO
-
-class ValueErr(Exception):
-    def __init__(self, param_name, expected, actual):
-        super().__init__(f"Invalid value for {param_name}: expected {expected}, got {actual}")
-
-class PathErr(Exception):
-    def __init__(self, path, message):
-        super().__init__(f"Path error for '{path}': {message}")
-
-class FileFormat(Enum):
-    """
-    Encodes possible file extensions to conditionally save data in a different format.
-    """
-    DAT    = ("dat",) # this is how galaxy treats all your files!
-    CSV    = ("csv",) # this is how most editable input data is written
-    TSV    = ("tsv",) # this is how most editable input data is ACTUALLY written TODO:more support pls!!
-    SVG    = ("svg",) # this is how most metabolic maps are written
-    PNG    = ("png",) # this is a common output format for images (such as metabolic maps)
-    PDF    = ("pdf",) # this is also a common output format for images, as it's required in publications.
-    
-    # Updated to include compressed variants
-    XML    = ("xml", "xml.gz", "xml.zip", "xml.bz2") # SBML files are XML files, sometimes compressed
-    JSON   = ("json", "json.gz", "json.zip", "json.bz2") # COBRA models can be stored as JSON files, sometimes compressed
-    MAT    = ("mat", "mat.gz", "mat.zip", "mat.bz2") # COBRA models can be stored as MAT files, sometimes compressed
-    YML    = ("yml", "yml.gz", "yml.zip", "yml.bz2") # COBRA models can be stored as YML files, sometimes compressed
-
-    TXT    = ("txt",) # this is how most output data is written
-    PICKLE = ("pickle", "pk", "p") # this is how all runtime data structures are saved
-
-    def __init__(self, *extensions):
-        self.extensions = extensions
-        # Store original extension when set via fromExt
-        self._original_extension = None
-
-    @classmethod
-    def fromExt(cls, ext: str) -> "FileFormat":
-        """
-        Converts a file extension string to a FileFormat instance.
-        Args:
-            ext : The file extension as a string.
-        Returns:
-            FileFormat: The FileFormat instance corresponding to the file extension.
-        """
-        variantName = ext.upper()
-        if variantName in FileFormat.__members__: 
-            instance = FileFormat[variantName]
-            instance._original_extension = ext
-            return instance
-        
-        variantName = ext.lower()
-        for member in cls:
-            if variantName in member.value: 
-                # Create a copy-like behavior by storing the original extension
-                member._original_extension = ext
-                return member
-        
-        raise ValueErr("ext", "a valid FileFormat file extension", ext)
-
-    def __str__(self) -> str:
-        """
-        (Private) converts to str representation. Good practice for usage with argparse.
-        Returns:
-            str : the string representation of the file extension.
-        """
-        # If we have an original extension stored (for compressed files only), use it
-        if hasattr(self, '_original_extension') and self._original_extension:
-            return self._original_extension
-        
-        # For XML, JSON, MAT and YML without original extension, use the base extension
-        if self == FileFormat.XML:
-            return "xml"
-        elif self == FileFormat.JSON:
-            return "json"
-        elif self == FileFormat.MAT:
-            return "mat"
-        elif self == FileFormat.YML:
-            return "yml"
-        
-        return self.value[-1]
-
-class FilePath():
-    """
-    Represents a file path. View this as an attempt to standardize file-related operations by expecting
-    values of this type in any process requesting a file path.
-    """
-    def __init__(self, filePath: str, ext: FileFormat, *, prefix="") -> None:
-        """
-        (Private) Initializes an instance of FilePath.
-        Args:
-            path : the end of the path, containing the file name.
-            ext : the file's extension.
-            prefix : anything before path, if the last '/' isn't there it's added by the code.
-        Returns:
-            None : practically, a FilePath instance.
-        """
-        self.ext = ext
-        self.filePath = filePath
-
-        if prefix and prefix[-1] != '/': 
-            prefix += '/'
-        self.prefix = prefix
-    
-    @classmethod
-    def fromStrPath(cls, path: str) -> "FilePath":
-        """
-        Factory method to parse a string from which to obtain, if possible, a valid FilePath instance.
-        It detects double extensions such as .json.gz and .xml.bz2, which are common in COBRA models.
-        These double extensions are not supported for other file types such as .csv.
-        Args:
-            path : the string containing the path
-        Raises:
-            PathErr : if the provided string doesn't represent a valid path.
-        Returns:
-            FilePath : the constructed instance.
-        """
-        result = re.search(r"^(?P<prefix>.*\/)?(?P<name>.*)\.(?P<ext>[^.]*)$", path)
-        if not result or not result["name"] or not result["ext"]:
-            raise PathErr(path, "cannot recognize folder structure or extension in path")
-
-        prefix = result["prefix"] if result["prefix"] else ""
-        name, ext = result["name"], result["ext"]
-
-        # Check for double extensions (json.gz, xml.zip, etc.)
-        parts = path.split(".")
-        if len(parts) >= 3:  
-            penultimate = parts[-2]
-            last = parts[-1]
-            double_ext = f"{penultimate}.{last}"
-            
-            # Try the double extension first
-            try:
-                ext_format = FileFormat.fromExt(double_ext)
-                name = ".".join(parts[:-2])
-                # Extract prefix if it exists
-                if '/' in name:
-                    prefix = name[:name.rfind('/') + 1]
-                    name = name[name.rfind('/') + 1:]
-                return cls(name, ext_format, prefix=prefix)
-            except ValueErr:
-                # If double extension doesn't work, fall back to single extension
-                pass
-
-        # Single extension fallback (original logic)
-        try:
-            ext_format = FileFormat.fromExt(ext)
-            return cls(name, ext_format, prefix=prefix)
-        except ValueErr:
-            raise PathErr(path, f"unsupported file extension: {ext}")
-
-    def show(self) -> str:
-        """
-        Shows the path as a string.
-        Returns:
-            str : the path shown as a string.
-        """
-        return f"{self.prefix}{self.filePath}.{self.ext}"
-    
-    def __str__(self) -> str: 
-        return self.show()
-
-# ERRORS
-def terminate(msg :str) -> None:
-    """
-    Terminate the execution of the script with an error message.
-    
-    Args:
-        msg (str): The error message to be displayed.
-    
-    Returns:
-        None
-    """
-    sys.exit(f"Execution aborted: {msg}\n")
-
-def logWarning(msg :str, loggerPath :str) -> None:
-    """
-    Log a warning message to an output log file and print it to the console. The final period and a
-    newline is added by the function.
-
-    Args:
-        s (str): The warning message to be logged and printed.
-        loggerPath : The file path of the output log file. Given as a string, parsed to a FilePath and
-        immediately read back (beware relative expensive operation, log with caution).
-
-    Returns:
-        None
-    """
-    # building the path and then reading it immediately seems useless, but it's actually a way of
-    # validating that reduces repetition on the caller's side. Besides, logging a message by writing
-    # to a file is supposed to be computationally expensive anyway, so this is also a good deterrent from
-    # mindlessly logging whenever something comes up, log at the very end and tell the user everything
-    # that went wrong. If you don't like it: implement a persistent runtime buffer that gets dumped to
-    # the file only at the end of the program's execution.
-    with open(FilePath.fromStrPath(loggerPath).show(), 'a') as log: log.write(f"{msg}.\n")
-
-class CustomErr(Exception):
-    """
-    Custom error class to handle exceptions in a structured way, with a unique identifier and a message.
-    """
-    __idGenerator = count()
-    errName = "Custom Error"
-    def __init__(self, msg :str, details = "", explicitErrCode = -1) -> None:
-        """
-        (Private) Initializes an instance of CustomErr.
-
-        Args:
-            msg (str): Error message to be displayed.
-            details (str): Informs the user more about the error encountered. Defaults to "".
-            explicitErrCode (int): Explicit error code to be used. Defaults to -1.
-        
-        Returns:
-            None : practically, a CustomErr instance.
-        """
-        self.msg     = msg
-        self.details = details
-
-        self.id = max(explicitErrCode, next(CustomErr.__idGenerator))
-
-    def throw(self, loggerPath = "") -> None:
-        """
-        Raises the current CustomErr instance, logging a warning message before doing so.
-
-        Raises:
-            self: The current CustomErr instance.
-        
-        Returns:
-            None
-        """
-        if loggerPath: logWarning(str(self), loggerPath)
-        raise self
-
-    def abort(self) -> None:
-        """
-        Aborts the execution of the script.
-        
-        Returns:
-            None
-        """
-        terminate(str(self))
-
-    def __str__(self) -> str:
-        """
-        (Private) Returns a string representing the current CustomErr instance.
-
-        Returns:
-            str: A string representing the current CustomErr instance.
-        """
-        return f"{CustomErr.errName} #{self.id}: {self.msg}, {self.details}."
-
-class ArgsErr(CustomErr):
-    """
-    CustomErr subclass for UI arguments errors.
-    """
-    errName = "Args Error"
-    def __init__(self, argName :str, expected :Any, actual :Any, msg = "no further details provided") -> None:
-        super().__init__(f"argument \"{argName}\" expected {expected} but got {actual}", msg)
-
-class DataErr(CustomErr):
-    """
-    CustomErr subclass for data formatting errors.
-    """
-    errName = "Data Format Error"
-    def __init__(self, fileName :str, msg = "no further details provided") -> None:
-        super().__init__(f"file \"{fileName}\" contains malformed data", msg)
-
-class PathErr(CustomErr):
-    """
-    CustomErr subclass for filepath formatting errors.
-    """
-    errName = "Path Error"
-    def __init__(self, path :FilePath, msg = "no further details provided") -> None:
-        super().__init__(f"path \"{path}\" is invalid", msg)
-
-class ValueErr(CustomErr):
-    """
-    CustomErr subclass for any value error.
-    """
-    errName = "Value Error"
-    def __init__(self, valueName: str, expected :Any, actual :Any, msg = "no further details provided") -> None:
-        super().__init__("value " + f"\"{valueName}\" " * bool(valueName) + f"was supposed to be {expected}, but got {actual} instead", msg)
-
-# RESULT
-T = TypeVar('T')
-E = TypeVar('E', bound = CustomErr) # should bind to Result.ResultErr but python happened!
-class Result(Generic[T, E]):
-    class ResultErr(CustomErr):
-        """
-        CustomErr subclass for all Result errors.
-        """
-        errName = "Result Error"
-        def __init__(self, msg = "no further details provided") -> None:
-            super().__init__(msg)
-    """
-    Class to handle the result of an operation, with a value and a boolean flag to indicate
-    whether the operation was successful or not.
-    """
-    def __init__(self, value :Union[T, E], isOk :bool) -> None:
-        """
-        (Private) Initializes an instance of Result.
-
-        Args:
-            value (Union[T, E]): The value to be stored in the Result instance.
-            isOk (bool): A boolean flag to indicate whether the operation was successful or not.
-        
-            Returns:
-                None : practically, a Result instance.
-        """
-        self.isOk  = isOk
-        self.isErr = not isOk
-        self.value = value
-
-    @classmethod
-    def Ok(cls,  value :T) -> "Result":
-        """
-        Constructs a new Result instance with a successful operation.
-
-        Args:
-            value (T): The value to be stored in the Result instance, set as successful.
-
-        Returns:
-            Result: A new Result instance with a successful operation.
-        """
-        return Result(value, isOk = True)
-    
-    @classmethod
-    def Err(cls, value :E) -> "Result": 
-        """
-        Constructs a new Result instance with a failed operation.
-
-        Args:
-            value (E): The value to be stored in the Result instance, set as failed.
-
-        Returns:
-            Result: A new Result instance with a failed operation.
-        """
-        return Result(value, isOk = False)
-
-    def unwrap(self) -> T:
-        """
-        Unwraps the value of the Result instance, if the operation was successful.
-
-        Raises:
-            ResultErr: If the operation was not successful.
-
-        Returns:
-            T: The value of the Result instance, if the operation was successful.
-        """
-        if self.isOk: return self.value
-        raise Result.ResultErr(f"Unwrapped Result.Err : {self.value}")
-
-    def unwrapOr(self, default :T) -> T:
-        """
-        Unwraps the value of the Result instance, if the operation was successful, otherwise
-        it returns a default value.
-
-        Args:
-            default (T): The default value to be returned if the operation was not successful.
-
-        Returns:
-            T: The value of the Result instance, if the operation was successful,
-            otherwise the default value.
-        """
-        return self.value if self.isOk else default
-    
-    def expect(self, err :"Result.ResultErr") -> T:
-        """
-        Expects that the value of the Result instance is successful, otherwise it raises an error.
-
-        Args:
-            err (Exception): The error to be raised if the operation was not successful.
-
-        Raises:
-            err: The error raised if the operation was not successful.
-
-        Returns:
-            T: The value of the Result instance, if the operation was successful.
-        """
-        if self.isOk: return self.value
-        raise err
-
-    U = TypeVar("U")
-    def map(self, mapper: Callable[[T], U]) -> "Result[U, E]":
-        """
-        Maps the value of the current Result to whatever is returned by the mapper function.
-        If the Result contained an unsuccessful operation to begin with it remains unchanged
-        (a reference to the current instance is returned).
-        If the mapper function panics the returned result instance will be of the error kind.
-
-        Args:
-            mapper (Callable[[T], U]): The mapper operation to be applied to the Result value.
-
-        Returns:
-            Result[U, E]: The result of the mapper operation applied to the Result value.
-        """
-        if self.isErr: return self
-        try: return Result.Ok(mapper(self.value))
-        except Exception as e: return Result.Err(e)
-    
-    D = TypeVar("D", bound = "Result.ResultErr")
-    def mapErr(self, mapper :Callable[[E], D]) -> "Result[T, D]":
-        """
-        Maps the error of the current Result to whatever is returned by the mapper function.
-        If the Result contained a successful operation it remains unchanged
-        (a reference to the current instance is returned).
-        If the mapper function panics this method does as well.
-
-        Args:
-            mapper (Callable[[E], D]): The mapper operation to be applied to the Result error.
-
-        Returns:
-            Result[U, E]: The result of the mapper operation applied to the Result error.
-        """
-        if self.isOk: return self
-        return Result.Err(mapper(self.value))
-
-    def __str__(self):
-        return f"Result::{'Ok' if self.isOk else 'Err'}({self.value})"
-
-# FILES
-def read_dataset(path :FilePath, datasetName = "Dataset (not actual file name!)") -> pd.DataFrame:
-    """
-    Reads a .csv or .tsv file and returns it as a Pandas DataFrame.
-
-    Args:
-        path : the path to the dataset file.
-        datasetName : the name of the dataset.
-
-    Raises:
-        DataErr: If anything goes wrong when trying to open the file, if pandas thinks the dataset is empty or if
-        it has less than 2 columns.
-    
-    Returns:
-        pandas.DataFrame: The dataset loaded as a Pandas DataFrame.
-    """
-    # I advise against the use of this function. This is an attempt at standardizing bad legacy code rather than
-    # removing / replacing it to avoid introducing as many bugs as possible in the tools still relying on this code.
-    # First off, this is not the best way to distinguish between .csv and .tsv files and Galaxy itself makes it really
-    # hard to implement anything better. Also, this function's name advertizes it as a dataset-specific operation and
-    # contains dubious responsibility (how many columns..) while being a file-opening function instead. My suggestion is
-    # TODO: stop using dataframes ever at all in anything and find a way to have tight control over file extensions.
-    try: dataset = pd.read_csv(path.show(), sep = '\t', header = None, engine = "python")
-    except:
-        try: dataset = pd.read_csv(path.show(), sep = ',', header = 0, engine = "python")
-        except Exception as err: raise DataErr(datasetName, f"encountered empty or wrongly formatted data: {err}")
-    
-    if len(dataset.columns) < 2: raise DataErr(datasetName, "a dataset is always meant to have at least 2 columns")
-    return dataset
-
-def readPickle(path :FilePath) -> Any:
-    """
-    Reads the contents of a .pickle file, which needs to exist at the given path.
-
-    Args:
-        path : the path to the .pickle file.
-    
-    Returns:
-        Any : the data inside a pickle file, could be anything.
-    """
-    with open(path.show(), "rb") as fd: return pickle.load(fd)
-
-def writePickle(path :FilePath, data :Any) -> None:
-    """
-    Saves any data in a .pickle file, created at the given path.
-
-    Args:
-        path : the path to the .pickle file.
-        data : the data to be written to the file.
-    
-    Returns:
-        None
-    """
-    with open(path.show(), "wb") as fd: pickle.dump(data, fd)
-
-def readCsv(path :FilePath, delimiter = ',', *, skipHeader = True) -> List[List[str]]:
-    """
-    Reads the contents of a .csv file, which needs to exist at the given path.
-
-    Args:
-        path : the path to the .csv file.
-        delimiter : allows other subformats such as .tsv to be opened by the same method (\\t delimiter).
-        skipHeader : whether the first row of the file is a header and should be skipped.
-    
-    Returns:
-        List[List[str]] : list of rows from the file, each parsed as a list of strings originally separated by commas.
-    """
-    with open(path.show(), "r", newline = "") as fd: return list(csv.reader(fd, delimiter = delimiter))[skipHeader:]
-
-def readSvg(path :FilePath, customErr :Optional[Exception] = None) -> ET.ElementTree:
-    """
-    Reads the contents of a .svg file, which needs to exist at the given path.
-
-    Args:
-        path : the path to the .svg file.
-    
-    Raises:
-        DataErr : if the map is malformed.
-    
-    Returns:
-        Any : the data inside a svg file, could be anything.
-    """
-    try: return ET.parse(path.show())
-    except (ET.XMLSyntaxError, ET.XMLSchemaParseError) as err:
-        raise customErr if customErr else err
-
-def writeSvg(path :FilePath, data:ET.ElementTree) -> None:
-    """
-    Saves svg data opened with lxml.etree in a .svg file, created at the given path.
-
-    Args:
-        path : the path to the .svg file.
-        data : the data to be written to the file.
-    
-    Returns:
-        None
-    """
-    with open(path.show(), "wb") as fd: fd.write(ET.tostring(data))
-
-# UI ARGUMENTS
-class Bool:
-    def __init__(self, argName :str) -> None:
-        self.argName = argName
-
-    def __call__(self, s :str) -> bool: return self.check(s)
-
-    def check(self, s :str) -> bool:
-        s = s.lower()
-        if s == "true" : return True
-        if s == "false": return False
-        raise ArgsErr(self.argName, "boolean string (true or false, not case sensitive)", f"\"{s}\"")
-
-class Float:
-    def __init__(self, argName = "Dataset values, not an argument") -> None:
-        self.argName = argName
-    
-    def __call__(self, s :str) -> float: return self.check(s)
-
-    def check(self, s :str) -> float:
-        try: return float(s)
-        except ValueError:
-            s = s.lower()
-            if s == "nan" or s == "none": return math.nan
-            raise ArgsErr(self.argName, "numeric string or \"None\" or \"NaN\" (not case sensitive)", f"\"{s}\"")
-
-# MODELS
-OldRule = List[Union[str, "OldRule"]]
-class Model(Enum):
-    """
-    Represents a metabolic model, either custom or locally supported. Custom models don't point
-    to valid file paths.
-    """
-
-    Recon   = "Recon"
-    ENGRO2  = "ENGRO2"
-    ENGRO2_no_legend = "ENGRO2_no_legend"
-    HMRcore = "HMRcore"
-    HMRcore_no_legend = "HMRcore_no_legend"
-    Custom  = "Custom" # Exists as a valid variant in the UI, but doesn't point to valid file paths.
-
-    def __raiseMissingPathErr(self, path :Optional[FilePath]) -> None:
-        if not path: raise PathErr("<<MISSING>>", "it's necessary to provide a custom path when retrieving files from a custom model")
-
-    def getRules(self, toolDir :str, customPath :Optional[FilePath] = None) -> Dict[str, Dict[str, OldRule]]:
-        """
-        Open "rules" file for this model.
-
-        Returns:
-            Dict[str, Dict[str, OldRule]] : the rules for this model.
-        """
-        path = customPath if self is Model.Custom else FilePath(f"{self.name}_rules", FileFormat.PICKLE, prefix = f"{toolDir}/local/pickle files/")
-        self.__raiseMissingPathErr(path)
-        return readPickle(path)
-    
-    def getTranslator(self, toolDir :str, customPath :Optional[FilePath] = None) -> Dict[str, Dict[str, str]]:
-        """
-        Open "gene translator (old: gene_in_rule)" file for this model.
-
-        Returns:
-            Dict[str, Dict[str, str]] : the translator dict for this model.
-        """
-        path = customPath if self is Model.Custom else FilePath(f"{self.name}_genes", FileFormat.PICKLE, prefix = f"{toolDir}/local/pickle files/")
-        self.__raiseMissingPathErr(path)
-        return readPickle(path)
-    
-    def getMap(self, toolDir = ".", customPath :Optional[FilePath] = None) -> ET.ElementTree:
-        path = customPath if self is Model.Custom else FilePath(f"{self.name}_map", FileFormat.SVG, prefix = f"{toolDir}/local/svg metabolic maps/")
-        self.__raiseMissingPathErr(path)
-        return readSvg(path, customErr = DataErr(path, f"custom map in wrong format"))
-    
-    def getCOBRAmodel(self, toolDir = ".", customPath :Optional[FilePath] = None, customExtension :Optional[FilePath]=None)->cobra.Model:
-        if(self is Model.Custom):
-            return self.load_custom_model(customPath, customExtension)
-        else:
-            return cobra.io.read_sbml_model(FilePath(f"{self.name}", FileFormat.XML, prefix = f"{toolDir}/local/models/").show())
-        
-    def load_custom_model(self, file_path :FilePath, ext :Optional[FileFormat] = None) -> cobra.Model:
-        ext = ext if ext else file_path.ext
-        try:
-            if str(ext) in FileFormat.XML.value:
-                return cobra.io.read_sbml_model(file_path.show())
-            
-            if str(ext) in FileFormat.JSON.value:
-                # Compressed files are not automatically handled by cobra
-                if(ext == "json"):
-                    return cobra.io.load_json_model(file_path.show())
-                else: 
-                    return self.extract_model(file_path, ext, "json")
-
-            if str(ext) in FileFormat.MAT.value:
-                # Compressed files are not automatically handled by cobra
-                if(ext == "mat"):
-                    return cobra.io.load_matlab_model(file_path.show())
-                else: 
-                    return self.extract_model(file_path, ext, "mat")
-
-            if str(ext) in FileFormat.YML.value:
-                # Compressed files are not automatically handled by cobra
-                if(ext == "yml"):
-                    return cobra.io.load_yaml_model(file_path.show())
-                else: 
-                    return self.extract_model(file_path, ext, "yml")
-
-        except Exception as e: raise DataErr(file_path, e.__str__())
-        raise DataErr(file_path,
-            f"Fomat \"{file_path.ext}\" is not recognized, only JSON, XML, MAT and YAML (.yml) files are supported.")
-    
-
-    def extract_model(file_path:FilePath, ext :FileFormat, model_encoding:Literal["json", "mat", "yml"]) -> cobra.Model:
-        """
-        Extract JSON, MAT and YAML COBRA model from a compressed file (zip, gz, bz2).
-        
-        Args:
-            file_path: File path of the model
-            ext: File extensions of class FileFormat (should be .zip, .gz or .bz2)
-            
-        Returns:
-            cobra.Model: COBRApy model 
-            
-        Raises:
-            Exception: Extraction errors
-        """
-        ext_str = str(ext)
-
-        try:
-            if '.zip' in ext_str:
-                with zipfile.ZipFile(file_path.show(), 'r') as zip_ref:
-                    with zip_ref.open(zip_ref.namelist()[0]) as json_file:
-                        content = json_file.read().decode('utf-8')
-                        if model_encoding == "json":
-                            return cobra.io.load_json_model(StringIO(content))
-                        elif model_encoding == "mat":
-                            return cobra.io.load_matlab_model(StringIO(content))
-                        elif model_encoding == "yml":
-                            return cobra.io.load_yaml_model(StringIO(content))
-                        else:
-                            raise ValueError(f"Unsupported model encoding: {model_encoding}. Supported: json, mat, yml")
-            elif '.gz' in ext_str:
-                with gzip.open(file_path.show(), 'rt', encoding='utf-8') as gz_ref:
-                    if model_encoding == "json":
-                        return cobra.io.load_json_model(gz_ref)
-                    elif model_encoding == "mat":
-                        return cobra.io.load_matlab_model(gz_ref)
-                    elif model_encoding == "yml":
-                        return cobra.io.load_yaml_model(gz_ref)
-                    else:
-                        raise ValueError(f"Unsupported model encoding: {model_encoding}. Supported: json, mat, yml")
-            elif '.bz2' in ext_str:
-                with bz2.open(file_path.show(), 'rt', encoding='utf-8') as bz2_ref:
-                    if model_encoding == "json":
-                        return cobra.io.load_json_model(bz2_ref)
-                    elif model_encoding == "mat":
-                        return cobra.io.load_matlab_model(bz2_ref)
-                    elif model_encoding == "yml":
-                        return cobra.io.load_yaml_model(bz2_ref)
-                    else:
-                        raise ValueError(f"Unsupported model encoding: {model_encoding}. Supported: json, mat, yml")
-            else:
-                raise ValueError(f"Compression format not supported: {ext_str}. Supported: .zip, .gz and .bz2")
-            
-        except Exception as e:
-            raise Exception(f"Error during model extraction: {str(e)}")
-        
-
-
+import math
+import re
+import sys
+import csv
+import pickle
+import lxml.etree as ET
+
+from enum import Enum
+from itertools import count
+from typing import Any, Callable, Dict, Generic, List, Literal, Optional, TypeVar, Union
+
+import pandas as pd
+import cobra
+
+import zipfile
+import gzip
+import bz2
+from io import StringIO
+
+class ValueErr(Exception):
+    def __init__(self, param_name, expected, actual):
+        super().__init__(f"Invalid value for {param_name}: expected {expected}, got {actual}")
+
+class PathErr(Exception):
+    def __init__(self, path, message):
+        super().__init__(f"Path error for '{path}': {message}")
+
+class FileFormat(Enum):
+    """
+    Encodes possible file extensions to conditionally save data in a different format.
+    """
+    DAT    = ("dat",) # this is how galaxy treats all your files!
+    CSV    = ("csv",) # this is how most editable input data is written
+    TSV    = ("tsv",) # this is how most editable input data is ACTUALLY written TODO: more support pls!!
+    SVG    = ("svg",) # this is how most metabolic maps are written
+    PNG    = ("png",) # this is a common output format for images (such as metabolic maps)
+    PDF    = ("pdf",) # this is also a common output format for images, as it's required in publications.
+    
+    # Updated to include compressed variants
+    XML    = ("xml", "xml.gz", "xml.zip", "xml.bz2") # SBML files are XML files, sometimes compressed
+    JSON   = ("json", "json.gz", "json.zip", "json.bz2") # COBRA models can be stored as JSON files, sometimes compressed
+    MAT    = ("mat", "mat.gz", "mat.zip", "mat.bz2") # COBRA models can be stored as MAT files, sometimes compressed
+    YML    = ("yml", "yml.gz", "yml.zip", "yml.bz2") # COBRA models can be stored as YML files, sometimes compressed
+
+    TXT    = ("txt",) # this is how most output data is written
+    PICKLE = ("pickle", "pk", "p") # this is how all runtime data structures are saved
+
+    def __init__(self, *extensions):
+        self.extensions = extensions
+        # Store original extension when set via fromExt
+        self._original_extension = None
+
+    @classmethod
+    def fromExt(cls, ext: str) -> "FileFormat":
+        """
+        Converts a file extension string to a FileFormat instance.
+        Args:
+            ext : The file extension as a string.
+        Returns:
+            FileFormat: The FileFormat instance corresponding to the file extension.
+        """
+        variantName = ext.upper()
+        if variantName in FileFormat.__members__: 
+            instance = FileFormat[variantName]
+            instance._original_extension = ext
+            return instance
+        
+        variantName = ext.lower()
+        for member in cls:
+            if variantName in member.value: 
+                # Not an actual copy: enum members are singletons, so storing the original
+                # extension mutates shared state until the next fromExt call overwrites it.
+                member._original_extension = ext
+                return member
+        
+        raise ValueErr("ext", "a valid FileFormat file extension", ext)
+
+    def __str__(self) -> str:
+        """
+        (Private) converts to str representation. Good practice for usage with argparse.
+        Returns:
+            str : the string representation of the file extension.
+        """
+        # If an original extension was stored via fromExt, use it
+        if hasattr(self, '_original_extension') and self._original_extension:
+            return self._original_extension
+        
+        # For XML, JSON, MAT and YML without original extension, use the base extension
+        if self == FileFormat.XML:
+            return "xml"
+        elif self == FileFormat.JSON:
+            return "json"
+        elif self == FileFormat.MAT:
+            return "mat"
+        elif self == FileFormat.YML:
+            return "yml"
+        
+        return self.value[-1]
+
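+# Illustrative example (not part of the public API): fromExt resolves both plain and
+# compressed extensions, and str() round-trips whatever was parsed last. Since enum
+# members are shared, the remembered extension reflects the most recent fromExt call.
+#
+#   fmt = FileFormat.fromExt("json.gz")   # -> FileFormat.JSON
+#   str(fmt)                              # -> "json.gz"
+#   str(FileFormat.fromExt("xml"))        # -> "xml"
+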
+class FilePath():
+    """
+    Represents a file path. View this as an attempt to standardize file-related operations by expecting
+    values of this type in any process requesting a file path.
+    """
+    def __init__(self, filePath: str, ext: FileFormat, *, prefix="") -> None:
+        """
+        (Private) Initializes an instance of FilePath.
+        Args:
+            filePath : the end of the path, containing the file name.
+            ext : the file's extension.
+            prefix : anything before filePath; a trailing '/' is added by the code if missing.
+        Returns:
+            None : practically, a FilePath instance.
+        """
+        self.ext = ext
+        self.filePath = filePath
+
+        if prefix and prefix[-1] != '/': 
+            prefix += '/'
+        self.prefix = prefix
+    
+    @classmethod
+    def fromStrPath(cls, path: str) -> "FilePath":
+        """
+        Factory method to parse a string from which to obtain, if possible, a valid FilePath instance.
+        It detects double extensions such as .json.gz and .xml.bz2, which are common in COBRA models.
+        These double extensions are not supported for other file types such as .csv.
+        Args:
+            path : the string containing the path
+        Raises:
+            PathErr : if the provided string doesn't represent a valid path.
+        Returns:
+            FilePath : the constructed instance.
+        """
+        result = re.search(r"^(?P<prefix>.*\/)?(?P<name>.*)\.(?P<ext>[^.]*)$", path)
+        if not result or not result["name"] or not result["ext"]:
+            raise PathErr(path, "cannot recognize folder structure or extension in path")
+
+        prefix = result["prefix"] if result["prefix"] else ""
+        name, ext = result["name"], result["ext"]
+
+        # Check for double extensions (json.gz, xml.zip, etc.)
+        parts = path.split(".")
+        if len(parts) >= 3:  
+            penultimate = parts[-2]
+            last = parts[-1]
+            double_ext = f"{penultimate}.{last}"
+            
+            # Try the double extension first
+            try:
+                ext_format = FileFormat.fromExt(double_ext)
+                name = ".".join(parts[:-2])
+                # Extract prefix if it exists
+                if '/' in name:
+                    prefix = name[:name.rfind('/') + 1]
+                    name = name[name.rfind('/') + 1:]
+                return cls(name, ext_format, prefix=prefix)
+            except ValueErr:
+                # If double extension doesn't work, fall back to single extension
+                pass
+
+        # Single extension fallback (original logic)
+        try:
+            ext_format = FileFormat.fromExt(ext)
+            return cls(name, ext_format, prefix=prefix)
+        except ValueErr:
+            raise PathErr(path, f"unsupported file extension: {ext}")
+
+    def show(self) -> str:
+        """
+        Shows the path as a string.
+        Returns:
+            str : the path shown as a string.
+        """
+        return f"{self.prefix}{self.filePath}.{self.ext}"
+    
+    def __str__(self) -> str: 
+        return self.show()
+
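+# Illustrative example (hypothetical path): fromStrPath understands the double extensions
+# used by compressed COBRA models, and show() reconstructs the original path.
+#
+#   p = FilePath.fromStrPath("models/ENGRO2.json.zip")
+#   p.prefix, p.filePath, str(p.ext)   # -> ("models/", "ENGRO2", "json.zip")
+#   p.show()                           # -> "models/ENGRO2.json.zip"
+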
+# ERRORS
+def terminate(msg :str) -> None:
+    """
+    Terminate the execution of the script with an error message.
+    
+    Args:
+        msg (str): The error message to be displayed.
+    
+    Returns:
+        None
+    """
+    sys.exit(f"Execution aborted: {msg}\n")
+
+def logWarning(msg :str, loggerPath :str) -> None:
+    """
+    Log a warning message to an output log file. The final period and the newline are
+    added by the function.
+
+    Args:
+        msg (str): The warning message to be logged.
+        loggerPath : The file path of the output log file. Given as a string, parsed to a FilePath and
+        immediately read back (beware: relatively expensive operation, log with caution).
+
+    Returns:
+        None
+    """
+    # building the path and then reading it immediately seems useless, but it's actually a way of
+    # validating that reduces repetition on the caller's side. Besides, logging a message by writing
+    # to a file is supposed to be computationally expensive anyway, so this is also a good deterrent from
+    # mindlessly logging whenever something comes up, log at the very end and tell the user everything
+    # that went wrong. If you don't like it: implement a persistent runtime buffer that gets dumped to
+    # the file only at the end of the program's execution.
+    with open(FilePath.fromStrPath(loggerPath).show(), 'a') as log: log.write(f"{msg}.\n")
+
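+# Illustrative example (hypothetical path): the logger path is validated by a FilePath
+# round-trip before the message is appended, e.g.:
+#
+#   logWarning("3 genes were not mapped to any reaction", "out/log.txt")
+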
+class CustomErr(Exception):
+    """
+    Custom error class to handle exceptions in a structured way, with a unique identifier and a message.
+    """
+    __idGenerator = count()
+    errName = "Custom Error"
+    def __init__(self, msg :str, details = "", explicitErrCode = -1) -> None:
+        """
+        (Private) Initializes an instance of CustomErr.
+
+        Args:
+            msg (str): Error message to be displayed.
+            details (str): Informs the user more about the error encountered. Defaults to "".
+            explicitErrCode (int): Explicit error code to be used. Defaults to -1.
+        
+        Returns:
+            None : practically, a CustomErr instance.
+        """
+        self.msg     = msg
+        self.details = details
+
+        self.id = max(explicitErrCode, next(CustomErr.__idGenerator))
+
+    def throw(self, loggerPath = "") -> None:
+        """
+        Raises the current CustomErr instance, logging a warning message before doing so.
+
+        Raises:
+            self: The current CustomErr instance.
+        
+        Returns:
+            None
+        """
+        if loggerPath: logWarning(str(self), loggerPath)
+        raise self
+
+    def abort(self) -> None:
+        """
+        Aborts the execution of the script.
+        
+        Returns:
+            None
+        """
+        terminate(str(self))
+
+    def __str__(self) -> str:
+        """
+        (Private) Returns a string representing the current CustomErr instance.
+
+        Returns:
+            str: A string representing the current CustomErr instance.
+        """
+        return f"{CustomErr.errName} #{self.id}: {self.msg}, {self.details}."
+
+class ArgsErr(CustomErr):
+    """
+    CustomErr subclass for UI arguments errors.
+    """
+    errName = "Args Error"
+    def __init__(self, argName :str, expected :Any, actual :Any, msg = "no further details provided") -> None:
+        super().__init__(f"argument \"{argName}\" expected {expected} but got {actual}", msg)
+
+class DataErr(CustomErr):
+    """
+    CustomErr subclass for data formatting errors.
+    """
+    errName = "Data Format Error"
+    def __init__(self, fileName :str, msg = "no further details provided") -> None:
+        super().__init__(f"file \"{fileName}\" contains malformed data", msg)
+
+class PathErr(CustomErr):
+    """
+    CustomErr subclass for filepath formatting errors.
+    """
+    errName = "Path Error"
+    def __init__(self, path :FilePath, msg = "no further details provided") -> None:
+        super().__init__(f"path \"{path}\" is invalid", msg)
+
+class ValueErr(CustomErr):
+    """
+    CustomErr subclass for any value error.
+    """
+    errName = "Value Error"
+    def __init__(self, valueName: str, expected :Any, actual :Any, msg = "no further details provided") -> None:
+        super().__init__("value " + f"\"{valueName}\" " * bool(valueName) + f"was supposed to be {expected}, but got {actual} instead", msg)
+
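+# Illustrative example: subclasses only customize the message, while ids keep increasing
+# across all CustomErr instances created during a run (shown here as <n>).
+#
+#   str(DataErr("data.csv", "missing header"))
+#   # -> 'Data Format Error #<n>: file "data.csv" contains malformed data, missing header.'
+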
+# RESULT
+T = TypeVar('T')
+E = TypeVar('E', bound = CustomErr) # should bind to Result.ResultErr but python happened!
+class Result(Generic[T, E]):
+    """
+    Class to handle the result of an operation, with a value and a boolean flag to indicate
+    whether the operation was successful or not.
+    """
+    class ResultErr(CustomErr):
+        """
+        CustomErr subclass for all Result errors.
+        """
+        errName = "Result Error"
+        def __init__(self, msg = "no further details provided") -> None:
+            super().__init__(msg)
+    def __init__(self, value :Union[T, E], isOk :bool) -> None:
+        """
+        (Private) Initializes an instance of Result.
+
+        Args:
+            value (Union[T, E]): The value to be stored in the Result instance.
+            isOk (bool): A boolean flag to indicate whether the operation was successful or not.
+        
+        Returns:
+            None : practically, a Result instance.
+        """
+        self.isOk  = isOk
+        self.isErr = not isOk
+        self.value = value
+
+    @classmethod
+    def Ok(cls, value :T) -> "Result":
+        """
+        Constructs a new Result instance with a successful operation.
+
+        Args:
+            value (T): The value to be stored in the Result instance, set as successful.
+
+        Returns:
+            Result: A new Result instance with a successful operation.
+        """
+        return Result(value, isOk = True)
+    
+    @classmethod
+    def Err(cls, value :E) -> "Result": 
+        """
+        Constructs a new Result instance with a failed operation.
+
+        Args:
+            value (E): The value to be stored in the Result instance, set as failed.
+
+        Returns:
+            Result: A new Result instance with a failed operation.
+        """
+        return Result(value, isOk = False)
+
+    def unwrap(self) -> T:
+        """
+        Unwraps the value of the Result instance, if the operation was successful.
+
+        Raises:
+            ResultErr: If the operation was not successful.
+
+        Returns:
+            T: The value of the Result instance, if the operation was successful.
+        """
+        if self.isOk: return self.value
+        raise Result.ResultErr(f"Unwrapped Result.Err : {self.value}")
+
+    def unwrapOr(self, default :T) -> T:
+        """
+        Unwraps the value of the Result instance, if the operation was successful, otherwise
+        it returns a default value.
+
+        Args:
+            default (T): The default value to be returned if the operation was not successful.
+
+        Returns:
+            T: The value of the Result instance, if the operation was successful,
+            otherwise the default value.
+        """
+        return self.value if self.isOk else default
+    
+    def expect(self, err :"Result.ResultErr") -> T:
+        """
+        Expects that the value of the Result instance is successful, otherwise it raises an error.
+
+        Args:
+            err (Exception): The error to be raised if the operation was not successful.
+
+        Raises:
+            err: The error raised if the operation was not successful.
+
+        Returns:
+            T: The value of the Result instance, if the operation was successful.
+        """
+        if self.isOk: return self.value
+        raise err
+
+    U = TypeVar("U")
+    def map(self, mapper: Callable[[T], U]) -> "Result[U, E]":
+        """
+        Maps the value of the current Result to whatever is returned by the mapper function.
+        If the Result contained an unsuccessful operation to begin with it remains unchanged
+        (a reference to the current instance is returned).
+        If the mapper function panics the returned result instance will be of the error kind.
+
+        Args:
+            mapper (Callable[[T], U]): The mapper operation to be applied to the Result value.
+
+        Returns:
+            Result[U, E]: The result of the mapper operation applied to the Result value.
+        """
+        if self.isErr: return self
+        try: return Result.Ok(mapper(self.value))
+        except Exception as e: return Result.Err(e)
+    
+    D = TypeVar("D", bound = "Result.ResultErr")
+    def mapErr(self, mapper :Callable[[E], D]) -> "Result[T, D]":
+        """
+        Maps the error of the current Result to whatever is returned by the mapper function.
+        If the Result contained a successful operation it remains unchanged
+        (a reference to the current instance is returned).
+        If the mapper function panics this method does as well.
+
+        Args:
+            mapper (Callable[[E], D]): The mapper operation to be applied to the Result error.
+
+        Returns:
+            Result[T, D]: The Result with the mapper operation applied to its error.
+        """
+        if self.isOk: return self
+        return Result.Err(mapper(self.value))
+
+    def __str__(self):
+        return f"Result::{'Ok' if self.isOk else 'Err'}({self.value})"
+
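+# Illustrative example: Result chains like its Rust namesake.
+#
+#   Result.Ok(2).map(lambda v: v * 10).unwrapOr(0)             # -> 20
+#   Result.Err(ValueErr("x", "an int", "a str")).unwrapOr(0)   # -> 0
+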
+# FILES
+def read_dataset(path :FilePath, datasetName = "Dataset (not actual file name!)") -> pd.DataFrame:
+    """
+    Reads a .csv or .tsv file and returns it as a Pandas DataFrame.
+
+    Args:
+        path : the path to the dataset file.
+        datasetName : the name of the dataset.
+
+    Raises:
+        DataErr: If anything goes wrong when trying to open the file, if pandas thinks the dataset is empty or if
+        it has less than 2 columns.
+    
+    Returns:
+        pandas.DataFrame: The dataset loaded as a Pandas DataFrame.
+    """
+    # I advise against the use of this function. This is an attempt at standardizing bad legacy code rather than
+    # removing / replacing it, so as to introduce as few bugs as possible in the tools still relying on this code.
+    # First off, this is not the best way to distinguish between .csv and .tsv files and Galaxy itself makes it really
+    # hard to implement anything better. Also, this function's name advertises it as a dataset-specific operation and
+    # carries dubious responsibility (how many columns..) while being a file-opening function instead. My suggestion is
+    # TODO: stop using dataframes ever at all in anything and find a way to have tight control over file extensions.
+    try: dataset = pd.read_csv(path.show(), sep = '\t', header = None, engine = "python")
+    except Exception:
+        try: dataset = pd.read_csv(path.show(), sep = ',', header = 0, engine = "python")
+        except Exception as err: raise DataErr(datasetName, f"encountered empty or wrongly formatted data: {err}")
+    
+    if len(dataset.columns) < 2: raise DataErr(datasetName, "a dataset is always meant to have at least 2 columns")
+    return dataset
+
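+# Illustrative example (hypothetical file):
+#
+#   df = read_dataset(FilePath.fromStrPath("data/expression.tsv"), "expression dataset")
+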
+def readPickle(path :FilePath) -> Any:
+    """
+    Reads the contents of a .pickle file, which needs to exist at the given path.
+
+    Args:
+        path : the path to the .pickle file.
+    
+    Returns:
+        Any : the data inside a pickle file, could be anything.
+    """
+    with open(path.show(), "rb") as fd: return pickle.load(fd)
+
+def writePickle(path :FilePath, data :Any) -> None:
+    """
+    Saves any data in a .pickle file, created at the given path.
+
+    Args:
+        path : the path to the .pickle file.
+        data : the data to be written to the file.
+    
+    Returns:
+        None
+    """
+    with open(path.show(), "wb") as fd: pickle.dump(data, fd)
+
+def readCsv(path :FilePath, delimiter = ',', *, skipHeader = True) -> List[List[str]]:
+    """
+    Reads the contents of a .csv file, which needs to exist at the given path.
+
+    Args:
+        path : the path to the .csv file.
+        delimiter : allows other subformats such as .tsv to be opened by the same method (\\t delimiter).
+        skipHeader : whether the first row of the file is a header and should be skipped.
+    
+    Returns:
+        List[List[str]] : list of rows from the file, each parsed as a list of strings originally separated by commas.
+    """
+    with open(path.show(), "r", newline = "") as fd: return list(csv.reader(fd, delimiter = delimiter))[skipHeader:]
+
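+# Illustrative example (hypothetical file): .tsv files go through the same reader, as
+# reaction_parsing does with its custom reactions table.
+#
+#   rows = readCsv(FilePath.fromStrPath("reactions.tsv"), delimiter = "\t")
+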
+def readSvg(path :FilePath, customErr :Optional[Exception] = None) -> ET.ElementTree:
+    """
+    Reads the contents of a .svg file, which needs to exist at the given path.
+
+    Args:
+        path : the path to the .svg file.
+    
+    Raises:
+        DataErr : if the map is malformed.
+    
+    Returns:
+        ET.ElementTree : the parsed SVG tree.
+    """
+    try: return ET.parse(path.show())
+    except (ET.XMLSyntaxError, ET.XMLSchemaParseError) as err:
+        raise customErr if customErr else err
+
+def writeSvg(path :FilePath, data:ET.ElementTree) -> None:
+    """
+    Saves svg data opened with lxml.etree in a .svg file, created at the given path.
+
+    Args:
+        path : the path to the .svg file.
+        data : the data to be written to the file.
+    
+    Returns:
+        None
+    """
+    with open(path.show(), "wb") as fd: fd.write(ET.tostring(data))
+
+# UI ARGUMENTS
+class Bool:
+    def __init__(self, argName :str) -> None:
+        self.argName = argName
+
+    def __call__(self, s :str) -> bool: return self.check(s)
+
+    def check(self, s :str) -> bool:
+        s = s.lower()
+        if s == "true" : return True
+        if s == "false": return False
+        raise ArgsErr(self.argName, "boolean string (true or false, not case sensitive)", f"\"{s}\"")
+
+class Float:
+    def __init__(self, argName = "Dataset values, not an argument") -> None:
+        self.argName = argName
+    
+    def __call__(self, s :str) -> float: return self.check(s)
+
+    def check(self, s :str) -> float:
+        try: return float(s)
+        except ValueError:
+            s = s.lower()
+            if s == "nan" or s == "none": return math.nan
+            raise ArgsErr(self.argName, "numeric string or \"None\" or \"NaN\" (not case sensitive)", f"\"{s}\"")
+
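+# Illustrative example (hypothetical flags): Bool and Float are meant as argparse "type"
+# converters, e.g.:
+#
+#   parser.add_argument("--using_RAS", type = Bool("using_RAS"))
+#   parser.add_argument("--threshold", type = Float("threshold"))
+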
+# MODELS
+OldRule = List[Union[str, "OldRule"]]
+class Model(Enum):
+    """
+    Represents a metabolic model, either custom or locally supported. Custom models don't point
+    to valid file paths.
+    """
+
+    Recon   = "Recon"
+    ENGRO2  = "ENGRO2"
+    ENGRO2_no_legend = "ENGRO2_no_legend"
+    HMRcore = "HMRcore"
+    HMRcore_no_legend = "HMRcore_no_legend"
+    Custom  = "Custom" # Exists as a valid variant in the UI, but doesn't point to valid file paths.
+
+    def __raiseMissingPathErr(self, path :Optional[FilePath]) -> None:
+        if not path: raise PathErr("<<MISSING>>", "it's necessary to provide a custom path when retrieving files from a custom model")
+
+    def getRules(self, toolDir :str, customPath :Optional[FilePath] = None) -> Dict[str, Dict[str, OldRule]]:
+        """
+        Open "rules" file for this model.
+
+        Returns:
+            Dict[str, Dict[str, OldRule]] : the rules for this model.
+        """
+        path = customPath if self is Model.Custom else FilePath(f"{self.name}_rules", FileFormat.PICKLE, prefix = f"{toolDir}/local/pickle files/")
+        self.__raiseMissingPathErr(path)
+        return readPickle(path)
+    
+    def getTranslator(self, toolDir :str, customPath :Optional[FilePath] = None) -> Dict[str, Dict[str, str]]:
+        """
+        Open "gene translator (old: gene_in_rule)" file for this model.
+
+        Returns:
+            Dict[str, Dict[str, str]] : the translator dict for this model.
+        """
+        path = customPath if self is Model.Custom else FilePath(f"{self.name}_genes", FileFormat.PICKLE, prefix = f"{toolDir}/local/pickle files/")
+        self.__raiseMissingPathErr(path)
+        return readPickle(path)
+    
+    def getMap(self, toolDir = ".", customPath :Optional[FilePath] = None) -> ET.ElementTree:
+        path = customPath if self is Model.Custom else FilePath(f"{self.name}_map", FileFormat.SVG, prefix = f"{toolDir}/local/svg metabolic maps/")
+        self.__raiseMissingPathErr(path)
+        return readSvg(path, customErr = DataErr(path, "custom map in wrong format"))
+    
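+    # Illustrative example: built-in variants resolve to the tool's bundled files, e.g.
+    # Model.HMRcore.getRules(toolDir) reads "{toolDir}/local/pickle files/HMRcore_rules.p".
+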
+    def getCOBRAmodel(self, toolDir = ".", customPath :Optional[FilePath] = None, customExtension :Optional[FileFormat] = None) -> cobra.Model:
+        if(self is Model.Custom):
+            return self.load_custom_model(customPath, customExtension)
+        else:
+            return cobra.io.read_sbml_model(FilePath(f"{self.name}", FileFormat.XML, prefix = f"{toolDir}/local/models/").show())
+        
+    def load_custom_model(self, file_path :FilePath, ext :Optional[FileFormat] = None) -> cobra.Model:
+        ext = ext if ext else file_path.ext
+        try:
+            if str(ext) in FileFormat.XML.value:
+                return cobra.io.read_sbml_model(file_path.show())
+            
+            if str(ext) in FileFormat.JSON.value:
+                # Compressed files are not automatically handled by cobra
+                if(ext == "json"):
+                    return cobra.io.load_json_model(file_path.show())
+                else: 
+                    return self.extract_model(file_path, ext, "json")
+
+            if str(ext) in FileFormat.MAT.value:
+                # Compressed files are not automatically handled by cobra
+                if(ext == "mat"):
+                    return cobra.io.load_matlab_model(file_path.show())
+                else: 
+                    return self.extract_model(file_path, ext, "mat")
+
+            if str(ext) in FileFormat.YML.value:
+                # Compressed files are not automatically handled by cobra
+                if(ext == "yml"):
+                    return cobra.io.load_yaml_model(file_path.show())
+                else: 
+                    return self.extract_model(file_path, ext, "yml")
+
+        except Exception as e: raise DataErr(file_path, str(e))
+        raise DataErr(file_path,
+            f"Format \"{file_path.ext}\" is not recognized, only JSON, XML, MAT and YAML (.yml) files are supported.")
+    
+
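+    # Illustrative example (hypothetical upload): a compressed custom model is routed
+    # through extract_model automatically, e.g.:
+    #
+    #   Model.Custom.getCOBRAmodel(customPath = FilePath.fromStrPath("uploads/my_model.json.gz"))
+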
+    def extract_model(self, file_path :FilePath, ext :FileFormat, model_encoding :Literal["json", "mat", "yml"]) -> cobra.Model:
+        """
+        Extract a JSON, MAT or YAML COBRA model from a compressed file (zip, gz, bz2).
+        
+        Args:
+            file_path: File path of the model
+            ext: Compound extension of class FileFormat (e.g. json.zip, mat.gz, yml.bz2)
+            model_encoding: Underlying model format inside the archive ("json", "mat" or "yml")
+            
+        Returns:
+            cobra.Model: COBRApy model 
+            
+        Raises:
+            Exception: Extraction errors
+        """
+        ext_str = str(ext)
+
+        try:
+            if '.zip' in ext_str:
+                with zipfile.ZipFile(file_path.show(), 'r') as zip_ref:
+                    # The first archive member is decoded as UTF-8 text: fine for JSON/YAML,
+                    # but .mat is a binary format and may not survive text decoding (the same
+                    # caveat applies to the text-mode gz/bz2 branches below).
+                    with zip_ref.open(zip_ref.namelist()[0]) as model_file:
+                        content = model_file.read().decode('utf-8')
+                        if model_encoding == "json":
+                            return cobra.io.load_json_model(StringIO(content))
+                        elif model_encoding == "mat":
+                            return cobra.io.load_matlab_model(StringIO(content))
+                        elif model_encoding == "yml":
+                            return cobra.io.load_yaml_model(StringIO(content))
+                        else:
+                            raise ValueError(f"Unsupported model encoding: {model_encoding}. Supported: json, mat, yml")
+            elif '.gz' in ext_str:
+                with gzip.open(file_path.show(), 'rt', encoding='utf-8') as gz_ref:
+                    if model_encoding == "json":
+                        return cobra.io.load_json_model(gz_ref)
+                    elif model_encoding == "mat":
+                        return cobra.io.load_matlab_model(gz_ref)
+                    elif model_encoding == "yml":
+                        return cobra.io.load_yaml_model(gz_ref)
+                    else:
+                        raise ValueError(f"Unsupported model encoding: {model_encoding}. Supported: json, mat, yml")
+            elif '.bz2' in ext_str:
+                with bz2.open(file_path.show(), 'rt', encoding='utf-8') as bz2_ref:
+                    if model_encoding == "json":
+                        return cobra.io.load_json_model(bz2_ref)
+                    elif model_encoding == "mat":
+                        return cobra.io.load_matlab_model(bz2_ref)
+                    elif model_encoding == "yml":
+                        return cobra.io.load_yaml_model(bz2_ref)
+                    else:
+                        raise ValueError(f"Unsupported model encoding: {model_encoding}. Supported: json, mat, yml")
+            else:
+                raise ValueError(f"Compression format not supported: {ext_str}. Supported: .zip, .gz and .bz2")
+            
+        except Exception as e:
+            raise Exception(f"Error during model extraction: {str(e)}")
+        
+
+
     def __str__(self) -> str: return self.value
\ No newline at end of file
--- a/COBRAxy/utils/reaction_parsing.py	Fri Sep 05 08:27:04 2025 +0000
+++ b/COBRAxy/utils/reaction_parsing.py	Fri Sep 05 09:18:26 2025 +0000
@@ -124,7 +124,6 @@
   Returns:
     ReactionsDict : dictionary encoding custom reactions information.
   """
-  reactionsData :Dict[str, str] = {row[0]: row[1] for row in utils.readCsv(utils.FilePath.fromStrPath(customReactionsPath))} 
-  
+  reactionsData :Dict[str, str] = {row[0]: row[1] for row in utils.readCsv(utils.FilePath.fromStrPath(customReactionsPath), delimiter = "\t")} 
   return create_reaction_dict(reactionsData)