changeset 9:c4c64002d01e draft default tip

planemo upload for repository https://github.com/galaxyproteomics/tools-galaxyp/tree/master/tools/openms commit 5c080b1e2b99f1c88f4557e9fec8c45c9d23b906
author galaxyp
date Fri, 14 Jun 2024 21:40:27 +0000
parents 085863ba8f0e
children
files MapAlignerSpectrum.xml fill_ctd_clargs.py generate-foo.sh get_tests.py macros.xml prepare_test_data_manual.sh readme.md test-data.sh
diffstat 8 files changed, 476 insertions(+), 428 deletions(-)
--- a/MapAlignerSpectrum.xml	Thu Dec 01 19:24:40 2022 +0000
+++ b/MapAlignerSpectrum.xml	Fri Jun 14 21:40:27 2024 +0000
@@ -1,8 +1,7 @@
-<?xml version='1.0' encoding='UTF-8'?>
 <!--This is a configuration file for the integration of a tools into Galaxy (https://galaxyproject.org/). This file was automatically generated using CTDConverter.-->
 <!--Proposed Tool Section: [Map Alignment]-->
 <tool id="MapAlignerSpectrum" name="MapAlignerSpectrum" version="@TOOL_VERSION@+galaxy@VERSION_SUFFIX@" profile="21.05">
-  <description>Corrects retention time distortions between maps by spectrum alignment.</description>
+  <description>Corrects retention time distortions between maps by spectrum alignment</description>
   <macros>
     <token name="@EXECUTABLE@">MapAlignerSpectrum</token>
     <import>macros.xml</import>
@@ -17,9 +16,9 @@
 mkdir in_cond.in &&
 #if $in_cond.in_select == "no"
 mkdir ${' '.join(["'in_cond.in/%s'" % (i) for i, f in enumerate($in_cond.in) if f])} && 
-${' '.join(["ln -s '%s' 'in_cond.in/%s/%s.%s' && " % (f, i, re.sub('[^\w\-_]', '_', f.element_identifier), $gxy2omsext(f.ext)) for i, f in enumerate($in_cond.in) if f])}
+${' '.join(["cp '%s' 'in_cond.in/%s/%s.%s' && " % (f, i, re.sub('[^\w\-_]', '_', f.element_identifier), $gxy2omsext(f.ext)) for i, f in enumerate($in_cond.in) if f])}
 #else
-ln -s '$in_cond.in' 'in_cond.in/${re.sub("[^\w\-_]", "_", $in_cond.in.element_identifier)}.$gxy2omsext($in_cond.in.ext)' &&
+cp '$in_cond.in' 'in_cond.in/${re.sub("[^\w\-_]", "_", $in_cond.in.element_identifier)}.$gxy2omsext($in_cond.in.ext)' &&
 #end if
 #if "out_FLAG" in str($OPTIONAL_OUTPUTS).split(',')
   mkdir out &&
@@ -75,27 +74,27 @@
         <option value="yes">Yes: process each dataset in an independent job</option>
       </param>
       <when value="no">
-        <param argument="-in" type="data" format="mzml" multiple="true" optional="false" label="Input files to align (all must have the same file type)" help=" select mzml data sets(s)"/>
+        <param argument="-in" type="data" format="mzml" multiple="true" label="Input files to align (all must have the same file type)" help=" select mzml data sets(s)"/>
       </when>
       <when value="yes">
-        <param argument="-in" type="data" format="mzml" multiple="false" optional="false" label="Input files to align (all must have the same file type)" help=" select mzml data sets(s)"/>
+        <param argument="-in" type="data" format="mzml" label="Input files to align (all must have the same file type)" help=" select mzml data sets(s)"/>
       </when>
     </conditional>
     <section name="algorithm" title="Algorithm parameters section" help="" expanded="false">
-      <param name="gapcost" argument="-algorithm:gapcost" type="float" optional="true" min="0.0" value="1.0" label="This Parameter stands for the cost of opening a gap in the Alignment" help="A gap means that one spectrum can not be aligned directly to another spectrum in the Map. This happens, when the similarity of both spectra a too low or even not present. Imagine it as a insert or delete of the spectrum in the map (similar to sequence alignment). The gap is necessary for aligning, if we open a gap there is a possibility that an another spectrum can be correct aligned with a higher score as before without gap. But to open a gap is a negative event and needs to carry a punishment, so a gap should only be opened if the benefits outweigh the downsides. The Parameter is to giving as a positive number, the implementation convert it to a negative number"/>
-      <param name="affinegapcost" argument="-algorithm:affinegapcost" type="float" optional="true" min="0.0" value="0.5" label="This Parameter controls the cost of extension a already open gap" help="The idea behind the affine gapcost lies under the assumption, that it is better to get a long distance of connected gaps than to have a structure of gaps interspersed with matches (gap match gap match etc.). Therefore the punishment for the extension of a gap generally should be lower than the normal gapcost. If the result of the alignment shows high compression, it is a good idea to lower either the affine gapcost or gap opening cost"/>
-      <param name="cutoff_score" argument="-algorithm:cutoff_score" type="float" optional="true" min="0.0" max="1.0" value="0.7" label="The Parameter defines the threshold which filtered spectra, these spectra are high potential candidate for deciding the interval of a sub-alignment" help="Only those pair of spectra are selected, which has a score higher or same of the threshold"/>
-      <param name="bucketsize" argument="-algorithm:bucketsize" type="integer" optional="true" min="1" value="100" label="Defines the numbers of buckets" help="It is a quantize of the interval of those points, which defines the main alignment (match points). These points have to filtered, to reduce the amount of points for the calculating a smoother spline curve"/>
-      <param name="anchorpoints" argument="-algorithm:anchorpoints" type="integer" optional="true" min="1" max="100" value="100" label="Defines the percent of numbers of match points which a selected from one bucket" help="The high score pairs are previously selected. The reduction of match points helps to get a smoother spline curve"/>
-      <param name="mismatchscore" argument="-algorithm:mismatchscore" type="float" optional="true" max="0.0" value="-5.0" label="Defines the score of two spectra if they have no similarity to each othe" help=""/>
-      <param name="scorefunction" argument="-algorithm:scorefunction" type="select" optional="true" label="The score function is the core of an alignment" help="The success of an alignment depends mostly of the elected score function. The score function return the similarity of two spectra. The score influence defines later the way of possible traceback. There are multiple spectra similarity scores available">
+      <param name="gapcost" argument="-algorithm:gapcost" type="float" min="0.0" value="1.0" label="This Parameter stands for the cost of opening a gap in the Alignment" help="A gap means that one spectrum can not be aligned directly to another spectrum in the Map. This happens, when the similarity of both spectra a too low or even not present. Imagine it as a insert or delete of the spectrum in the map (similar to sequence alignment). The gap is necessary for aligning, if we open a gap there is a possibility that an another spectrum can be correct aligned with a higher score as before without gap. But to open a gap is a negative event and needs to carry a punishment, so a gap should only be opened if the benefits outweigh the downsides. The Parameter is to giving as a positive number, the implementation convert it to a negative number"/>
+      <param name="affinegapcost" argument="-algorithm:affinegapcost" type="float" min="0.0" value="0.5" label="This Parameter controls the cost of extension a already open gap" help="The idea behind the affine gapcost lies under the assumption, that it is better to get a long distance of connected gaps than to have a structure of gaps interspersed with matches (gap match gap match etc.). Therefore the punishment for the extension of a gap generally should be lower than the normal gapcost. If the result of the alignment shows high compression, it is a good idea to lower either the affine gapcost or gap opening cost"/>
+      <param name="cutoff_score" argument="-algorithm:cutoff_score" type="float" min="0.0" max="1.0" value="0.7" label="The Parameter defines the threshold which filtered spectra, these spectra are high potential candidate for deciding the interval of a sub-alignment" help="Only those pair of spectra are selected, which has a score higher or same of the threshold"/>
+      <param name="bucketsize" argument="-algorithm:bucketsize" type="integer" min="1" value="100" label="Defines the numbers of buckets" help="It is a quantize of the interval of those points, which defines the main alignment (match points). These points have to filtered, to reduce the amount of points for the calculating a smoother spline curve"/>
+      <param name="anchorpoints" argument="-algorithm:anchorpoints" type="integer" min="1" max="100" value="100" label="Defines the percent of numbers of match points which a selected from one bucket" help="The high score pairs are previously selected. The reduction of match points helps to get a smoother spline curve"/>
+      <param name="mismatchscore" argument="-algorithm:mismatchscore" type="float" max="0.0" value="-5.0" label="Defines the score of two spectra if they have no similarity to each othe" help=""/>
+      <param name="scorefunction" argument="-algorithm:scorefunction" type="select" label="The score function is the core of an alignment" help="The success of an alignment depends mostly of the elected score function. The score function return the similarity of two spectra. The score influence defines later the way of possible traceback. There are multiple spectra similarity scores available">
         <option value="SteinScottImproveScore" selected="true">SteinScottImproveScore</option>
         <option value="ZhangSimilarityScore">ZhangSimilarityScore</option>
         <expand macro="list_string_san" name="scorefunction"/>
       </param>
     </section>
     <section name="model" title="Options to control the modeling of retention time transformations from data" help="" expanded="false">
-      <param name="type" argument="-model:type" type="select" optional="true" label="Type of model" help="">
+      <param name="type" argument="-model:type" type="select" label="Type of model" help="">
         <option value="linear">linear</option>
         <option value="b_spline">b_spline</option>
         <option value="lowess">lowess</option>
@@ -104,48 +103,48 @@
       </param>
       <section name="linear" title="Parameters for 'linear' model" help="" expanded="false">
         <param name="symmetric_regression" argument="-model:linear:symmetric_regression" type="boolean" truevalue="true" falsevalue="false" checked="false" label="Perform linear regression on 'y - x' vs" help="'y + x', instead of on 'y' vs. 'x'"/>
-        <param name="x_weight" argument="-model:linear:x_weight" type="select" optional="true" label="Weight x values" help="">
+        <param name="x_weight" argument="-model:linear:x_weight" type="select" label="Weight x values" help="">
           <option value="1/x">1/x</option>
           <option value="1/x2">1/x2</option>
           <option value="ln(x)">ln(x)</option>
-          <option value=""></option>
+          <option value="x" selected="true">x</option>
           <expand macro="list_string_san" name="x_weight"/>
         </param>
-        <param name="y_weight" argument="-model:linear:y_weight" type="select" optional="true" label="Weight y values" help="">
+        <param name="y_weight" argument="-model:linear:y_weight" type="select" label="Weight y values" help="">
           <option value="1/y">1/y</option>
           <option value="1/y2">1/y2</option>
           <option value="ln(y)">ln(y)</option>
-          <option value=""></option>
+          <option value="y" selected="true">y</option>
           <expand macro="list_string_san" name="y_weight"/>
         </param>
-        <param name="x_datum_min" argument="-model:linear:x_datum_min" type="float" optional="true" value="1e-15" label="Minimum x value" help=""/>
-        <param name="x_datum_max" argument="-model:linear:x_datum_max" type="float" optional="true" value="1000000000000000.0" label="Maximum x value" help=""/>
-        <param name="y_datum_min" argument="-model:linear:y_datum_min" type="float" optional="true" value="1e-15" label="Minimum y value" help=""/>
-        <param name="y_datum_max" argument="-model:linear:y_datum_max" type="float" optional="true" value="1000000000000000.0" label="Maximum y value" help=""/>
+        <param name="x_datum_min" argument="-model:linear:x_datum_min" type="float" value="1e-15" label="Minimum x value" help=""/>
+        <param name="x_datum_max" argument="-model:linear:x_datum_max" type="float" value="1000000000000000.0" label="Maximum x value" help=""/>
+        <param name="y_datum_min" argument="-model:linear:y_datum_min" type="float" value="1e-15" label="Minimum y value" help=""/>
+        <param name="y_datum_max" argument="-model:linear:y_datum_max" type="float" value="1000000000000000.0" label="Maximum y value" help=""/>
       </section>
       <section name="b_spline" title="Parameters for 'b_spline' model" help="" expanded="false">
-        <param name="wavelength" argument="-model:b_spline:wavelength" type="float" optional="true" min="0.0" value="0.0" label="Determines the amount of smoothing by setting the number of nodes for the B-spline" help="The number is chosen so that the spline approximates a low-pass filter with this cutoff wavelength. The wavelength is given in the same units as the data; a higher value means more smoothing. '0' sets the number of nodes to twice the number of input points"/>
-        <param name="num_nodes" argument="-model:b_spline:num_nodes" type="integer" optional="true" min="0" value="5" label="Number of nodes for B-spline fitting" help="Overrides 'wavelength' if set (to two or greater). A lower value means more smoothing"/>
-        <param name="extrapolate" argument="-model:b_spline:extrapolate" type="select" optional="true" label="Method to use for extrapolation beyond the original data range" help="'linear': Linear extrapolation using the slope of the B-spline at the corresponding endpoint. 'b_spline': Use the B-spline (as for interpolation). 'constant': Use the constant value of the B-spline at the corresponding endpoint. 'global_linear': Use a linear fit through the data (which will most probably introduce discontinuities at the ends of the data range)">
+        <param name="wavelength" argument="-model:b_spline:wavelength" type="float" min="0.0" value="0.0" label="Determines the amount of smoothing by setting the number of nodes for the B-spline" help="The number is chosen so that the spline approximates a low-pass filter with this cutoff wavelength. The wavelength is given in the same units as the data; a higher value means more smoothing. '0' sets the number of nodes to twice the number of input points"/>
+        <param name="num_nodes" argument="-model:b_spline:num_nodes" type="integer" min="0" value="5" label="Number of nodes for B-spline fitting" help="Overrides 'wavelength' if set (to two or greater). A lower value means more smoothing"/>
+        <param name="extrapolate" argument="-model:b_spline:extrapolate" type="select" label="Method to use for extrapolation beyond the original data range" help="'linear': Linear extrapolation using the slope of the B-spline at the corresponding endpoint. 'b_spline': Use the B-spline (as for interpolation). 'constant': Use the constant value of the B-spline at the corresponding endpoint. 'global_linear': Use a linear fit through the data (which will most probably introduce discontinuities at the ends of the data range)">
           <option value="linear" selected="true">linear</option>
           <option value="b_spline">b_spline</option>
           <option value="constant">constant</option>
           <option value="global_linear">global_linear</option>
           <expand macro="list_string_san" name="extrapolate"/>
         </param>
-        <param name="boundary_condition" argument="-model:b_spline:boundary_condition" type="integer" optional="true" min="0" max="2" value="2" label="Boundary condition at B-spline endpoints: 0 (value zero), 1 (first derivative zero) or 2 (second derivative zero)" help=""/>
+        <param name="boundary_condition" argument="-model:b_spline:boundary_condition" type="integer" min="0" max="2" value="2" label="Boundary condition at B-spline endpoints: 0 (value zero), 1 (first derivative zero) or 2 (second derivative zero)" help=""/>
       </section>
       <section name="lowess" title="Parameters for 'lowess' model" help="" expanded="false">
-        <param name="span" argument="-model:lowess:span" type="float" optional="true" min="0.0" max="1.0" value="0.666666666666667" label="Fraction of datapoints (f) to use for each local regression (determines the amount of smoothing)" help="Choosing this parameter in the range .2 to .8 usually results in a good fit"/>
-        <param name="num_iterations" argument="-model:lowess:num_iterations" type="integer" optional="true" min="0" value="3" label="Number of robustifying iterations for lowess fitting" help=""/>
-        <param name="delta" argument="-model:lowess:delta" type="float" optional="true" value="-1.0" label="Nonnegative parameter which may be used to save computations (recommended value is 0.01 of the range of the input" help="e.g. for data ranging from 1000 seconds to 2000 seconds, it could be set to 10). Setting a negative value will automatically do this"/>
-        <param name="interpolation_type" argument="-model:lowess:interpolation_type" type="select" optional="true" label="Method to use for interpolation between datapoints computed by lowess" help="'linear': Linear interpolation. 'cspline': Use the cubic spline for interpolation. 'akima': Use an akima spline for interpolation">
+        <param name="span" argument="-model:lowess:span" type="float" min="0.0" max="1.0" value="0.666666666666667" label="Fraction of datapoints (f) to use for each local regression (determines the amount of smoothing)" help="Choosing this parameter in the range .2 to .8 usually results in a good fit"/>
+        <param name="num_iterations" argument="-model:lowess:num_iterations" type="integer" min="0" value="3" label="Number of robustifying iterations for lowess fitting" help=""/>
+        <param name="delta" argument="-model:lowess:delta" type="float" value="-1.0" label="Nonnegative parameter which may be used to save computations (recommended value is 0.01 of the range of the input" help="e.g. for data ranging from 1000 seconds to 2000 seconds, it could be set to 10). Setting a negative value will automatically do this"/>
+        <param name="interpolation_type" argument="-model:lowess:interpolation_type" type="select" label="Method to use for interpolation between datapoints computed by lowess" help="'linear': Linear interpolation. 'cspline': Use the cubic spline for interpolation. 'akima': Use an akima spline for interpolation">
           <option value="linear">linear</option>
           <option value="cspline" selected="true">cspline</option>
           <option value="akima">akima</option>
           <expand macro="list_string_san" name="interpolation_type"/>
         </param>
-        <param name="extrapolation_type" argument="-model:lowess:extrapolation_type" type="select" optional="true" label="Method to use for extrapolation outside the data range" help="'two-point-linear': Uses a line through the first and last point to extrapolate. 'four-point-linear': Uses a line through the first and second point to extrapolate in front and and a line through the last and second-to-last point in the end. 'global-linear': Uses a linear regression to fit a line through all data points and use it for interpolation">
+        <param name="extrapolation_type" argument="-model:lowess:extrapolation_type" type="select" label="Method to use for extrapolation outside the data range" help="'two-point-linear': Uses a line through the first and last point to extrapolate. 'four-point-linear': Uses a line through the first and second point to extrapolate in front and and a line through the last and second-to-last point in the end. 'global-linear': Uses a linear regression to fit a line through all data points and use it for interpolation">
           <option value="two-point-linear">two-point-linear</option>
           <option value="four-point-linear" selected="true">four-point-linear</option>
           <option value="global-linear">global-linear</option>
@@ -153,13 +152,13 @@
         </param>
       </section>
       <section name="interpolated" title="Parameters for 'interpolated' model" help="" expanded="false">
-        <param name="interpolation_type" argument="-model:interpolated:interpolation_type" type="select" optional="true" label="Type of interpolation to apply" help="">
+        <param name="interpolation_type" argument="-model:interpolated:interpolation_type" type="select" label="Type of interpolation to apply" help="">
           <option value="linear">linear</option>
           <option value="cspline" selected="true">cspline</option>
           <option value="akima">akima</option>
           <expand macro="list_string_san" name="interpolation_type"/>
         </param>
-        <param name="extrapolation_type" argument="-model:interpolated:extrapolation_type" type="select" optional="true" label="Type of extrapolation to apply: two-point-linear: use the first and last data point to build a single linear model, four-point-linear: build two linear models on both ends using the first two / last two points, global-linear: use all points to build a single linear model" help="Note that global-linear may not be continuous at the border">
+        <param name="extrapolation_type" argument="-model:interpolated:extrapolation_type" type="select" label="Type of extrapolation to apply: two-point-linear: use the first and last data point to build a single linear model, four-point-linear: build two linear models on both ends using the first two / last two points, global-linear: use all points to build a single linear model" help="Note that global-linear may not be continuous at the border">
           <option value="two-point-linear" selected="true">two-point-linear</option>
           <option value="four-point-linear">four-point-linear</option>
           <option value="global-linear">global-linear</option>
@@ -169,7 +168,7 @@
     </section>
     <expand macro="adv_opts_macro">
       <param argument="-force" type="boolean" truevalue="true" falsevalue="false" checked="false" label="Overrides tool-specific checks" help=""/>
-      <param argument="-test" type="hidden" optional="true" value="False" label="Enables the test mode (needed for internal use only)" help="">
+      <param argument="-test" type="hidden" value="False" label="Enables the test mode (needed for internal use only)" help="" optional="true">
         <expand macro="list_string_san" name="test"/>
       </param>
     </expand>
@@ -195,7 +194,8 @@
       <filter>OPTIONAL_OUTPUTS is not None and "ctd_out_FLAG" in OPTIONAL_OUTPUTS</filter>
     </data>
   </outputs>
-  <tests><!-- TOPP_MapAlignerSpectrum_1 -->
+  <tests>
+    <!-- TOPP_MapAlignerSpectrum_1 -->
     <test expect_num_outputs="2">
       <section name="adv_opts">
         <param name="force" value="false"/>
@@ -218,8 +218,8 @@
         <param name="type" value="interpolated"/>
         <section name="linear">
           <param name="symmetric_regression" value="false"/>
-          <param name="x_weight"/>
-          <param name="y_weight"/>
+          <param name="x_weight" value="x"/>
+          <param name="y_weight" value="y"/>
           <param name="x_datum_min" value="1e-15"/>
           <param name="x_datum_max" value="1000000000000000.0"/>
           <param name="y_datum_min" value="1e-15"/>
@@ -249,11 +249,14 @@
           <is_valid_xml/>
         </assert_contents>
       </output>
+      <assert_stdout>
+        <has_text_matching expression="@EXECUTABLE@ took .* \(wall\), .* \(CPU\), .* \(system\), .* \(user\)(; Peak Memory Usage: 32 MB)?."/>
+      </assert_stdout>
     </test>
   </tests>
   <help><![CDATA[Corrects retention time distortions between maps by spectrum alignment.
 
 
-For more information, visit http://www.openms.de/doxygen/release/2.8.0/html/TOPP_MapAlignerSpectrum.html]]></help>
+For more information, visit https://openms.de/doxygen/release/3.1.0/html/TOPP_MapAlignerSpectrum.html]]></help>
   <expand macro="references"/>
 </tool>
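
The new <assert_stdout> check matches the timing summary a TOPP tool prints when it finishes, with @EXECUTABLE@ expanding to MapAlignerSpectrum (see the <macros> block above). A minimal Python sketch of what the expression accepts; the sample line and its timings are invented for illustration and are not taken from an actual run:

    import re

    # Pattern from the test, with @EXECUTABLE@ expanded; the optional group covers the
    # peak-memory suffix.
    pattern = (r"MapAlignerSpectrum took .* \(wall\), .* \(CPU\), "
               r".* \(system\), .* \(user\)(; Peak Memory Usage: 32 MB)?.")

    # Hypothetical stdout line in the expected shape; the numbers are made up.
    sample = ("MapAlignerSpectrum took 3.21 s (wall), 3.05 s (CPU), "
              "0.10 s (system), 2.95 s (user).")

    assert re.search(pattern, sample) is not None
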
--- a/fill_ctd_clargs.py	Thu Dec 01 19:24:40 2022 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,70 +0,0 @@
-#!/usr/bin/env python3
-
-import operator
-from argparse import ArgumentParser
-from functools import reduce  # forward compatibility for Python 3
-from io import StringIO
-
-from CTDopts.CTDopts import (
-    _Null,
-    CTDModel,
-    ModelTypeError,
-    Parameters
-)
-
-
-def getFromDict(dataDict, mapList):
-    return reduce(operator.getitem, mapList, dataDict)
-
-
-def setInDict(dataDict, mapList, value):
-    getFromDict(dataDict, mapList[:-1])[mapList[-1]] = value
-
-
-if __name__ == "__main__":
-    # note add_help=False since otherwise arguments starting with -h will
-    # trigger an error (despite allow_abbreviate)
-    parser = ArgumentParser(prog="fill_ctd_clargs",
-                            description="fill command line arguments"
-                            "into a CTD file and write the CTD file to stdout",
-                            add_help=False, allow_abbrev=False)
-    parser.add_argument("--ini_file", dest="ini_file", help="input ini file",
-                        metavar='INI', default=None, required=True)
-    parser.add_argument("--ctd_file", dest="ctd_file", help="input ctd file"
-                        "if given then optional parameters from the ini file"
-                        "will be filled with the defaults from this CTD file",
-                        metavar='CTD', default=None, required=False)
-    args, cliargs = parser.parse_known_args()
-
-    # load CTDModel
-    ini_model = None
-    try:
-        ini_model = CTDModel(from_file=args.ini_file)
-    except ModelTypeError:
-        pass
-    try:
-        ini_model = Parameters(from_file=args.ini_file)
-    except ModelTypeError:
-        pass
-    assert ini_model is not None, "Could not parse %s, seems to be no CTD/PARAMS" % (args.ini_file)
-
-    # get a dictionary of the ctd arguments where the values of the parameters
-    # given on the command line are overwritten
-    ini_values = ini_model.parse_cl_args(cl_args=cliargs, ignore_required=True)
-
-    if args.ctd_file:
-        ctd_model = CTDModel(from_file=args.ctd_file)
-        ctd_values = ctd_model.get_defaults()
-        for param in ini_model.get_parameters():
-            if not param.required and (param.default is None or type(param.default) is _Null):
-                lineage = param.get_lineage(name_only=True)
-                try:
-                    default = getFromDict(ctd_values, lineage)
-                except KeyError:
-                    continue
-                setInDict(ini_values, lineage, default)
-
-    # write the ctd with the values taken from the dictionary
-    out = StringIO()
-    ctd_tree = ini_model.write_ctd(out, ini_values)
-    print(out.getvalue())
--- a/generate-foo.sh	Thu Dec 01 19:24:40 2022 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,215 +0,0 @@
-#!/usr/bin/env bash
-
-# parse test definitions from OpenMS sources for a tool with a given id
-function get_tests2 {
-    id=$1
-    >&2 echo "generate tests for $id"
-    echo '<xml name="autotest_'"$id"'">'
-
-    # get the tests from the CMakeLists.txt
-    # 1st remove some tests
-    # - OpenSwathMzMLFileCacher with -convert_back argument https://github.com/OpenMS/OpenMS/issues/4399
-    # - IDRipper PATH gets empty causing problems. TODO But overall the option needs to be handled differentlt
-    # - several tools with duplicated input (leads to conflict when linking)
-    # - MaRaCluster with -consensus_out (parameter blacklister: https://github.com/OpenMS/OpenMS/issues/4456)
-    # - FileMerger with mixed dta dta2d input (ftype can not be specified in the test, dta can not be sniffed)
-    # - some input files are originally in a subdir (degenerated cases/), but not in test-data
-    # - OpenSwathAnalyzer 9/10: cachedMzML (not supported yet)
-    # - SiriusAdapter_4 depends on online service which may timeout .. so keep disabled https://github.com/OpenMS/OpenMS/pull/5010
-    # - SiriusAdapter_10 should work in >2.8 https://github.com/OpenMS/OpenMS/issues/5869
-    CMAKE=$(cat $OPENMSGIT/src/tests/topp/CMakeLists.txt $OPENMSGIT/src/tests/topp/THIRDPARTY/third_party_tests.cmake  |
-        sed 's@${DATA_DIR_SHARE}/@@g' |
-        grep -v 'OpenSwathMzMLFileCacher .*-convert_back' |
-        sed 's/${TMP_RIP_PATH}/""/' |
-        grep -v "MaRaClusterAdapter.*-consensus_out"|
-        grep -v "FileMerger_1_input1.dta2d.*FileMerger_1_input2.dta " |
-        sed 's@degenerate_cases/@@g' |
-        egrep -v 'TOPP_OpenSwathAnalyzer_test_3"|TOPP_OpenSwathAnalyzer_test_4"' |
-        sed 's/\("TOPP_SiriusAdapter_4".*\)-sirius:database all\(.*\)/\1-sirius:database pubchem\2/' |
-        grep -v '"TOPP_SiriusAdapter_10"')
-
-    # 1st part is a dirty hack to join lines containing a single function call, e.g.
-    # addtest(....
-    #         ....)
-    echo "$CMAKE" | sed 's/#.*//; s/^\s*//; s/\s*$//' | grep -v "^#" | grep -v "^$"  | awk '{printf("%s@NEWLINE@", $0)}' | sed 's/)@NEWLINE@/)\n/g' | sed 's/@NEWLINE@/ /g' | 
-        grep -iE "add_test\(\"(TOPP|UTILS)_.*/$id " | egrep -v "_prepare\"|_convert|WRITEINI|WRITECTD|INVALIDVALUE"  | while read -r line
-    do
-        line=$(echo "$line" | sed 's/add_test("\([^"]\+\)"/\1/; s/)$//; s/\${TOPP_BIN_PATH}\///g;s/\${DATA_DIR_TOPP}\///g; s#THIRDPARTY/##g')
-        # >&2 echo $line
-        test_id=$(echo "$line" | cut -d" " -f 1)
-        tool_id=$(echo "$line" | cut -d" " -f 2)
-        # >&2 echo "test_id $test_id"
-        if [[ $test_id =~ _out_?[0-9]? ]]; then
-            >&2 echo "    skip $test_id $line"
-            continue
-        fi
-        if [[ ${id,,} != ${tool_id,,} ]]; then
-            >&2 echo "    skip $test_id ($id != $tool_id) $line"
-            continue
-        fi
-
-        #remove tests with set_tests_properties(....PROPERTIES WILL_FAIL 1)
-        if grep -lq "$test_id"'\".* PROPERTIES WILL_FAIL 1' $OPENMSGIT/src/tests/topp/CMakeLists.txt $OPENMSGIT/src/tests/topp/THIRDPARTY/third_party_tests.cmake; then
-            >&2 echo "    skip failing "$test_id
-            continue
-        fi
-        tes="  <test>\n"
-        line=$(fix_tmp_files "$line")
-        line=$(unique_files "$line")
-        # >&2 echo LINE $line
-        #if there is an ini file then we use this to generate the test
-        #otherwise the ctd file is used
-        #other command line parameters are inserted later into this xml
-        if grep -lq "\-ini" <<<"$line"; then
-            ini=$(echo $line | sed 's/.*-ini \([^ ]\+\).*/\1/')
-            ini="test-data/$ini"
-        else
-            ini="ctd/$tool_id.ctd"
-        fi
-        # >&2 echo "========================================================"
-        # >&2 echo "USING ini $ini"
-        cli=$(echo $line |cut -d" " -f3- | sed 's/-ini [^ ]\+//')
-
-        ctdtmp=$(mktemp)
-        # using eval: otherwise for some reason quoted values are not used properly ('A B' -> ["'A", "B'"])
-        # >&2 echo "python3 fill_ctd_clargs.py --ini_file $ini $cli" 
-        eval "python3 fill_ctd_clargs.py --ini_file $ini $cli" > "$ctdtmp"
-        # >&2 echo $ctdtmp
-        # >&2 cat $ctdtmp
-        testtmp=$(mktemp)
-        # >&2 echo CTDConverter galaxy -i $ctdtmp -o $testtmp -s aux/tools_blacklist.txt -f "$FILETYPES" -m macros.xml -t tool.conf  -p aux/hardcoded_params.json --tool-version $VERSION --test-only --test-unsniffable csv tsv txt dta dta2d edta mrm splib --test-condition "compare=sim_size" "delta_frac=0.7"
-        CTDConverter galaxy -i $ctdtmp -o $testtmp -s aux/tools_blacklist.txt -f "$FILETYPES" -m macros.xml -t tool.conf  -p aux/hardcoded_params.json --tool-version $VERSION --test-only --test-unsniffable csv tsv txt dta dta2d edta mrm splib --test-condition "compare=sim_size" "delta_frac=0.7" > /dev/null
-        echo "<!-- $test_id -->"
-        cat $testtmp | grep -v '<output.*file=""' # | grep -v 'CHEMISTRY/'
-
-        rm "$ctdtmp" "$testtmp"
-
-        #> /dev/null
-
-        #rm $testtmp
-    done 
-    echo '</xml>'
-}
-
-#some tests use the same file twice which does not work in planemo tests
-#hence we create symlinks for each file used twice
-function unique_files {
-    line=$@
-    for arg in $@
-    do
-        if [[ ! -f "test-data/$arg" ]]; then
-            continue
-        fi
-        cnt=$(grep -c $arg <<< $(echo "$line" | tr ' ' '\n'))
-        while [[ $cnt -gt 1 ]]; do
-            new_arg=$(echo $arg | sed "s/\(.*\)\./\1_$cnt./")
-            ln -fs $arg test-data/$new_arg
-            line=$(echo $line | sed "s/\($arg.*\)$arg/\1$new_arg/")
-            cnt=$(grep -c $arg <<< $(echo "$line" | tr ' ' '\n'))
-        done
-    done
-
-    echo $line
-}
-
-# options of out_type selects need to be fixed to Galaxy data types
-function fix_out_type {
-    grep "^$1" "$2" | awk '{print $2}'
-}
-
-#OpenMS tests output to tmp files and compare with FuzzyDiff to the expected file.
-#problem: the extension of the tmp files is unusable for test generation.
-#unfortunately the extensions used in the DIFF lines are not always usable for the CLI
-#(e.g. for prepare_test_data, e.g. CLI expects csv but test file is txt)
-#this function replaces the tmp file by the expected file. 
-function fix_tmp_files {
-    # >&2 echo "FIX $line"
-    ret=""
-    for a in $@; do
-        # >&2 echo "    a "$a
-        if [[ ! $a =~ .tmp$ ]] && [[ ! $a =~ _tmp_ ]]; then
-            ret="$ret $a"
-            continue
-        fi
-        diff_line=$(cat $OPENMSGIT/src/tests/topp/CMakeLists.txt $OPENMSGIT/src/tests/topp/THIRDPARTY/third_party_tests.cmake | awk '{printf("%s@NEWLINE@", $0)}' | sed 's/)@NEWLINE@/)\n/g' | sed 's/@NEWLINE@/ /g' | grep '\${DIFF}.*'"$a")
-        # >&2 echo "    diff_line "$diff_line
-        in1=$(sed 's/.*-in1 \([^ ]\+\).*/\1/' <<<$diff_line)
-        # >&2 echo "    in1 "$in1
-        if [[  "$a" != "$in1" ]]; then
-            ret="$ret $a"
-            continue
-        fi
-        in2=$(sed 's/.*-in2 \([^ ]\+\).*/\1/' <<<$diff_line)
-        in2=$(basename $in2 | sed 's/)$//')
-        # >&2 echo "    in2 "$in2
-        if [[ -f "test-data/$in2" ]]; then
-            ln -fs "$in1" "test-data/$in2"
-            ret="$ret $in2"
-        else
-            ret="$ret $a"
-        fi
-    done
-#    >&2 echo "--> $ret"
-    echo "$ret"
-}
-
-function link_tmp_files {
-    # note this also considers commented lines (starting with a #)
-    # because of tests where the diff command is commented and we
-    # still want to use the extension of these files
-    cat $OPENMSGIT/src/tests/topp/CMakeLists.txt $OPENMSGIT/src/tests/topp/THIRDPARTY/third_party_tests.cmake | sed 's/^\s*//; s/\s*$//' | grep -v "^$"  | awk '{printf("%s@NEWLINE@", $0)}' | sed 's/)@NEWLINE@/)\n/g' | sed 's/@NEWLINE@/ /g' | grep "\${DIFF}" | while read -r line
-    do
-        in1=$(sed 's/.*-in1 \([^ ]\+\).*/\1/' <<<$line)
-        in1=$(basename $in1 | sed 's/)$//')
-        in2=$(sed 's/.*-in2 \([^ ]\+\).*/\1/' <<<$line)
-        in2=$(basename $in2 | sed 's/)$//')
-        if [[ "$in1" == "$in2" ]]; then
-            >&2 echo "not linking equal $in1 $in2"
-            continue
-        fi
-        ln -f -s $in1 test-data/$in2
-    done
-    
-    find test-data/ -name "*.tmp" -print0 | 
-    while IFS= read -r -d '' i; do 
-        if [ ! -e test-data/$(basename $i .tmp) ]; then
-            ln -s $(basename $i) test-data/$(basename $i .tmp)
-        else
-            ln -fs $(basename $i) test-data/$(basename $i .tmp)
-        fi
-    done
-}
-
-
-
-# parse data preparation calls from OpenMS sources for a tool with a given id
-function prepare_test_data {
-#     id=$1
-# | egrep -i "$id\_.*[0-9]+(_prepare\"|_convert)?"
-
-    # TODO SiriusAdapter depends on online service which may timeout .. so keep disabled https://github.com/OpenMS/OpenMS/pull/5010
-    cat $OPENMSGIT/src/tests/topp/CMakeLists.txt  $OPENMSGIT/src/tests/topp/THIRDPARTY/third_party_tests.cmake | sed 's/#.*$//'| sed 's/^\s*//; s/\s*$//' | grep -v "^$"  | awk '{printf("%s@NEWLINE@", $0)}' | sed 's/)@NEWLINE@/)\n/g' | sed 's/@NEWLINE@/ /g' | 
-        sed 's/degenerate_cases\///' | 
-        egrep -v "WRITEINI|WRITECTD|INVALIDVALUE|DIFF" | 
-        grep add_test | 
-        egrep "TOPP|UTILS" |
-        sed 's@${DATA_DIR_SHARE}/@@g;'|
-        sed 's@${TMP_RIP_PATH}@./@g'|
-        sed 's@TOFCalibration_ref_masses @TOFCalibration_ref_masses.txt @g; s@TOFCalibration_const @TOFCalibration_const.csv @'| 
-	sed 's/\("TOPP_SiriusAdapter_4".*\)-sirius:database all\(.*\)/\1-sirius:database pubchem\2/' |
-    while read line
-    do
-        test_id=$(echo "$line" | sed 's/add_test(//; s/"//g;  s/)[^)]*$//; s/\${TOPP_BIN_PATH}\///g;s/\${DATA_DIR_TOPP}\///g; s#THIRDPARTY/##g' | cut -d" " -f1)
-
-        if grep -lq "$test_id"'\".* PROPERTIES WILL_FAIL 1' $OPENMSGIT/src/tests/topp/CMakeLists.txt $OPENMSGIT/src/tests/topp/THIRDPARTY/third_party_tests.cmake; then
-            >&2 echo "    skip failing "$test_id
-            continue
-        fi
-
-        line=$(echo "$line" | sed 's/add_test("//; s/)[^)]*$//; s/\${TOPP_BIN_PATH}\///g;s/\${DATA_DIR_TOPP}\///g; s#THIRDPARTY/##g' | cut -d" " -f2-)
-        # line="$(fix_tmp_files $line)"
-        echo 'echo executing "'$test_id'"'
-	echo "$line > $test_id.stdout 2> $test_id.stderr"
-        echo "if [[ \"\$?\" -ne \"0\" ]]; then >&2 echo '$test_id failed'; >&2 echo -e \"stderr:\n\$(cat $test_id.stderr | sed 's/^/    /')\"; echo -e \"stdout:\n\$(cat $test_id.stdout)\";fi"    
-    done
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/get_tests.py	Fri Jun 14 21:40:27 2024 +0000
@@ -0,0 +1,344 @@
+#!/usr/bin/env python
+
+import argparse
+import os.path
+import re
+import shlex
+import sys
+import tempfile
+from typing import (
+    Dict,
+    List,
+    Optional,
+    TextIO,
+    Tuple,
+)
+
+from ctdconverter.common.utils import (
+    ParameterHardcoder,
+    parse_hardcoded_parameters,
+    parse_input_ctds,
+)
+from ctdconverter.galaxy.converter import convert_models
+from CTDopts.CTDopts import (
+    CTDModel,
+    ModelTypeError,
+    Parameters,
+)
+
+SKIP_LIST = [
+    r"_prepare\"",
+    r"_convert",
+    r"WRITEINI",
+    r"WRITECTD",
+    r"INVALIDVALUE",
+    r"\.ini\.json",
+    r"OpenSwathMzMLFileCacher .*-convert_back",  # - OpenSwathMzMLFileCacher with -convert_back argument https://github.com/OpenMS/OpenMS/issues/4399
+    r"MaRaClusterAdapter.*-consensus_out",  # - MaRaCluster with -consensus_out (parameter blacklister: https://github.com/OpenMS/OpenMS/issues/4456)
+    r"FileMerger_1_input1.dta2d.*FileMerger_1_input2.dta ",  # - FileMerger with mixed dta dta2d input (ftype can not be specified in the test, dta can not be sniffed)
+    r'^(TOPP_OpenSwathAnalyzer_test_3|TOPP_OpenSwathAnalyzer_test_4)$',  # no support for cached mzML
+    r'TOPP_SiriusAdapter_[0-9]+$',  # Do not test SiriusAdapter https://github.com/OpenMS/OpenMS/issues/7000 .. will be removed anyway
+    r'TOPP_AssayGeneratorMetabo_(7|8|9|10|11|12|13|14|15|16|17|18)$'  # Skip AssayGeneratorMetabo tests using Sirius  https://github.com/OpenMS/OpenMS/issues/7150 (will be replaced by two tools)
+]
+
+
+def get_failing_tests(cmake: List[str]) -> List[str]:
+    failing_tests = []
+    re_fail = re.compile(r"set_tests_properties\(\"([^\"]+)\" PROPERTIES WILL_FAIL 1\)")
+
+    for cmake_file in cmake:
+        with open(cmake_file) as cmake_fh:
+            for line in cmake_fh:
+                match = re_fail.search(line)
+                if match:
+                    failing_tests.append(match.group(1))
+    return failing_tests
+
+
+def fix_tmp_files(line: str, diff_pairs: Dict[str, str]) -> str:
+    """
+    OpenMS tests output to tmp files and compare with FuzzyDiff to the expected file.
+    problem: the extension of the tmp files is unusable for test generation.
+    unfortunately the extensions used in the DIFF lines are not always usable for the CLI
+    (e.g. for prepare_test_data, e.g. CLI expects csv but test file is txt)
+    this function replaces the tmp file by the expected file.
+    """
+    cmd = shlex.split(line)
+    for i, e in enumerate(cmd):
+        if e in diff_pairs:
+            dst = os.path.join("test-data", diff_pairs[e])
+            if os.path.exists(dst):
+                os.unlink(dst)
+            sys.stderr.write(f"symlink {e} {dst}\n")
+            os.symlink(e, dst)
+            cmd[i] = diff_pairs[e]
+    return shlex.join(cmd)
+
+
+def get_ini(line: str, tool_id: str) -> Tuple[str, str]:
+    """
+    if there is an ini file then we use this to generate the test
+    otherwise the ctd file is used
+    other command line parameters are inserted later into this xml
+    """
+    cmd = shlex.split(line)
+    ini = None
+    for i, e in enumerate(cmd):
+        if e == "-ini":
+            ini = cmd[i + 1]
+            cmd = cmd[:i] + cmd[i + 2:]
+    if ini:
+        return os.path.join("test-data", ini), shlex.join(cmd)
+    else:
+        return os.path.join("ctd", f"{tool_id}.ctd"), line
+
+
+def unique_files(line: str):
+    """
+    some tests use the same file twice which does not work in planemo tests
+    hence we create symlinks for each file used twice
+    """
+    cmd = shlex.split(line)
+    # print(f"{cmd}")
+    files = {}
+    # determine the list of indexes where each file argument (anything appearing in test-data/) appears
+    for idx, e in enumerate(cmd):
+        p = os.path.join("test-data", e)
+        if not os.path.exists(p) and not os.path.islink(p):
+            continue
+        try:
+            files[e].append(idx)
+        except KeyError:
+            files[e] = [idx]
+    # print(f"{files=}")
+    for f in files:
+        if len(files[f]) < 2:
+            continue
+        for i, idx in enumerate(files[f]):
+            f_parts = f.split(".")
+            f_parts[0] = f"{f_parts[0]}_{i}"
+            new_f = ".".join(f_parts)
+            # if os.path.exists(os.path.join("test-data", new_f)):
+            #     os.unlink(os.path.join("test-data", new_f))
+            sys.stderr.write(
+                f'\tsymlink {os.path.join("test-data", new_f)} {f}\n'
+            )
+            try:
+                os.symlink(f, os.path.join("test-data", new_f))
+            except FileExistsError:
+                pass
+            cmd[idx] = new_f
+    return shlex.join(cmd)
+
+
+def fill_ctd_clargs(ini: str, line: str, ctd_tmp: TextIO) -> None:
+    cmd = shlex.split(line)
+
+    # load CTDModel
+    ini_model = None
+    try:
+        ini_model = CTDModel(from_file=ini)
+    except ModelTypeError:
+        pass
+    try:
+        ini_model = Parameters(from_file=ini)
+    except ModelTypeError:
+        pass
+    assert ini_model is not None, "Could not parse %s, seems to be no CTD/PARAMS" % ini
+
+    # get a dictionary of the ctd arguments where the values of the parameters
+    # given on the command line are overwritten
+    ini_values = ini_model.parse_cl_args(cl_args=cmd, ignore_required=True)
+    ini_model.write_ctd(ctd_tmp, ini_values)
+
+
+def process_test_line(
+    id: str,
+    line: str,
+    failing_tests: List[str],
+    skip_list: List[str],
+    diff_pairs: Dict[str, str],
+) -> Optional[str]:
+
+    re_test_id = re.compile(r"add_test\(\"([^\"]+)\" ([^ ]+) (.*)")
+    re_id_out_test = re.compile(r"_out_?[0-9]?")
+
+    # TODO auto extract from the set(OLD_OSW_PARAM ...) line
+    line = line.replace(
+        "${OLD_OSW_PARAM}",
+        " -test -mz_extraction_window 0.05 -mz_extraction_window_unit Th -ms1_isotopes 0 -Scoring:TransitionGroupPicker:compute_peak_quality -Scoring:Scores:use_ms1_mi false -Scoring:Scores:use_mi_score false",
+    )
+
+    line = line.replace("${TOPP_BIN_PATH}/", "")
+    line = line.replace("${DATA_DIR_TOPP}/", "")
+    line = line.replace("THIRDPARTY/", "")
+    line = line.replace("${DATA_DIR_SHARE}/", "")
+    # IDRipper PATH gets empty causing problems. TODO But overall the option needs to be handled differently
+    line = line.replace("${TMP_RIP_PATH}/", "")
+    # some input files are originally in a subdir (degenerated cases/), but not in test-data
+    line = line.replace("degenerate_cases/", "")
+    # determine the test and tool ids and remove the 1) add_test("TESTID" 2) trailing )
+    match = re_test_id.match(line)
+    if not match:
+        sys.exit(f"Ill formated test line {line}\n")
+    test_id = match.group(1)
+    tool_id = match.group(2)
+
+    line = f"{match.group(2)} {match.group(3)}"
+
+    if test_id in failing_tests:
+        sys.stderr.write(f"    skip failing {test_id} {line}\n")
+        return
+
+    if id != tool_id:
+        sys.stderr.write(f"    skip {test_id} ({id} != {tool_id}) {line}\n")
+        return
+
+    if re_id_out_test.search(test_id):
+        sys.stderr.write(f"    skip {test_id} {line}\n")
+        return
+
+    for skip in skip_list:
+        if re.search(skip, line):
+            return
+        if re.search(skip, test_id):
+            return
+
+    line = fix_tmp_files(line, diff_pairs)
+    # print(f"fix {line=}")
+    line = unique_files(line)
+    # print(f"unq {line=}")
+    ini, line = get_ini(line, tool_id)
+
+    from dataclasses import dataclass, field
+
+    @dataclass
+    class CTDConverterArgs:
+        input_files: list
+        output_destination: str
+        default_executable_path: Optional[str] = None
+        hardcoded_parameters: Optional[str] = None
+        parameter_hardcoder: Optional[ParameterHardcoder] = None
+        xsd_location: Optional[str] = None
+        formats_file: Optional[str] = None
+        add_to_command_line: str = ""
+        required_tools_file: Optional[str] = None
+        skip_tools_file: Optional[str] = None
+        macros_files: Optional[List[str]] = field(default_factory=list)
+        test_macros_files: Optional[List[str]] = field(default_factory=list)
+        test_macros_prefix: Optional[List[str]] = field(default_factory=list)
+        test_test: bool = False
+        test_only: bool = False
+        test_unsniffable: Optional[List[str]] = field(default_factory=list)
+        test_condition: Optional[List[str]] = ("compare=sim_size", "delta_frac=0.05")
+        tool_version: str = None
+        tool_profile: str = None
+        bump_file: str = None
+
+    # create an ini/ctd file where the values are equal to the arguments from the command line
+    # and transform it to xml
+    test = [f"<!-- {test_id} -->\n"]
+    with tempfile.NamedTemporaryFile(
+        mode="w+", delete_on_close=False
+    ) as ctd_tmp, tempfile.NamedTemporaryFile(
+        mode="w+", delete_on_close=False
+    ) as xml_tmp:
+        fill_ctd_clargs(ini, line, ctd_tmp)
+        ctd_tmp.close()
+        xml_tmp.close()
+        parsed_ctd = parse_input_ctds(None, [ctd_tmp.name], xml_tmp.name, "xml")
+        ctd_args = CTDConverterArgs(
+            input_files=[ctd_tmp.name],
+            output_destination=xml_tmp.name,
+            macros_files=["macros.xml"],
+            skip_tools_file="aux/tools_blacklist.txt",
+            formats_file="aux/filetypes.txt",
+            # tool_conf_destination = "tool.conf",
+            hardcoded_parameters="aux/hardcoded_params.json",
+            tool_version="3.1",
+            test_only=True,
+            test_unsniffable=[
+                "csv",
+                "tsv",
+                "txt",
+                "dta",
+                "dta2d",
+                "edta",
+                "mrm",
+                "splib",
+            ],
+            test_condition=["compare=sim_size", "delta_frac=0.7"],
+        )
+        ctd_args.parameter_hardcoder = parse_hardcoded_parameters(
+            ctd_args.hardcoded_parameters
+        )
+        convert_models(ctd_args, parsed_ctd)
+        xml_tmp = open(xml_tmp.name, "r")
+        for l in xml_tmp:
+            test.append(l)
+
+    return "".join(test)
+
+
+parser = argparse.ArgumentParser(description="Create Galaxy tests for a OpenMS tools")
+parser.add_argument("--id", dest="id", help="tool id")
+parser.add_argument("--cmake", nargs="+", help="OpenMS test CMake files")
+args = parser.parse_args()
+sys.stderr.write(f"generate tests for {args.id}\n")
+
+re_comment = re.compile("#.*")
+re_empty_prefix = re.compile(r"^\s*")
+re_empty_suffix = re.compile(r"\s*$")
+re_add_test = re.compile(r"add_test\(\"(TOPP|UTILS)_.*/" + args.id)
+re_diff = re.compile(r"\$\{DIFF\}.* -in1 ([^ ]+) -in2 ([^ ]+)")
+failing_tests = get_failing_tests(args.cmake)
+tests = []
+
+# process the given CMake files and compile lists of
+# - test lines .. essentially add_test(...)
+# - and pairs of files that are diffed
+jline = ""
+test_lines = []
+diff_pairs = {}
+for cmake in args.cmake:
+    with open(cmake) as cmake_fh:
+        for line in cmake_fh:
+            # remove comments, empty prefixes and suffixes
+            line = re_comment.sub("", line)
+            line = re_empty_prefix.sub("", line)
+            line = re_empty_suffix.sub("", line)
+            # skip empty lines
+            if line == "":
+                continue
+
+            # join test statements that are split over multiple lines
+            if line.endswith(")"):
+                jline += " " + line[:-1]
+            else:
+                jline = line
+                continue
+            line, jline = jline.strip(), ""
+            match = re_diff.search(line)
+            if match:
+                in1 = match.group(1).split("/")[-1]
+                in2 = match.group(2).split("/")[-1]
+                if in1 != in2:
+                    diff_pairs[in1] = in2
+            elif re_add_test.match(line):
+                test_lines.append(line)
+
+for line in test_lines:
+    test = process_test_line(args.id, line, failing_tests, SKIP_LIST, diff_pairs)
+    if test:
+        tests.append(test)
+
+tests = "\n".join(tests)
+print(
+    f"""
+<xml name="autotest_{args.id}">
+{tests}
+</xml>
+"""
+)
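
get_tests.py replaces the deleted fill_ctd_clargs.py and generate-foo.sh: it scans the OpenMS CMake test definitions, keeps the add_test lines for the requested tool, and converts each one into a Galaxy <test> via CTDConverter. A hedged sketch of the selection step; the invocation paths and the CMake line below are illustrative assumptions, not copied from the OpenMS sources:

    import re

    # Illustrative invocation (paths are assumptions):
    #   python get_tests.py --id MapAlignerSpectrum \
    #       --cmake openms/src/tests/topp/CMakeLists.txt > autotests.xml

    tool = "MapAlignerSpectrum"
    re_add_test = re.compile(r"add_test\(\"(TOPP|UTILS)_.*/" + tool)
    re_test_id = re.compile(r"add_test\(\"([^\"]+)\" ([^ ]+) (.*)")

    # Invented, already-joined CMake test line (the trailing ')' has been stripped by the
    # line-joining loop, as in the script above).
    line = ('add_test("TOPP_MapAlignerSpectrum_1" ${TOPP_BIN_PATH}/MapAlignerSpectrum '
            '-test -in spectra.mzML -trafo_out MapAlignerSpectrum_1.tmp')

    assert re_add_test.match(line)                # line belongs to the requested tool
    line = line.replace("${TOPP_BIN_PATH}/", "")  # same placeholder stripping as process_test_line
    m = re_test_id.match(line)
    print(m.group(1))  # test id:  TOPP_MapAlignerSpectrum_1
    print(m.group(2))  # tool id:  MapAlignerSpectrum
    print(m.group(3))  # remaining CLI arguments handed to fill_ctd_clargs / CTDConverter
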
--- a/macros.xml	Thu Dec 01 19:24:40 2022 +0000
+++ b/macros.xml	Fri Jun 14 21:40:27 2024 +0000
@@ -3,19 +3,19 @@
      You can edit this file to add your own macros, if you so desire, or you can
      add additional macro files using the m/macros parameter -->
 <macros>
-  <token name="@TOOL_VERSION@">2.8</token>
+  <token name="@TOOL_VERSION@">3.1</token>
   <token name="@VERSION_SUFFIX@">0</token>
+  <token name="@TEST_DATA_LOCATION@"/>
   <xml name="requirements">
     <requirements>
       <requirement type="package" version="@TOOL_VERSION@">openms</requirement>
       <requirement type="package" version="@TOOL_VERSION@">openms-thirdparty</requirement>
       <!-- omssa (which has been excluded from 3rdparty) and makeblastdb for OMSSAAdapter -->
-      <requirement type="package" version="2.1.9">omssa</requirement>
-      <requirement type="package" version="2.13.0">blast</requirement>
+      <requirement type="package" version="2.14.1">blast</requirement>
       <!--<requirement type="package" version="5.0.0">tpp</requirement>-->
       <!-- for realpath (used e.g. in LuciphorAdapter) -->
 	  <!--<requirement type="package" version="8.25">coreutils</requirement>-->
-	  <requirement type="package" version="1.4">ctdopts</requirement>
+	  <requirement type="package" version="1.5">ctdopts</requirement>
       <yield/>
     </requirements>
   </xml>
@@ -26,6 +26,8 @@
       <regex match="Could not allocate metaspace" level="fatal_oom" description="Java memory Exception"/>
       <regex match="Cannot create VM thread" level="fatal_oom" description="Java memory Exception"/>
       <regex match="qUncompress: could not allocate enough memory to uncompress data" level="fatal_oom" description="Java memory Exception"/>
+      <regex match="OMSSA ran out of RAM" level="fatal_oom" description="Could not allocate memory"/>
+      <regex match="comet' crashed hard (segfault-like). Please check the log." level="fatal_oom" description="Could not allocate memory"/>
     </stdio>
   </xml>
   <xml name="references">
@@ -113,11 +115,11 @@
   </token>
 
 <token name="@EXT_FOO@"><![CDATA[#def oms2gxyext(o)
-    #set m={'txt': 'txt', 'tsv': 'tabular', 'bioml': 'xml', 'consensusXML': 'consensusxml', 'csv': 'csv', 'dta': 'dta', 'dta2d': 'dta2d', 'edta': 'edta', 'fa': 'fasta', 'fas': 'fasta', 'fasta': 'fasta', 'FASTA': 'fasta', 'featureXML': 'featurexml', 'featurexml': 'featurexml', 'html': 'html', 'HTML': 'html', 'idXML': 'idxml', 'json': 'json', 'kroenik': 'kroenik', 'mascotXML': 'mascotxml', 'mgf': 'mgf', 'mrm': 'mrm', 'ms': 'sirius.ms', 'ms2': 'ms2', 'msp': 'msp', 'mzData': 'mzdata', 'mzid': 'mzid', 'mzML': 'mzml', 'mzml': 'mzml', 'mzq': 'mzq', 'mzQC': 'mzqc', 'mzTab': 'mztab', 'mzXML': 'mzxml', 'novor': 'txt', 'obo': 'obo', 'oms': 'sqlite', 'omssaXML': 'idxml', 'osw': 'osw', 'OSW': 'osw', 'params': 'txt', 'paramXML': 'paramxml', 'peplist': 'peplist', 'pep.xml': 'pepxml', 'pepXML': 'pepxml', 'png': 'png', 'PNG': 'png', 'protXML': 'protxml', 'psms': 'psms', 'pqp': 'pqp', 'qcML': 'qcml', 'spec.xml': 'spec.xml', 'splib': 'splib', 'sqMass': 'sqmass', 'tandem.xml': 'tandem', 'trafoXML': 'trafoxml', 'traML': 'traml', 'TraML': 'traml', 'tab': 'tabular', 'raw': 'thermo.raw', 'xls': 'tsv', 'XML': 'xml', 'xml': 'xml', 'xquest.xml': 'xquest.xml', 'xsd': 'xml'}
+    #set m={'txt': 'txt', 'tsv': 'tabular', 'bioml': 'xml', 'consensusXML': 'consensusxml', 'csv': 'csv', 'dta': 'dta', 'dta2d': 'dta2d', 'edta': 'edta', 'fa': 'fasta', 'fas': 'fasta', 'fasta': 'fasta', 'FASTA': 'fasta', 'featureXML': 'featurexml', 'featurexml': 'featurexml', 'html': 'html', 'HTML': 'html', 'idXML': 'idxml', 'json': 'json', 'kroenik': 'kroenik', 'mascotXML': 'mascotxml', 'mgf': 'mgf', 'mrm': 'mrm', 'ms': 'sirius.ms', 'ms2': 'ms2', 'msp': 'msp', 'mzData': 'mzdata', 'mzid': 'mzid', 'mzML': 'mzml', 'mzml': 'mzml', 'mzq': 'mzq', 'mzQC': 'mzqc', 'mzTab': 'mztab', 'mzXML': 'mzxml', 'novor': 'txt', 'obo': 'obo', 'oms': 'sqlite', 'omssaXML': 'idxml', 'osw': 'osw', 'OSW': 'osw', 'params': 'txt', 'paramXML': 'paramxml', 'peplist': 'peplist', 'pep.xml': 'pepxml', 'pepXML': 'pepxml', 'png': 'png', 'PNG': 'png', 'protXML': 'protxml', 'psms': 'psms', 'pqp': 'pqp', 'qcML': 'qcml', 'spec.xml': 'spec.xml', 'splib': 'splib', 'sqMass': 'sqmass', 'tandem.xml': 'tandem', 'trafoXML': 'trafoxml', 'traML': 'traml', 'TraML': 'traml', 'tab': 'tabular', 'raw': 'thermo.raw', 'xls': 'tsv', 'XML': 'xml', 'xml': 'xml', 'xquest.xml': 'xquest.xml', 'xsd': 'xsd', 'zip': 'zip'}
     #return m[o]
 #end def
 #def gxy2omsext(g)
-    #set m={'txt': 'txt', 'tabular': 'tsv', 'xml': 'bioml', 'consensusxml': 'consensusXML', 'csv': 'csv', 'dta': 'dta', 'dta2d': 'dta2d', 'edta': 'edta', 'fasta': 'fa', 'featurexml': 'featureXML', 'html': 'html', 'idxml': 'idXML', 'json': 'json', 'kroenik': 'kroenik', 'mascotxml': 'mascotXML', 'mgf': 'mgf', 'mrm': 'mrm', 'sirius.ms': 'ms', 'ms2': 'ms2', 'msp': 'msp', 'mzdata': 'mzData', 'mzid': 'mzid', 'mzml': 'mzML', 'mzq': 'mzq', 'mzqc': 'mzQC', 'mztab': 'mzTab', 'mzxml': 'mzXML', 'obo': 'obo', 'sqlite': 'oms', 'osw': 'osw', 'paramxml': 'paramXML', 'peff': 'fasta', 'peplist': 'peplist', 'pepxml': 'pep.xml', 'png': 'png', 'protxml': 'protXML', 'psms': 'psms', 'pqp': 'pqp', 'qcml': 'qcML', 'spec.xml': 'spec.xml', 'splib': 'splib', 'sqmass': 'sqMass', 'tandem': 'tandem.xml', 'trafoxml': 'trafoXML', 'traml': 'traML', 'thermo.raw': 'raw', 'tsv': 'xls', 'xquest.xml': 'xquest.xml'}
+    #set m={'txt': 'txt', 'tabular': 'tsv', 'xml': 'bioml', 'consensusxml': 'consensusXML', 'csv': 'csv', 'dta': 'dta', 'dta2d': 'dta2d', 'edta': 'edta', 'fasta': 'fa', 'featurexml': 'featureXML', 'html': 'html', 'idxml': 'idXML', 'json': 'json', 'kroenik': 'kroenik', 'mascotxml': 'mascotXML', 'mgf': 'mgf', 'mrm': 'mrm', 'sirius.ms': 'ms', 'ms2': 'ms2', 'msp': 'msp', 'mzdata': 'mzData', 'mzid': 'mzid', 'mzml': 'mzML', 'mzq': 'mzq', 'mzqc': 'mzQC', 'mztab': 'mzTab', 'mzxml': 'mzXML', 'obo': 'obo', 'sqlite': 'oms', 'osw': 'osw', 'paramxml': 'paramXML', 'peff': 'fasta', 'peplist': 'peplist', 'pepxml': 'pep.xml', 'png': 'png', 'protxml': 'protXML', 'psms': 'psms', 'pqp': 'pqp', 'qcml': 'qcML', 'spec.xml': 'spec.xml', 'splib': 'splib', 'sqmass': 'sqMass', 'tandem': 'tandem.xml', 'trafoxml': 'trafoXML', 'traml': 'traML', 'thermo.raw': 'raw', 'tsv': 'xls', 'xquest.xml': 'xquest.xml', 'xsd': 'xsd', 'zip': 'zip'}
     #return m[g]
 #end def
 ]]></token></macros>
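
The @EXT_FOO@ token defines two plain lookup tables translating between OpenMS and Galaxy file extensions; this hunk adds 'zip' and maps 'xsd' to its own datatype instead of the generic 'xml'. A trimmed Python sketch of the same idea (only a few of the several dozen real entries are reproduced):

    # Trimmed versions of the oms2gxyext / gxy2omsext tables from @EXT_FOO@.
    oms2gxy = {"mzML": "mzml", "featureXML": "featurexml", "idXML": "idxml",
               "xsd": "xsd", "zip": "zip"}
    gxy2oms = {"mzml": "mzML", "featurexml": "featureXML", "idxml": "idXML",
               "xsd": "xsd", "zip": "zip"}

    assert oms2gxy["xsd"] == "xsd"             # previously resolved to the generic 'xml' datatype
    assert gxy2oms["zip"] == "zip"             # new entry in this changeset
    assert gxy2oms[oms2gxy["mzML"]] == "mzML"  # typical round trip
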
--- a/prepare_test_data_manual.sh	Thu Dec 01 19:24:40 2022 +0000
+++ b/prepare_test_data_manual.sh	Fri Jun 14 21:40:27 2024 +0000
@@ -1,8 +1,3 @@
-MSSimulator -test -in DecoyDatabase_1.fasta -out MSsimulator.mzml -algorithm:RandomNumberGenerators:biological reproducible -algorithm:RandomNumberGenerators:technical reproducible > MSSimulator_1.stdout 2> stderr
-if [[ "$?" -ne "0" ]]; then >&2 echo 'MSSimulator_1 failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
-
-MSSimulator -test -in DecoyDatabase_1.fasta -out MSsimulator_MALDI.mzml -algorithm:RandomNumberGenerators:biological reproducible -algorithm:RandomNumberGenerators:technical reproducible -algorithm:MSSim:Global:ionization_type MALDI > MSSimulator_2.stdout 2> stderr
-if [[ "$?" -ne "0" ]]; then >&2 echo 'MSSimulator_2 failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
 
 ClusterMassTracesByPrecursor -test -in_ms1 ConsensusMapNormalizer_input.consensusXML -in_swath ConsensusMapNormalizer_input.consensusXML -out ClusterMassTracesByPrecursor.mzml > ClusterMassTracesByPrecursor.stdout 2> stderr
 if [[ "$?" -ne "0" ]]; then >&2 echo 'ClusterMassTracesByPrecursor failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
@@ -13,8 +8,7 @@
 CVInspector -test -cv_files CHEMISTRY/XLMOD.obo -cv_names XLMOD -mapping_file MAPPING/ms-mapping.xml -html CVInspector.html > CVInspector.stdout 2> stderr
 if [[ "$?" -ne "0" ]]; then >&2 echo 'CVInspector failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
 
-DeMeanderize -test -in MSsimulator_MALDI.mzml -out DeMeanderize.mzml > DeMeanderize.stdout 2> stderr
-if [[ "$?" -ne "0" ]]; then >&2 echo 'DeMeanderize failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
+# TODO DeMeanderize
 
 # TODO DigestorMotif
 
@@ -30,26 +24,17 @@
 if [[ "$?" -ne "0" ]]; then >&2 echo 'FeatureFinderIsotopeWavelet failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
 
 
-FFEval -test -in  FeatureFinderCentroided_1_output.featureXML -truth  FeatureFinderCentroided_1_output.featureXML -out  FFEval.featureXML -out_roc FFEval_roc.csv  > FFEval.stdout 2> stderr
-if [[ "$?" -ne "0" ]]; then >&2 echo 'FFEval failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
-
 # TODO? deprecated IDDecoyProbability
 
 IDExtractor -test -in MSGFPlusAdapter_1_out.idXML -best_hits -number_of_peptides  1 -out  IDExtractor.idXML   > IDExtractor.stdout 2> stderr
 if [[ "$?" -ne "0" ]]; then >&2 echo 'IDExtractor failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
 
-LabeledEval -test -in  FeatureLinkerLabeled_1_input.featureXML -truth  FeatureLinkerLabeled_1_output.consensusXML> LabeledEval.txt > LabeledEval.stdout 2> stderr
-if [[ "$?" -ne "0" ]]; then >&2 echo 'LabeledEval failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
-
 MapStatistics -test -in SiriusAdapter_3_input.featureXML -out MapStatistics.txt > MapStatistics_1.stdout 2> stderr
 if [[ "$?" -ne "0" ]]; then >&2 echo 'MapStatistics_1 failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
 
 MapStatistics -test -in ConsensusXMLFile_1.consensusXML -out MapStatistics2.txt > MapStatistics_2.stdout 2> stderr
 if [[ "$?" -ne "0" ]]; then >&2 echo 'MapStatistics_2 failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
 
-MetaboliteAdductDecharger -test -in Decharger_input.featureXML -out_cm MetaboliteAdductDecharger_cm.consensusXML -out_fm MetaboliteAdductDecharger_fm.featureXML -outpairs MetaboliteAdductDecharger_pairs.consensusXML > MetaboliteAdductDecharger.stdout 2> stderr
-if [[ "$?" -ne "0" ]]; then >&2 echo 'MetaboliteAdductDecharger failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
-
 MetaboliteSpectralMatcher -test -in spectra.mzML -database MetaboliteSpectralDB.mzML -out MetaboliteSpectralMatcher.mzTab > MetaboliteSpectralMatcher.stdout 2> stderr
 if [[ "$?" -ne "0" ]]; then >&2 echo 'MetaboliteSpectralMatcher failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
 
@@ -68,20 +53,6 @@
 OpenSwathRewriteToFeatureXML -featureXML OpenSwathFeatureXMLToTSV_input.featureXML -out OpenSwathRewriteToFeatureXML.featureXML > OpenSwathRewriteToFeatureXML.stdout 2> stderr
 # if [[ "$?" -ne "0" ]]; then >&2 echo 'OpenSwathRewriteToFeatureXML failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
 
-# adapted from the commented tests in OpenMS TODO may be removed later https://github.com/OpenMS/OpenMS/issues/4719
-FileConverter -in PepNovo.mzXML -out PepNovo_1.mzML > /dev/null 2> stderr
-if [[ "$?" -ne "0" ]]; then >&2 echo 'FileConverter failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
-
-PepNovoAdapter -ini PepNovoAdapter_1_parameters.ini -in PepNovo_1.mzML -out PepNovoAdapter_3_output.idXML -model_directory pepnovo_models/ -pepnovo_executable pepnovo > PepNovo_1.stdout 2> stderr
-if [[ "$?" -ne "0" ]]; then >&2 echo 'PhosphoScoring failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
-
-FileConverter -in PepNovo.mzData -out PepNovo_4.mzML > /dev/null 2> stderr
-if [[ "$?" -ne "0" ]]; then >&2 echo 'FileConverter failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
-PepNovoAdapter -ini PepNovoAdapter_1_parameters.ini -in PepNovo_4.mzML -out PepNovoAdapter_4_output.idXML -model_directory pepnovo_models/ -pepnovo_executable pepnovo > PepNovo_1.stdout 2> stderr
-if [[ "$?" -ne "0" ]]; then >&2 echo 'PhosphoScoring failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
-
-#PepNovoAdapter -ini PepNovoAdapter_5_parameters.ini -in PepNovoAdapter_5_output.pepnovo_out -out PepNovoAdapter_5_output.idXML -model_directory pepnovo_models/ 
-
 # TODO PhosphoScoring 
 PhosphoScoring -in spectra.mzML -id MSGFPlusAdapter_1_out1.tmp -out PhosphoScoring.idxml > PhosphoScoring.stdout 2> stderr
 if [[ "$?" -ne "0" ]]; then >&2 echo 'PhosphoScoring failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
@@ -118,9 +89,6 @@
 RNPxlXICFilter -test -control FileFilter_1_input.mzML -treatment FileFilter_1_input.mzML -out RNPxlXICFilter.mzML > RNPxlXICFilter.stdout 2> stderr
 if [[ "$?" -ne "0" ]]; then >&2 echo 'RNPxlXICFilter failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
 
-RTEvaluation -in PeptideIndexer_1.idXML -out RTEvaluation.tsv > RTEvaluation.stdout 2> stderr
-if [[ "$?" -ne "0" ]]; then >&2 echo 'RTEvaluation failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
-
 SemanticValidator -test -in FileFilter_1_input.mzML -mapping_file MAPPING/ms-mapping.xml > SemanticValidator.stdout 2> stderr
 if [[ "$?" -ne "0" ]]; then >&2 echo 'SemanticValidator failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
 
@@ -152,13 +120,8 @@
 SpectraFilterThresholdMower -test -in  SpectraFilterSqrtMower_1_input.mzML -out  SpectraFilterThresholdMower.mzML > SpectraFilterThresholdMower.stdout 2> stderr
 if [[ "$?" -ne "0" ]]; then >&2 echo 'SpectraFilterThresholdMower failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
 
-SpectraMerger -test -in NovorAdapter_in.mzML -out SpectraMerger_1.mzML > SpectraMerger.stdout 2> stderr
+SpectraMerger -test -in NovorAdapter_in.mzML -out SpectraMerger_1.mzML -algorithm:average_gaussian:ms_level 2 > SpectraMerger.stdout 2> stderr
 if [[ "$?" -ne "0" ]]; then >&2 echo 'SpectraMerger failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
 
-# TODO SvmTheoreticalSpectrumGeneratorTrainer
-
-TransformationEvaluation -test -in FileInfo_16_input.trafoXML -out TransformationEvaluation.trafoXML > TransformationEvaluation.stdout 2> stderr
-if [[ "$?" -ne "0" ]]; then >&2 echo 'TransformationEvaluation failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
-
 XMLValidator -test -in FileFilter_1_input.mzML > XMLValidator.stdout 2> stderr
 if [[ "$?" -ne "0" ]]; then >&2 echo 'XMLValidator failed'; >&2 echo -e "stderr:\n$(cat stderr | sed 's/^/    /')"; fi
--- a/readme.md	Thu Dec 01 19:24:40 2022 +0000
+++ b/readme.md	Fri Jun 14 21:40:27 2024 +0000
@@ -11,7 +11,7 @@
  * https://www.openms.de/
 
 The wrappers for these tools and most of their tests are automatically
-generated using the `generate.sh` script. The generation of the tools is
+generated using the `./aux/generate.sh` script. The generation of the tools is
 based on the CTDConverter (https://github.com/WorkflowConversion/CTDConverter)
 which can be fine-tuned via the `hardcoded_params.json` file. This file allows
 blacklisting and hardcoding parameters and modifying or setting arbitrary
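
As a minimal sketch of the regeneration workflow described in this paragraph (the exact invocation and any required options are not shown in this changeset, so treat it as illustrative):

```bash
# regenerate the wrapper XMLs and most of their tests from the OpenMS CTD files;
# CTDConverter is driven by the script and fine-tuned via hardcoded_params.json
# (e.g. blacklisting or hardcoding individual parameters)
./aux/generate.sh
```
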
@@ -129,15 +129,6 @@
 Open problems
 =============
 
-Some tools stall in CI testing using `--biocontainers` which is why the OpenMS
-tools are currently listed in `.tt_biocontainer_skip`. This is
-
-- AssayGeneratorMetabo and SiriusAdapter (both depend on sirius)
-- OMSSAAdapter
-
-Using `docker -t` seems to solve the problem (see
-https://github.com/galaxyproject/galaxy/issues/10153).
-
 Licence (MIT)
 =============
 
--- a/test-data.sh	Thu Dec 01 19:24:40 2022 +0000
+++ b/test-data.sh	Fri Jun 14 21:40:27 2024 +0000
@@ -1,11 +1,10 @@
 #!/usr/bin/env bash
 
-VERSION=2.8
+# set -x
+
+VERSION=3.1
 FILETYPES="aux/filetypes.txt"
-CONDAPKG="https://anaconda.org/bioconda/openms/2.8.0/download/linux-64/openms-2.8.0-h7ca0330_0.tar.bz2"
-
-# import the magic
-. ./generate-foo.sh
+CONDAPKG="https://anaconda.org/bioconda/openms/3.1.0/download/linux-64/openms-3.1.0-h8964181_1.tar.bz2"
 
 # install conda
 if [ -z "$tmp" ]; then
@@ -45,17 +44,20 @@
 
 echo "Clone OpenMS $VERSION sources"
 if [[ ! -d $OPENMSGIT ]]; then
-    # TODO >2.8 reenable original release branch .. also in else branch
-    # the plus branch contains commits from https://github.com/OpenMS/OpenMS/pull/5920 and https://github.com/OpenMS/OpenMS/pull/5917
-    # git clone -b release/$VERSION.0 https://github.com/OpenMS/OpenMS.git $OPENMSGIT
-    git clone -b release/$VERSION.0-plus https://github.com/bernt-matthias/OpenMS.git $OPENMSGIT
-    cd $OPENMSGIT
-    git submodule init
-    git submodule update
-    cd -
+    if [[ "$created" == "yes" ]]; then
+        GIT_DIR=$(mktemp -d --dry-run)
+        GIT_EXTRA_OPTS="--separate-git-dir=$GIT_DIR"
+    fi
+    git clone -b release/$VERSION.0 --depth 1 --recurse-submodules=THIRDPARTY --shallow-submodules $GIT_EXTRA_OPTS https://github.com/OpenMS/OpenMS.git $OPENMSGIT
+    ## save some space by just keeping the needed binaries
+    find $OPENMSGIT/THIRDPARTY/ -type f -not \( -name maracluster -o -name spectrast \) -delete
+    find $OPENMSGIT/THIRDPARTY/ -empty -type d -delete
+    if [[ "$created" == "yes" ]]; then
+        rm -rf $GIT_DIR
+    fi
 else
     cd $OPENMSGIT
-    git pull origin release/$VERSION.0-plus
+    git pull origin release/$VERSION.0
     cd -
 fi
 
@@ -65,7 +67,7 @@
 if conda env list | grep "$OPENMSENV"; then
     true
 else
-    conda create -y --quiet --override-channels --channel iuc --channel conda-forge --channel bioconda --channel defaults -n $OPENMSENV openms=$VERSION openms-thirdparty=$VERSION omssa=2.1.9 ctdopts=1.5 lxml
+    conda create -y --quiet --solver libmamba --override-channels --strict-channel-priority --channel conda-forge --channel bioconda -n $OPENMSENV openms=$VERSION openms-thirdparty=$VERSION ctdopts=1.5 lxml
 # chmod -R u-w $OPENMSENV 
 fi
 ###############################################################################
@@ -101,33 +103,34 @@
 conda deactivate
 
 
-# ###############################################################################
-# ## copy all the test data files to test-data
-# ## most of it (outputs) will be overwritten later, but its needed for
-# ## prepare_test_data
-# ###############################################################################
+# # ###############################################################################
+# # ## copy all the test data files to test-data
+# # ## most of it (outputs) will be overwritten later, but its needed for
+# # ## prepare_test_data
+# # ###############################################################################
 echo "Get test data"
-find test-data -type f,l,d ! -name "*fa" ! -name "*loc" ! -name "test-data" -delete
+find test-data -type f,l,d ! -name "*fa" ! -name "*loc" ! -name "test-data" ! -name MetaboliteSpectralDB.mzML -delete
 
 cp $(find $OPENMSGIT/src/tests/topp/ -type f | grep -Ev "third_party_tests.cmake|CMakeLists.txt|check_ini") test-data/
 cp -r $OPENMSGIT/share/OpenMS/MAPPING/ test-data/
 cp -r $OPENMSGIT/share/OpenMS/CHEMISTRY test-data/
 cp -r $OPENMSGIT/share/OpenMS/examples/ test-data/
 if [ ! -f test-data/MetaboliteSpectralDB.mzML ]; then 
-    wget -nc https://abibuilder.cs.uni-tuebingen.de/archive/openms/Tutorials/Data/latest/Example_Data/Metabolomics/databases/MetaboliteSpectralDB.mzML
+    wget -nc https://raw.githubusercontent.com/sneumann/OpenMS/master/share/OpenMS/CHEMISTRY/MetaboliteSpectralDB.mzML
+    # wget -nc https://abibuilder.cs.uni-tuebingen.de/archive/openms/Tutorials/Data/latest/Example_Data/Metabolomics/databases/MetaboliteSpectralDB.mzML
     mv MetaboliteSpectralDB.mzML test-data/
 fi
 ln -fs TOFCalibration_ref_masses test-data/TOFCalibration_ref_masses.txt
 ln -fs TOFCalibration_const test-data/TOFCalibration_const.csv
 
-if [ ! -d test-data/pepnovo_models/ ]; then
-    mkdir -p /tmp/pepnovo
-    wget -nc http://proteomics.ucsd.edu/Software/PepNovo/PepNovo.20120423.zip
-    unzip PepNovo.20120423.zip -d /tmp/pepnovo/
-    mv /tmp/pepnovo/Models test-data/pepnovo_models/
-    rm PepNovo.20120423.zip
-    rm -rf /tmp/pepnovo
-fi
+# if [ ! -d test-data/pepnovo_models/ ]; then
+#     mkdir -p /tmp/pepnovo
+#     wget -nc http://proteomics.ucsd.edu/Software/PepNovo/PepNovo.20120423.zip
+#     unzip PepNovo.20120423.zip -d /tmp/pepnovo/
+#     mv /tmp/pepnovo/Models test-data/pepnovo_models/
+#     rm PepNovo.20120423.zip
+#     rm -rf /tmp/pepnovo
+# fi
 ###############################################################################
 ## generate ctd files using the binaries in the conda package 
 ###############################################################################
@@ -170,6 +173,47 @@
 ###############################################################################
 ## create script to create results for the tests and run it
 ###############################################################################
+# parse data preparation calls from OpenMS sources for a tool with a given id
+function prepare_test_data {
+#     id=$1
+# | egrep -i "$id\_.*[0-9]+(_prepare\"|_convert)?"
+
+    OLD_OSW_PARAM=$(cat $OPENMSGIT/src/tests/topp/CMakeLists.txt |sed 's/#.*$//'| sed 's/^\s*//; s/\s*$//' |awk '{printf("%s@NEWLINE@", $0)}' |  sed 's/)@NEWLINE@/)\n/g' | sed 's/@NEWLINE@/ /g' | grep OLD_OSW_PARAM | head -n 1 | sed 's/^[^"]\+//; s/)$//; s/"//g')
+    # TODO SiriusAdapter depends on an online service which may time out .. so keep disabled https://github.com/OpenMS/OpenMS/pull/5010
+    cat $OPENMSGIT/src/tests/topp/CMakeLists.txt  $OPENMSGIT/src/tests/topp/THIRDPARTY/third_party_tests.cmake |
+        sed "s/\${OLD_OSW_PARAM}/$OLD_OSW_PARAM/" |
+        grep -v "\.ini\.json" |
+        sed 's/.ini.json /ini /' | 
+        sed 's/#.*$//'| 
+        sed 's/^\s*//; s/\s*$//' | 
+        grep -v "^$"  | 
+        awk '{printf("%s@NEWLINE@", $0)}' | 
+        sed 's/)@NEWLINE@/)\n/g' | sed 's/@NEWLINE@/ /g' | 
+        sed 's/degenerate_cases\///' | 
+        egrep -v "WRITEINI|WRITECTD|INVALIDVALUE|DIFF" | 
+        grep add_test | 
+        egrep "TOPP|UTILS" |
+        sed 's@${DATA_DIR_SHARE}/@@g;'|
+        sed 's@${TMP_RIP_PATH}@./@g'|
+        sed 's@TOFCalibration_ref_masses @TOFCalibration_ref_masses.txt @g; s@TOFCalibration_const @TOFCalibration_const.csv @'| 
+        sed 's/\("TOPP_SiriusAdapter_4".*\)-sirius:database all\(.*\)/\1-sirius:database pubchem\2/' |
+    while read line
+    do
+        test_id=$(echo "$line" | sed 's/add_test(//; s/"//g;  s/)[^)]*$//; s/\${TOPP_BIN_PATH}\///g;s/\${DATA_DIR_TOPP}\///g; s#THIRDPARTY/##g' | cut -d" " -f1)
+
+        if grep -lq "$test_id"'\".* PROPERTIES WILL_FAIL 1' $OPENMSGIT/src/tests/topp/CMakeLists.txt $OPENMSGIT/src/tests/topp/THIRDPARTY/third_party_tests.cmake; then
+            >&2 echo "    skip failing "$test_id
+            continue
+        fi
+
+        line=$(echo "$line" | sed 's/add_test("//; s/)[^)]*$//; s/\${TOPP_BIN_PATH}\///g;s/\${DATA_DIR_TOPP}\///g; s#THIRDPARTY/##g' | cut -d" " -f2-)
+        # line="$(fix_tmp_files $line)"
+        echo 'echo executing "'$test_id'"'
+        echo "$line > $test_id.stdout 2> $test_id.stderr"
+        echo "if [[ \"\$?\" -ne \"0\" ]]; then >&2 echo '$test_id failed'; >&2 echo -e \"stderr:\n\$(cat $test_id.stderr | sed 's/^/    /')\"; echo -e \"stdout:\n\$(cat $test_id.stdout)\";fi"    
+    done
+}
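
For orientation, a hedged sketch of the transformation performed by `prepare_test_data` above: the `add_test` entry below is illustrative (hypothetical tool and file names, not copied from the OpenMS sources), and the emitted lines mirror the `echo` statements in the loop.

```bash
# illustrative CMake entry (hypothetical tool and file names):
#   add_test("TOPP_FileFilter_1" ${TOPP_BIN_PATH}/FileFilter -test -in ${DATA_DIR_TOPP}/FileFilter_1_input.mzML -out FileFilter_1_output.mzML)
#
# roughly what the function appends to prepare_test_data.sh for that entry:
echo executing "TOPP_FileFilter_1"
FileFilter -test -in FileFilter_1_input.mzML -out FileFilter_1_output.mzML > TOPP_FileFilter_1.stdout 2> TOPP_FileFilter_1.stderr
if [[ "$?" -ne "0" ]]; then >&2 echo 'TOPP_FileFilter_1 failed'; >&2 echo -e "stderr:\n$(cat TOPP_FileFilter_1.stderr | sed 's/^/    /')"; echo -e "stdout:\n$(cat TOPP_FileFilter_1.stdout)"; fi
```
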
+
 echo "Create test shell script"
 
 echo -n "" > prepare_test_data.sh
@@ -180,31 +224,19 @@
 echo 'export LUCIPHOR_BINARY="$(dirname $(realpath $(which luciphor2)))/luciphor2.jar"' >> prepare_test_data.sh
 
 echo 'export MARACLUSTER_BINARY="'"$OPENMSGIT"'/THIRDPARTY/Linux/64bit/MaRaCluster/maracluster"'>> prepare_test_data.sh
-echo 'export MSFRAGGER_BINARY="/home/berntm/Downloads/MSFragger-20171106/MSFragger-20171106.jar"'>> prepare_test_data.sh
+echo 'export MSFRAGGER_BINARY="/home/berntm/Downloads/MSFragger-3.5/MSFragger-3.5.jar"'>> prepare_test_data.sh
 echo 'export MSGFPLUS_BINARY="$(msgf_plus -get_jar_path)"' >> prepare_test_data.sh
 echo 'export MYRIMATCH_BINARY="myrimatch"'>> prepare_test_data.sh
 echo 'export NOVOR_BINARY="/home/berntm/Downloads/novor/lib/novor.jar"' >> prepare_test_data.sh
-echo 'export OMSSA_BINARY="$(dirname $(realpath $(which omssacl)))/omssacl"'>> prepare_test_data.sh
 echo 'export PERCOLATOR_BINARY="percolator"'>> prepare_test_data.sh
 echo 'export SIRIUS_BINARY="$(which sirius)"' >> prepare_test_data.sh
 echo 'export SPECTRAST_BINARY="'"$OPENMSGIT"'/THIRDPARTY/Linux/64bit/SpectraST/spectrast"' >> prepare_test_data.sh
 echo 'export XTANDEM_BINARY="xtandem"' >> prepare_test_data.sh
 echo 'export THERMORAWFILEPARSER_BINARY="ThermoRawFileParser.exe"' >> prepare_test_data.sh
+echo 'export SAGE_BINARY=sage' >> prepare_test_data.sh
 
 prepare_test_data >> prepare_test_data.sh #tmp_test_data.sh
 
-## prepare_test_data > tmp_test_data.sh
-## # remove calls not needed for the tools listed in any .list file
-## echo LIST $LIST
-## if [ ! -z "$LIST" ]; then
-##     REX=$(echo $LIST | sed 's/ /\n/g' | sed 's@.*/\([^/]\+\).xml$@\1@' | tr '\n' '|' | sed 's/|$//')
-## else
-##     REX=".*"
-## fi
-## echo REX $REX
-## cat tmp_test_data.sh | egrep "($REX)" >> prepare_test_data.sh
-## rm tmp_test_data.sh
-
 echo "Execute test shell script"
 chmod u+x prepare_test_data.sh
 cd ./test-data || exit
@@ -234,14 +266,11 @@
 for i in $(ls ctd/*ctd)
 do
     b=$(basename "$i" .ctd)
-    get_tests2 "$b" >> "$autotests"
+    ./get_tests.py --id "$b" --cmake "$OPENMSGIT"/src/tests/topp/CMakeLists.txt "$OPENMSGIT"/src/tests/topp/THIRDPARTY/third_party_tests.cmake >> "$autotests"
+    wc -l "$autotests"
 done
 echo "</macros>" >> "$autotests"
 
-# echo "Create test data links"
-# Breaks DecoyDatabase
-# link_tmp_files
-
 # tests for tools using output_prefix parameters can not be auto generated
 # hence we output the tests for manual curation in macros_test.xml
 # and remove them from the autotests
@@ -255,6 +284,7 @@
 #
 # not able to specify composite test data  
 # -> SpectraSTSearchAdapter 
+echo "Discard some tests"
 if [[ ! -z "$1" ]]; then
     echo "" > macros_discarded_auto.xml
     for i in OpenSwathFileSplitter IDRipper MzMLSplitter SeedListGenerator MSFraggerAdapter MaRaClusterAdapter NovorAdapter SpectraSTSearchAdapter
@@ -272,7 +302,7 @@
 ## remove broken symlinks in test-data
 find test-data/ -xtype l -delete
 
-if [ ! -z "$created" ]; then
+if [[ "$created" == "yes" ]]; then
     echo "Removing temporary directory"
     rm -rf "$tmp"
 fi