changeset 5:7acb53ffb96f draft
planemo upload for repository https://github.com/MaterialsGalaxy/larch-tools/tree/main/larch_artemis commit 4814f53888643f1d3667789050914675fffb7d59
author   | muon-spectroscopy-computational-project
date     | Fri, 23 Aug 2024 13:46:13 +0000
parents  | 39ab361e6d59
children | d17c5d62802f
files    | common.py larch_artemis.py larch_artemis.xml test-data/fit_report.txt test-data/gds.csv test-data/sp.csv
diffstat | 6 files changed, 233 insertions(+), 159 deletions(-)
--- a/common.py Mon Jun 17 13:54:30 2024 +0000
+++ b/common.py Fri Aug 23 13:46:13 2024 +0000
@@ -11,8 +11,6 @@
     group_keys = list(athena_group.keys())
     if key is None:
         key = group_keys[0]
-    else:
-        key = key.replace("-", "_")
 
     try:
         return extract_athenagroup(athena_group.groups[key])
@@ -20,7 +18,7 @@
         raise KeyError(f"{key} not in {group_keys}") from e
 
 
-def read_all_groups(dat_file: str, key: str = None) -> "dict[str, Group]":
+def read_all_groups(dat_file: str) -> "dict[str, Group]":
     # Cannot rely on do_ABC as _larch is None
     athena_group = read_athena(
         dat_file,
@@ -40,14 +38,20 @@
 
 
 def read_group(dat_file: str, key: str = None):
+    if key:
+        match_ = key.replace(" ", "_").replace("-", "_").replace(".", "_")
+    else:
+        match_ = None
+
     # Cannot rely on do_ABC as _larch is None
     athena_group = read_athena(
         dat_file,
+        match=match_,
         do_preedge=False,
         do_bkg=False,
         do_fft=False,
     )
-    group = get_group(athena_group, key)
+    group = get_group(athena_group, match_)
     pre_edge_with_defaults(group=group)
     xftf_with_defaults(group=group)
     return group
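For reference, a minimal sketch of the reworked key handling in read_group: a user-supplied group label is normalised to Athena's underscore convention and passed to read_athena as match, instead of renaming the key after the project has been read. The file name and label below are placeholders, not values from this changeset.

from larch.io import read_athena, extract_athenagroup

key = "Sample 1"  # placeholder label as a user might type it
# Athena group names use underscores, so normalise before matching.
match_ = key.replace(" ", "_").replace("-", "_").replace(".", "_") if key else None

# The do_* flags are disabled because _larch is None in this context.
athena_group = read_athena(
    "my_data.prj",  # placeholder project file
    match=match_,
    do_preedge=False,
    do_bkg=False,
    do_fft=False,
)

group_keys = list(athena_group.keys())
selected = match_ if match_ is not None else group_keys[0]
group = extract_athenagroup(athena_group.groups[selected])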
--- a/larch_artemis.py Mon Jun 17 13:54:30 2024 +0000
+++ b/larch_artemis.py Fri Aug 23 13:46:13 2024 +0000
@@ -45,7 +45,7 @@
         try:
             gds_val = float(data_dict[par_idx]["value"])
         except ValueError:
-            gds_val = 0.00
+            continue
         gds_expr = data_dict[par_idx]["expr"]
         gds_vary = (
             True
@@ -68,74 +68,50 @@
     return dgs_group
 
 
-def plot_rmr(path: str, data_set, rmin, rmax):
+def plot_rmr(path: str, datasets: list, rmin, rmax):
     plt.figure()
-    plt.plot(data_set.data.r, data_set.data.chir_mag, color="b")
-    plt.plot(data_set.data.r, data_set.data.chir_re, color="b", label="expt.")
-    plt.plot(data_set.model.r, data_set.model.chir_mag, color="r")
-    plt.plot(data_set.model.r, data_set.model.chir_re, color="r", label="fit")
-    plt.ylabel(
-        "Magnitude of Fourier Transform of "
-        r"$k^2 \cdot \chi$/$\mathrm{\AA}^{-3}$"
-    )
-    plt.xlabel(r"Radial distance/$\mathrm{\AA}$")
-    plt.xlim(0, 5)
+    for i, dataset in enumerate(datasets):
+        plt.subplot(len(datasets), 1, i + 1)
+        data = dataset.data
+        model = dataset.model
+        plt.plot(data.r, data.chir_mag, color="b")
+        plt.plot(data.r, data.chir_re, color="b", label="expt.")
+        plt.plot(model.r, model.chir_mag, color="r")
+        plt.plot(model.r, model.chir_re, color="r", label="fit")
+        plt.ylabel(
+            "Magnitude of Fourier Transform of "
+            r"$k^2 \cdot \chi$/$\mathrm{\AA}^{-3}$"
+        )
+        plt.xlabel(r"Radial distance/$\mathrm{\AA}$")
+        plt.axvspan(xmin=rmin, xmax=rmax, color="g", alpha=0.1)
+        plt.legend()
 
-    plt.fill(
-        [rmin, rmin, rmax, rmax],
-        [-rmax, rmax, rmax, -rmax],
-        color="g",
-        alpha=0.1,
-    )
-    plt.text(rmax - 0.65, -rmax + 0.5, "fit range")
-    plt.legend()
     plt.savefig(path, format="png")
     plt.close("all")
 
 
-def plot_chikr(path: str, data_set, rmin, rmax, kmin, kmax):
+def plot_chikr(path: str, datasets, rmin, rmax, kmin, kmax):
     fig = plt.figure(figsize=(16, 4))
-    ax1 = fig.add_subplot(121)
-    ax2 = fig.add_subplot(122)
-    ax1.plot(
-        data_set.data.k,
-        data_set.data.chi * data_set.data.k**2,
-        color="b",
-        label="expt.",
-    )
-    ax1.plot(
-        data_set.model.k,
-        data_set.model.chi * data_set.data.k**2,
-        color="r",
-        label="fit",
-    )
-    ax1.set_xlim(0, 15)
-    ax1.set_xlabel(r"$k (\mathrm{\AA})^{-1}$")
-    ax1.set_ylabel(r"$k^2$ $\chi (k)(\mathrm{\AA})^{-2}$")
+    for i, dataset in enumerate(datasets):
+        data = dataset.data
+        model = dataset.model
+        ax1 = fig.add_subplot(len(datasets), 2, 2*i + 1)
+        ax2 = fig.add_subplot(len(datasets), 2, 2*i + 2)
+        ax1.plot(data.k, data.chi * data.k**2, color="b", label="expt.")
+        ax1.plot(model.k, model.chi * data.k**2, color="r", label="fit")
+        ax1.set_xlabel(r"$k (\mathrm{\AA})^{-1}$")
+        ax1.set_ylabel(r"$k^2$ $\chi (k)(\mathrm{\AA})^{-2}$")
+        ax1.axvspan(xmin=kmin, xmax=kmax, color="g", alpha=0.1)
+        ax1.legend()
 
-    ax1.fill(
-        [kmin, kmin, kmax, kmax],
-        [-rmax, rmax, rmax, -rmax],
-        color="g",
-        alpha=0.1,
-    )
-    ax1.text(kmax - 1.65, -rmax + 0.5, "fit range")
-    ax1.legend()
+        ax2.plot(data.r, data.chir_mag, color="b", label="expt.")
+        ax2.plot(model.r, model.chir_mag, color="r", label="fit")
+        ax2.set_xlim(0, 5)
+        ax2.set_xlabel(r"$R(\mathrm{\AA})$")
+        ax2.set_ylabel(r"$|\chi(R)|(\mathrm{\AA}^{-3})$")
+        ax2.legend(loc="upper right")
+        ax2.axvspan(xmin=rmin, xmax=rmax, color="g", alpha=0.1)
 
-    ax2.plot(data_set.data.r, data_set.data.chir_mag, color="b", label="expt.")
-    ax2.plot(data_set.model.r, data_set.model.chir_mag, color="r", label="fit")
-    ax2.set_xlim(0, 5)
-    ax2.set_xlabel(r"$R(\mathrm{\AA})$")
-    ax2.set_ylabel(r"$|\chi(R)|(\mathrm{\AA}^{-3})$")
-    ax2.legend(loc="upper right")
-
-    ax2.fill(
-        [rmin, rmin, rmax, rmax],
-        [-rmax, rmax, rmax, -rmax],
-        color="g",
-        alpha=0.1,
-    )
-    ax2.text(rmax - 0.65, -rmax + 0.5, "fit range")
     fig.savefig(path, format="png")
     plt.close("all")
 
@@ -149,22 +125,31 @@
 def read_selected_paths_list(file_name):
     sp_dict = read_csv_data(file_name)
     sp_list = []
-    for path_id in sp_dict:
-        filename = sp_dict[path_id]["filename"]
+    for path_dict in sp_dict.values():
+        filename = path_dict["filename"]
+        if not os.path.isfile(filename):
+            raise FileNotFoundError(
+                f"{filename} not found, check paths in the Selected Paths "
+                "table match those in the zipped directory structure."
+            )
+
         print(f"Reading selected path for file {filename}")
         new_path = FeffPathGroup(
             filename=filename,
-            label=sp_dict[path_id]["label"],
-            s02=sp_dict[path_id]["s02"],
-            e0=sp_dict[path_id]["e0"],
-            sigma2=sp_dict[path_id]["sigma2"],
-            deltar=sp_dict[path_id]["deltar"],
+            label=path_dict["label"],
+            degen=path_dict["degen"] if path_dict["degen"] != "" else None,
+            s02=path_dict["s02"],
+            e0=path_dict["e0"],
+            sigma2=path_dict["sigma2"],
+            deltar=path_dict["deltar"],
         )
         sp_list.append(new_path)
     return sp_list
 
 
-def run_fit(data_group, gds, selected_paths, fv):
+def run_fit(
+    data_groups: list, gds, pathlist, fv, selected_path_ids: list = None
+):
     # create the transform group (prepare the fit space).
     trans = TransformGroup(
         fitspace=fv["fitspace"],
@@ -177,16 +162,30 @@
         rmax=fv["rmax"],
     )
 
-    dset = FeffitDataSet(
-        data=data_group, pathlist=selected_paths, transform=trans
-    )
+    datasets = []
+    for i, data_group in enumerate(data_groups):
+        if selected_path_ids:
+            selected_paths = []
+            for path_id in selected_path_ids[i]:
+                selected_paths.append(pathlist[path_id - 1])
 
-    out = feffit(gds, dset)
-    return dset, out
+            dataset = FeffitDataSet(
+                data=data_group, pathlist=selected_paths, transform=trans
+            )
+
+        else:
+            dataset = FeffitDataSet(
+                data=data_group, pathlist=pathlist, transform=trans
+            )
+
+        datasets.append(dataset)
+
+    out = feffit(gds, datasets)
+    return datasets, out
 
 
 def main(
-    prj_file: str,
+    prj_file: list,
     gds_file: str,
     sp_file: str,
     fit_vars: dict,
@@ -197,26 +196,40 @@
     rmr_path = f"rmr/rmr{series_id}.png"
     chikr_path = f"chikr/chikr{series_id}.png"
 
+    gds = read_gds(gds_file)
+    pathlist = read_selected_paths_list(sp_file)
+
     # calc_with_defaults will hang indefinitely (>6 hours recorded) if the
     # data contains any NaNs - consider adding an early error here if this is
     # not fixed in Larch?
-    data_group = read_group(prj_file)
-
-    print(f"Fitting project from file {data_group.filename}")
+    selected_path_ids = []
+    if isinstance(prj_file[0], dict):
+        data_groups = []
+        for dataset in prj_file:
+            data_groups.append(read_group(dataset["prj_file"]))
+            selected_path_ids.append([p["path_id"] for p in dataset["paths"]])
+    else:
+        data_groups = [read_group(p) for p in prj_file]
 
-    gds = read_gds(gds_file)
-    selected_paths = read_selected_paths_list(sp_file)
-    dset, out = run_fit(data_group, gds, selected_paths, fit_vars)
+    print(f"Fitting project from file {[d.filename for d in data_groups]}")
+
+    datasets, out = run_fit(
+        data_groups,
+        gds,
+        pathlist,
+        fit_vars,
+        selected_path_ids=selected_path_ids,
+    )
 
     fit_report = feffit_report(out)
     with open(report_path, "w") as fit_report_file:
         fit_report_file.write(fit_report)
 
     if plot_graph:
-        plot_rmr(rmr_path, dset, fit_vars["rmin"], fit_vars["rmax"])
+        plot_rmr(rmr_path, datasets, fit_vars["rmin"], fit_vars["rmax"])
         plot_chikr(
             chikr_path,
-            dset,
+            datasets,
             fit_vars["rmin"],
             fit_vars["rmax"],
             fit_vars["kmin"],
@@ -266,7 +279,7 @@
         series_id = str(series_index).zfill(id_length)
         try:
             out = main(
-                series_file,
+                [series_file],
                 gds_file,
                 sp_file,
                 fit_vars,
@@ -343,8 +356,10 @@
     plot_graph = input_values["plot_graph"]
 
     if input_values["execution"]["execution"] == "parallel":
-        main(prj_file, gds_file, sp_file, fit_vars, plot_graph)
-
+        main([prj_file], gds_file, sp_file, fit_vars, plot_graph)
+    elif input_values["execution"]["execution"] == "simultaneous":
+        dataset_dicts = input_values["execution"]["simultaneous"]
+        main(dataset_dicts, gds_file, sp_file, fit_vars, plot_graph)
    else:
        if os.path.isdir(prj_file):
            # Sort the unzipped directory, all filenames should be zero-padded
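To illustrate the new run_fit/main flow: in simultaneous mode each dataset carries its own list of 1-based path IDs, which index into the shared pathlist read from sp.csv; one FeffitDataSet is built per data group and a single feffit call fits them together. A pure-Python stand-in for the ID mapping (placeholder values, no Larch objects):

pathlist = ["feff0001.dat", "feff0002.dat", "feff0003.dat", "feff0004.dat"]

# One list of IDs per dataset, as collected in main() from dataset["paths"].
selected_path_ids = [[1, 2], [1, 3, 4]]

for i, ids in enumerate(selected_path_ids):
    # path_id is 1-based in the Galaxy form, hence the "- 1" when indexing.
    selected_paths = [pathlist[path_id - 1] for path_id in ids]
    print(f"dataset {i}: {selected_paths}")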
--- a/larch_artemis.xml Mon Jun 17 13:54:30 2024 +0000
+++ b/larch_artemis.xml Fri Aug 23 13:46:13 2024 +0000
@@ -2,32 +2,12 @@
     <description>generate Artemis projects from XAFS data</description>
     <macros>
         <!-- version of underlying tool (PEP 440) -->
-        <token name="@TOOL_VERSION@">0.9.75</token>
+        <token name="@TOOL_VERSION@">0.9.80</token>
         <!-- version of this tool wrapper (integer) -->
-        <token name="@WRAPPER_VERSION@">1</token>
+        <token name="@WRAPPER_VERSION@">0</token>
         <!-- citation should be updated with every underlying tool version -->
         <!-- typical fields to update are version, month, year, and doi -->
         <token name="@TOOL_CITATION@">10.1088/1742-6596/430/1/012007</token>
-        <xml name="series_options">
-            <param name="stop_on_error" type="boolean" label="Stop on error" help="Whether to stop gracefully or continue with subsequent projects if an error is encountered when fitting."/>
-            <repeat name="report_criteria" title="Report Criteria">
-                <param name="variable" type="text" label="Variable" help="Name of the variable of that appears in the statistics or variables sections of the report."/>
-                <conditional name="action">
-                    <param name="action" type="select" display="radio" label="Action" help="What to do with the named variable.">
-                        <option value="log" selected="true">Log</option>
-                        <option value="warn">Warn</option>
-                        <option value="stop">Early stopping</option>
-                    </param>
-                    <when value="log"/>
-                    <when value="warn">
-                        <param name="threshold" type="float" value="0.0" min="0.0" label="Warning threshold" help="In addition to logging, a warning will be printed if the absolute value of this variable goes above this threshold."/>
-                    </when>
-                    <when value="stop">
-                        <param name="threshold" type="float" value="0.0" min="0.0" label="Early stopping threshold" help="In addition to logging, execution will stop if the absolute value of this variable goes above this threshold."/>
-                    </when>
-                </conditional>
-            </repeat>
-        </xml>
         <import>macros.xml</import>
     </macros>
     <creator>
@@ -45,9 +25,15 @@
     <command detect_errors="exit_code"><![CDATA[
         unzip '$feff_paths'
         && mkdir report rmr chikr
-        #if $execution.execution=="zipped":
-            && unzip '$execution.prj_file'
-            && python '${__tool_directory__}/larch_artemis.py' prj '$gds_file' '$sp_file' '$inputs'
+        #if $execution.execution=="series":
+            #if $execution.format.format=="zipped":
+                && unzip '$execution.format.prj_file'
+                && python '${__tool_directory__}/larch_artemis.py' prj '$gds_file' '$sp_file' '$inputs'
+            #else
+                && python '${__tool_directory__}/larch_artemis.py' '$execution.format.prj_file' '$gds_file' '$sp_file' '$inputs'
+            #end if
+        #elif $execution.execution=="simultaneous":
+            && python '${__tool_directory__}/larch_artemis.py' _ '$gds_file' '$sp_file' '$inputs'
         #else
             && python '${__tool_directory__}/larch_artemis.py' '$execution.prj_file' '$gds_file' '$sp_file' '$inputs'
         #end if
@@ -56,25 +42,57 @@
         #end if
     ]]></command>
     <configfiles>
-        <inputs name="inputs"/>
+        <inputs name="inputs" data_style="paths"/>
     </configfiles>
     <inputs>
-        <conditional name="execution" >
-            <param name="execution" type="select" display="radio" label="Execution mode" help="Whether to execute: on individual Athena projects as parallel jobs, as one job with each project fit occurring in series, or as one job using a zipped input.">
+        <conditional name="execution">
+            <param name="execution" type="select" display="radio" label="Execution mode" help="Whether to execute: on individual Athena projects as parallel jobs, on several projects using a simultaneous fit, or as one job with each project fit occurring in series.">
                 <option value="parallel" selected="true">Parallel</option>
+                <option value="simultaneous">Simultaneous</option>
                 <option value="series">Series</option>
-                <option value="zipped">Zipped</option>
             </param>
             <when value="parallel">
                 <param name="prj_file" type="data" format="prj" label="Athena project file" help="Normalised X-ray Absorption Fine Structure (XAFS) data, in Athena project format. If a collection of files is provided, these will be submitted and executed in parallel."/>
             </when>
-            <when value="series">
-                <param name="prj_file" type="data" format="prj" multiple="true" label="Athena project files" help="Normalised X-ray Absorption Fine Structure (XAFS) data, in Athena project format. These will be submitted as a single job working in series, enabling early stopping criteria."/>
-                <expand macro="series_options"/>
+            <when value="simultaneous">
+                <repeat name="simultaneous" title="Simultaneous datasets" min="1" default="1">
+                    <param name="prj_file" type="data" format="prj" label="Athena project files" help="Normalised X-ray Absorption Fine Structure (XAFS) data, in Athena project format. These will be submitted as a single job performing a simultaneous fit."/>
+                    <repeat name="paths" title="Path IDs to select" min="1" default="1">
+                        <param name="path_id" type="integer" min="1" label="Path ID"/>
+                    </repeat>
+                </repeat>
             </when>
-            <when value="zipped">
-                <param name="prj_file" type="data" format="zip" label="Zipped Athena outputs" help="Normalised X-ray Absorption Fine Structure (XAFS) data, in Athena project format, and zipped. These will be submitted as a single job working in series, enabling early stopping criteria."/>
-                <expand macro="series_options"/>
+            <when value="series">
+                <conditional name="format">
+                    <param name="format" type="select" display="radio" label="Data format">
+                        <option value="prj" selected="true">Athena projects (.prj)</option>
+                        <option value="zipped">Zipped</option>
+                    </param>
+                    <when value="prj">
+                        <param name="prj_file" type="data" format="prj" multiple="true" label="Athena project files" help="Normalised X-ray Absorption Fine Structure (XAFS) data, in Athena project format. These will be submitted as a single job working in series, enabling early stopping criteria."/>
+                    </when>
+                    <when value="zipped">
+                        <param name="prj_file" type="data" format="zip" label="Zipped Athena outputs" help="Normalised X-ray Absorption Fine Structure (XAFS) data, in Athena project format, and zipped. These will be submitted as a single job working in series, enabling early stopping criteria."/>
+                    </when>
+                </conditional>
+                <param name="stop_on_error" type="boolean" label="Stop on error" help="Whether to stop gracefully or continue with subsequent projects if an error is encountered when fitting."/>
+                <repeat name="report_criteria" title="Report Criteria">
+                    <param name="variable" type="text" label="Variable" help="Name of the variable of that appears in the statistics or variables sections of the report."/>
+                    <conditional name="action">
+                        <param name="action" type="select" display="radio" label="Action" help="What to do with the named variable.">
+                            <option value="log" selected="true">Log</option>
+                            <option value="warn">Warn</option>
+                            <option value="stop">Early stopping</option>
+                        </param>
+                        <when value="log"/>
+                        <when value="warn">
+                            <param name="threshold" type="float" value="0.0" min="0.0" label="Warning threshold" help="In addition to logging, a warning will be printed if the absolute value of this variable goes above this threshold."/>
+                        </when>
+                        <when value="stop">
+                            <param name="threshold" type="float" value="0.0" min="0.0" label="Early stopping threshold" help="In addition to logging, execution will stop if the absolute value of this variable goes above this threshold."/>
+                        </when>
+                    </conditional>
+                </repeat>
             </when>
         </conditional>
         <param name="feff_paths" type="data" format="zip" label="FEFF paths file" help="Zipped directory of the FEFF paths."/>
@@ -110,38 +128,38 @@
             <filter>zip_outputs</filter>
         </data>
         <data name="fit_report" format="feffit" from_work_dir="report/fit_report.txt" label="Fit report on ${on_string}">
-            <filter>execution["execution"]=="parallel"</filter>
+            <filter>execution["execution"]!="series"</filter>
             <filter>not zip_outputs</filter>
         </data>
         <data name="rmr" format="png" from_work_dir="rmr/rmr.png" label="RMR plot on ${on_string}">
-            <filter>execution["execution"]=="parallel"</filter>
+            <filter>execution["execution"]!="series"</filter>
            <filter>plot_graph</filter>
            <filter>not zip_outputs</filter>
        </data>
        <data name="chikr" format="png" from_work_dir="chikr/chikr.png" label="ChiKR plot on ${on_string}">
-            <filter>execution["execution"]=="parallel"</filter>
+            <filter>execution["execution"]!="series"</filter>
            <filter>plot_graph</filter>
            <filter>not zip_outputs</filter>
        </data>
        <collection name="fit_report_collection" format="feffit" type="list" label="Fit reports on ${on_string}">
            <discover_datasets pattern="__name_and_ext__" directory="report"/>
-            <filter>execution["execution"]!="parallel"</filter>
+            <filter>execution["execution"]=="series"</filter>
            <filter>not zip_outputs</filter>
        </collection>
        <collection name="rmr_collection" format="png" type="list" label="RMR plots on ${on_string}">
            <discover_datasets pattern="__name_and_ext__" directory="rmr"/>
-            <filter>execution["execution"]!="parallel"</filter>
+            <filter>execution["execution"]=="series"</filter>
            <filter>plot_graph</filter>
            <filter>not zip_outputs</filter>
        </collection>
        <collection name="chikr_collection" format="png" type="list" label="ChiKR plots on ${on_string}">
            <discover_datasets pattern="__name_and_ext__" directory="chikr"/>
-            <filter>execution["execution"]!="parallel"</filter>
+            <filter>execution["execution"]=="series"</filter>
            <filter>plot_graph</filter>
            <filter>not zip_outputs</filter>
        </collection>
        <data name="criteria_report" format="csv" from_work_dir="criteria_report.csv" label="Selected criteria from ${on_string}">
-            <filter>execution["execution"]!="parallel"</filter>
+            <filter>execution["execution"]=="series"</filter>
            <filter>len(execution["report_criteria"])>0</filter>
        </data>
    </outputs>
@@ -182,12 +200,12 @@
            <output name="fit_report" file="fit_report.txt" compare="re_match"/>
            <output name="rmr">
                <assert_contents>
-                    <has_size value="55000" delta="1000"/>
+                    <has_size value="50000" delta="10000"/>
                </assert_contents>
            </output>
            <output name="chikr">
                <assert_contents>
-                    <has_size value="65000" delta="1000"/>
+                    <has_size value="70000" delta="10000"/>
                </assert_contents>
            </output>
        </test>
@@ -294,7 +312,8 @@
        </test>
        <!-- Zipped -->
        <test expect_num_outputs="1">
-            <param name="execution" value="zipped"/>
+            <param name="execution" value="series"/>
+            <param name="format" value="zipped"/>
            <param name="prj_file" value="test.zip"/>
            <param name="feff_paths" value="[FEFF_paths_of_test.inp].zip"/>
            <param name="gds_file" value="gds.csv"/>
@@ -311,13 +330,14 @@
            <param name="zip_outputs" value="true"/>
            <output name="out_zip">
                <assert_contents>
-                    <has_size value="230000" delta="500"/>
+                    <has_size value="250000" delta="10000"/>
                </assert_contents>
            </output>
        </test>
        <!-- 7: Zipped numerical sort -->
        <test expect_num_outputs="2">
-            <param name="execution" value="zipped"/>
+            <param name="execution" value="series"/>
+            <param name="format" value="zipped"/>
            <param name="prj_file" value="numeric_sort.zip"/>
            <param name="variable" value="rfactor"/>
            <param name="action" value="log"/>
@@ -335,6 +355,40 @@
            <output_collection name="fit_report_collection" type="list" count="3"/>
            <output name="criteria_report" file="numeric_sort_criteria_report.csv" compare="re_match"/>
        </test>
+        <!-- 8: Simultaneous -->
+        <test expect_num_outputs="1">
+            <param name="execution" value="simultaneous"/>
+            <repeat name="simultaneous">
+                <param name="prj_file" value="test.prj"/>
+                <repeat name="paths">
+                    <param name="path_id" value="1"/>
+                    <param name="path_id" value="2"/>
+                    <param name="path_id" value="3"/>
+                    <param name="path_id" value="4"/>
+                </repeat>
+            </repeat>
+            <repeat name="simultaneous">
+                <param name="prj_file" value="test.prj"/>
+                <repeat name="paths">
+                    <param name="path_id" value="1"/>
+                    <param name="path_id" value="2"/>
+                    <param name="path_id" value="3"/>
+                    <param name="path_id" value="4"/>
+                </repeat>
+            </repeat>
+            <param name="feff_paths" value="[FEFF_paths_of_test.inp].zip"/>
+            <param name="gds_file" value="gds.csv"/>
+            <param name="sp_file" value="sp.csv"/>
+            <param name="fitspace" value="r"/>
+            <param name="kmin" value="3"/>
+            <param name="kmax" value="14"/>
+            <param name="kweight" value="2"/>
+            <param name="dk" value="1"/>
+            <param name="window" value="hanning"/>
+            <param name="rmin" value="1.4"/>
+            <param name="rmax" value="3.0"/>
+            <output name="fit_report" file="fit_report.txt" compare="re_match"/>
+        </test>
    </tests>
    <help><![CDATA[
        Using Larch, perform fitting on an Athena project file, originally from the input X-ray Absorption Fine Structure (XAFS) data file.
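For orientation, the simultaneous branch of the wrapper hands the Galaxy inputs config file (written by the <inputs> configfile, now with data_style="paths") to the script, which reads input_values["execution"]["simultaneous"]. A hedged sketch of that structure, expressed as the Python dict the script consumes: the keys follow larch_artemis.py above, while the file paths, ID values, and the other top-level keys shown are placeholders.

input_values = {
    "plot_graph": True,  # other top-level keys omitted for brevity
    "execution": {
        "execution": "simultaneous",
        "simultaneous": [
            {"prj_file": "/data/dataset1.prj",
             "paths": [{"path_id": 1}, {"path_id": 2}]},
            {"prj_file": "/data/dataset2.prj",
             "paths": [{"path_id": 1}, {"path_id": 3}]},
        ],
    },
}

# Mirrors the new branch in the script's __main__ handling.
dataset_dicts = input_values["execution"]["simultaneous"]
for dataset in dataset_dicts:
    print(dataset["prj_file"], [p["path_id"] for p in dataset["paths"]])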
--- a/test-data/fit_report.txt Mon Jun 17 13:54:30 2024 +0000
+++ b/test-data/fit_report.txt Fri Aug 23 13:46:13 2024 +0000
@@ -12,8 +12,8 @@
 \[\[Variables\]\]
    alpha = [-\s][\d\.]{9} \+/- [\d\.]{9} \(init= 1\.0000e-7\)
-   amp = [-\s][\d\.]{9} \+/- [\d\.]{9} \(init= 1\.0000000\)
-   enot = [-\s][\d\.]{9} \+/- [\d\.]{9} \(init= 1\.0000e-7\)
+   e0 = [-\s][\d\.]{9} \+/- [\d\.]{9} \(init= 1\.0000e-7\)
+   s02 = [-\s][\d\.]{9} \+/- [\d\.]{9} \(init= 1\.0000000\)
    ss = [-\s][\d\.]{9} \+/- [\d\.]{9} \(init= 0\.0030000\)
    ss2 = [-\s][\d\.]{9} \+/- [\d\.]{9} \(init= 0\.0030000\)
    ss3 = [-\s][\d\.]{9} \+/- [\d\.]{9} = 'ss2'
@@ -56,8 +56,8 @@
      C -0\.7410, 0\.2885, -1\.7419 3
     reff = [\d\.]{9}
     degen = 1\.0000000
-    n\*s02 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'amp'
-    e0 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'enot'
+    n\*s02 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 's02'
+    e0 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'e0'
     r = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'reff \+ alpha\*reff'
     deltar = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'alpha\*reff'
     sigma2 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'ss'
@@ -69,8 +69,8 @@
      C 1\.4414, 0\.4279, 1\.2965 3
     reff = [\d\.]{9}
     degen = 1\.0000000
-    n\*s02 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'amp'
-    e0 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'enot'
+    n\*s02 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 's02'
+    e0 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'e0'
     r = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'reff \+ alpha\*reff'
     deltar = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'alpha\*reff'
     sigma2 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'ss2'
@@ -82,8 +82,8 @@
      C -1\.6586, -0\.1094, 1\.2084 3
     reff = [\d\.]{9}
     degen = 1\.0000000
-    n\*s02 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'amp'
-    e0 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'enot'
+    n\*s02 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 's02'
+    e0 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'e0'
     r = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'reff \+ alpha\*reff'
     deltar = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'alpha\*reff'
     sigma2 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'ss3'
@@ -95,8 +95,8 @@
      C 0\.6043, -2\.0001, 0\.0975 3
     reff = [\d\.]{9}
     degen = 1\.0000000
-    n\*s02 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'amp'
-    e0 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'enot'
+    n\*s02 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 's02'
+    e0 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'e0'
     r = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'reff \+ alpha\*reff'
     deltar = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'alpha\*reff'
     sigma2 = [-\s][\d\.]{9} \+/- [\d\.]{9} := 'ssfe'
--- a/test-data/gds.csv Mon Jun 17 13:54:30 2024 +0000
+++ b/test-data/gds.csv Fri Aug 23 13:46:13 2024 +0000
@@ -1,8 +1,9 @@
-id,name,value,expr,vary
-1,alpha,1e-07,,True
-2,amp,1.0,,True
-3,enot,1e-07,,True
-4,ss,0.003,,True
-5,ss2,0.003,,True
-6,ss3,0.003,ss2,False
-7,ssfe,0.003,,True
\ No newline at end of file
+ id, name, value, expr, vary
+ 1, degen, , , True
+ 2, s02, 1.0, , True
+ 3, e0, 1e-07, , True
+ 4, alpha, 1e-07, , True
+ 5, ss, 0.003, , True
+ 6, ss2, 0.003, , True
+ 7, ss3, 0.003, ss2,False
+ 8, ssfe, 0.003, , True
\ No newline at end of file
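The new gds.csv row for degen has an empty value, which interacts with the dict_to_gds change above: rows whose value cannot be parsed as a float are now skipped rather than created as 0.0 parameters. A small stand-in (the dict layout mirrors what read_csv_data appears to return; the "name" key is an assumption for illustration):

data_dict = {
    "1": {"name": "degen", "value": "", "expr": "", "vary": "True"},
    "2": {"name": "s02", "value": "1.0", "expr": "", "vary": "True"},
}

for par_idx in data_dict:
    try:
        gds_val = float(data_dict[par_idx]["value"])
    except ValueError:
        # Blank or non-numeric values are skipped entirely (the new behaviour),
        # instead of silently becoming 0.0 parameters.
        continue
    print(data_dict[par_idx]["name"], gds_val)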
--- a/test-data/sp.csv Mon Jun 17 13:54:30 2024 +0000
+++ b/test-data/sp.csv Fri Aug 23 13:46:13 2024 +0000
@@ -1,5 +1,5 @@
-id,filename,label,s02,e0,sigma2,deltar
-1,feff/feff0001.dat,S1,amp,enot,ss,alpha*reff
-2,feff/feff0002.dat,S2,amp,enot,ss2,alpha*reff
-3,feff/feff0003.dat,S3,amp,enot,ss3,alpha*reff
-4,feff/feff0004.dat,Fe,amp,enot,ssfe,alpha*reff
\ No newline at end of file
+ id, filename, label, degen, s02, e0, sigma2, deltar
+ 1, feff/feff0001.dat, S1, degen, s02, e0, ss, alpha*reff
+ 2, feff/feff0002.dat, S2, degen, s02, e0, ss2, alpha*reff
+ 3, feff/feff0003.dat, S3, degen, s02, e0, ss3, alpha*reff
+ 4, feff/feff0004.dat, Fe, degen, s02, e0, ssfe, alpha*reff
\ No newline at end of file
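Each sp.csv row now carries a degen column whose value feeds FeffPathGroup(degen=...), with an empty string mapped to None by read_selected_paths_list. A small illustrative parse (whitespace handling here is simplified; the tool's own read_csv_data does the real parsing, and the second row's empty degen cell is contrived to show the None mapping):

import csv
import io

sp_csv = """id, filename, label, degen, s02, e0, sigma2, deltar
1, feff/feff0001.dat, S1, degen, s02, e0, ss, alpha*reff
2, feff/feff0002.dat, S2, , s02, e0, ss2, alpha*reff"""

for row in csv.DictReader(io.StringIO(sp_csv), skipinitialspace=True):
    # An empty degen cell becomes None, matching the new FeffPathGroup call.
    degen = row["degen"] if row["degen"] != "" else None
    print(row["filename"], row["label"], degen, row["sigma2"], row["deltar"])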