Mercurial repository: bgruening / sklearn_sample_generator
changeset 45:fed1850f01c3 draft
planemo upload for repository https://github.com/bgruening/galaxytools/tree/master/tools/sklearn commit 80417bf0158a9b596e485dd66408f738f405145a
author | bgruening |
---|---|
date | Mon, 02 Oct 2023 10:16:38 +0000 |
parents | ab9977548806 |
children | 04f34751dae3 |
files | keras_train_and_eval.py |
diffstat | 1 files changed, 18 insertions(+), 6 deletions(-) |
--- a/keras_train_and_eval.py	Fri Sep 22 16:43:00 2023 +0000
+++ b/keras_train_and_eval.py	Mon Oct 02 10:16:38 2023 +0000
@@ -188,6 +188,7 @@
     infile1,
     infile2,
     outfile_result,
+    outfile_history=None,
     outfile_object=None,
     outfile_y_true=None,
     outfile_y_preds=None,
@@ -215,6 +216,9 @@
     outfile_result : str
         File path to save the results, either cv_results or test result.
 
+    outfile_history : str, optional
+        File path to save the training history.
+
     outfile_object : str, optional
         File path to save searchCV object.
 
@@ -253,9 +257,7 @@
     swapping = params["experiment_schemes"]["hyperparams_swapping"]
     swap_params = _eval_swap_params(swapping)
     estimator.set_params(**swap_params)
 
-    estimator_params = estimator.get_params()
-
     # store read dataframe object
     loaded_df = {}
 
@@ -448,12 +450,20 @@
     # train and eval
     if hasattr(estimator, "config") and hasattr(estimator, "model_type"):
         if exp_scheme == "train_val_test":
-            estimator.fit(X_train, y_train, validation_data=(X_val, y_val))
+            history = estimator.fit(X_train, y_train, validation_data=(X_val, y_val))
         else:
-            estimator.fit(X_train, y_train, validation_data=(X_test, y_test))
+            history = estimator.fit(X_train, y_train, validation_data=(X_test, y_test))
     else:
-        estimator.fit(X_train, y_train)
-
+        history = estimator.fit(X_train, y_train)
+    if "callbacks" in estimator_params:
+        for cb in estimator_params["callbacks"]:
+            if cb["callback_selection"]["callback_type"] == "CSVLogger":
+                hist_df = pd.DataFrame(history.history)
+                hist_df["epoch"] = np.arange(1, estimator_params["epochs"] + 1)
+                epo_col = hist_df.pop('epoch')
+                hist_df.insert(0, 'epoch', epo_col)
+                hist_df.to_csv(path_or_buf=outfile_history, sep="\t", header=True, index=False)
+                break
     if isinstance(estimator, KerasGBatchClassifier):
         scores = {}
         steps = estimator.prediction_steps
@@ -526,6 +536,7 @@
     aparser.add_argument("-X", "--infile1", dest="infile1")
     aparser.add_argument("-y", "--infile2", dest="infile2")
    aparser.add_argument("-O", "--outfile_result", dest="outfile_result")
+    aparser.add_argument("-hi", "--outfile_history", dest="outfile_history")
     aparser.add_argument("-o", "--outfile_object", dest="outfile_object")
     aparser.add_argument("-l", "--outfile_y_true", dest="outfile_y_true")
     aparser.add_argument("-p", "--outfile_y_preds", dest="outfile_y_preds")
@@ -542,6 +553,7 @@
         args.infile1,
         args.infile2,
         args.outfile_result,
+        outfile_history=args.outfile_history,
         outfile_object=args.outfile_object,
         outfile_y_true=args.outfile_y_true,
         outfile_y_preds=args.outfile_y_preds,
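
For reference, the code below is a minimal, self-contained sketch of the history-export step this changeset introduces. It substitutes a plain dict for the history attribute of the Keras History object returned by estimator.fit(), and uses a hypothetical output path history.tsv (in the tool itself the path arrives through the new -hi/--outfile_history argument); it then reproduces the same epoch-indexed, tab-separated table that keras_train_and_eval.py now writes when a CSVLogger callback is configured.

import numpy as np
import pandas as pd

# Stand-in for history.history as returned by estimator.fit();
# the metric names and values below are illustrative only.
history_history = {
    "loss": [0.92, 0.61, 0.48],
    "val_loss": [0.88, 0.66, 0.59],
}
epochs = 3                       # stand-in for estimator_params["epochs"]
outfile_history = "history.tsv"  # hypothetical path; the tool gets it via -hi/--outfile_history

# Build a per-epoch table, add a 1-based epoch index, move it to the
# first column, and write a tab-separated file: the same steps the
# changeset performs when a CSVLogger callback is present.
hist_df = pd.DataFrame(history_history)
hist_df["epoch"] = np.arange(1, epochs + 1)
epo_col = hist_df.pop("epoch")
hist_df.insert(0, "epoch", epo_col)
hist_df.to_csv(path_or_buf=outfile_history, sep="\t", header=True, index=False)

Running the sketch produces a history.tsv whose first column is epoch (1 through 3) followed by one column per recorded metric, which matches the layout the tool now writes to outfile_history.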