Repository 'sklearn_build_pipeline'
hg clone https://toolshed.g2.bx.psu.edu/repos/bgruening/sklearn_build_pipeline

Changeset 15:3f3c6dc38f3e (2019-12-16)
Previous changeset 14:c33145a815ee (2019-11-07) Next changeset 16:840f29aad145 (2020-01-22)
Commit message:
"planemo upload for repository https://github.com/bgruening/galaxytools/tree/master/tools/sklearn commit 5b2ac730ec6d3b762faa9034eddd19ad1b347476"
modified:
keras_deep_learning.py
keras_macros.xml
main_macros.xml
ml_visualization_ex.py
model_prediction.py
pipeline.xml
search_model_validation.py
test-data/RandomForestClassifier.zip
test-data/StackingCVRegressor01.zip
test-data/StackingRegressor02.zip
test-data/StackingVoting03.zip
test-data/abc_model01
test-data/abr_model01
test-data/best_estimator_.zip
test-data/brier_score_loss.txt
test-data/classification_report.txt
test-data/gbc_model01
test-data/gbr_model01
test-data/get_params05.tabular
test-data/get_params12.tabular
test-data/glm_model01
test-data/glm_model02
test-data/glm_model03
test-data/glm_model04
test-data/glm_model05
test-data/glm_model06
test-data/glm_model07
test-data/glm_model08
test-data/glm_result01
test-data/glm_result02
test-data/glm_result08
test-data/keras01.json
test-data/keras02.json
test-data/keras04.json
test-data/keras_batch_model01
test-data/keras_batch_model02
test-data/keras_batch_params01.tabular
test-data/keras_model01
test-data/keras_model02
test-data/keras_model04
test-data/keras_params04.tabular
test-data/keras_prefitted01.zip
test-data/keras_save_weights01.h5
test-data/lda_model01
test-data/lda_model02
test-data/ml_vis01.html
test-data/ml_vis02.html
test-data/ml_vis03.html
test-data/ml_vis04.html
test-data/ml_vis05.html
test-data/ml_vis05.png
test-data/model_fit01
test-data/model_fit02
test-data/model_fit02.h5
test-data/named_steps.txt
test-data/nn_model01
test-data/nn_model02
test-data/nn_model03
test-data/pipeline01
test-data/pipeline02
test-data/pipeline03
test-data/pipeline04
test-data/pipeline05
test-data/pipeline06
test-data/pipeline07
test-data/pipeline08
test-data/pipeline09
test-data/pipeline10
test-data/pipeline11
test-data/pipeline12
test-data/pipeline14
test-data/pipeline15
test-data/pipeline16
test-data/prp_model03
test-data/prp_model05
test-data/prp_model08
test-data/prp_model09
test-data/qda_model01
test-data/rfc_model01
test-data/rfr_model01
test-data/searchCV01
test-data/searchCV02
test-data/train_test_eval_model01
test-data/train_test_eval_weights01.h5
test-data/train_test_eval_weights02.h5
train_test_eval.py
added:
keras_train_and_eval.py
test-data/keras_batch_model04
test-data/keras_batch_params04.tabular
test-data/keras_train_eval_y_true02.tabular
test-data/pipeline17
test-data/pipeline_params05.tabular
test-data/pipeline_params18
diff -r c33145a815ee -r 3f3c6dc38f3e keras_deep_learning.py
--- a/keras_deep_learning.py Thu Nov 07 05:42:25 2019 -0500
+++ b/keras_deep_learning.py Mon Dec 16 05:39:20 2019 -0500
@@ -73,7 +73,7 @@
             }
     """
     constraint_type = config['constraint_type']
-    if constraint_type == 'None':
+    if constraint_type in ('None', ''):
         return None
 
     klass = getattr(keras.constraints, constraint_type)
@@ -92,7 +92,7 @@
     """Access to handle all kinds of parameters
     """
     for key, value in six.iteritems(params):
-        if value == 'None':
+        if value in ('None', ''):
             params[key] = None
             continue
 
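The two hunks above make the tool treat both the literal string 'None' and an empty string as Python None when Galaxy hands optional parameters over as text. An illustrative sketch of the same normalization idea (not part of the changeset):

# Normalize Galaxy text parameters: the strings 'None' and '' both mean Python None.
def normalize_params(params):
    cleaned = {}
    for key, value in params.items():
        if isinstance(value, str) and value.strip() in ('None', ''):
            cleaned[key] = None
        else:
            cleaned[key] = value
    return cleaned

print(normalize_params({'epsilon': '', 'decay': 0.0, 'clipnorm': 'None'}))
# {'epsilon': None, 'decay': 0.0, 'clipnorm': None}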
@@ -205,6 +205,9 @@
     config : dictionary, galaxy tool parameters loaded by JSON
     """
     generator_type = config.pop('generator_type')
+    if generator_type == 'none':
+        return None
+
     klass = try_get_attr('galaxy_ml.preprocessors', generator_type)
 
     if generator_type == 'GenomicIntervalBatchGenerator':
@@ -240,7 +243,7 @@
     json_string = model.to_json()
 
     with open(outfile, 'w') as f:
-        f.write(json_string)
+        json.dump(json.loads(json_string), f, indent=2)
 
 
 def build_keras_model(inputs, outfile, model_json, infile_weights=None,
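The serialization change above writes the model config as indented JSON rather than a single line, which makes the .json test files readable and diff-friendly. A self-contained, illustrative sketch of the same idea using a throwaway Keras model (the tool itself builds the model from the Galaxy config):

import json
from keras.models import Sequential
from keras.layers import Dense

# build a tiny model, then pretty-print its JSON config with a 2-space indent
model = Sequential([Dense(4, input_shape=(8,), activation='relu'), Dense(1)])
json_string = model.to_json()
with open('model.json', 'w') as f:
    json.dump(json.loads(json_string), f, indent=2)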
diff -r c33145a815ee -r 3f3c6dc38f3e keras_macros.xml
--- a/keras_macros.xml Thu Nov 07 05:42:25 2019 -0500
+++ b/keras_macros.xml Mon Dec 16 05:39:20 2019 -0500
@@ -1,5 +1,5 @@
 <macros>
-  <token name="@KERAS_VERSION@">0.4.2</token>
+  <token name="@KERAS_VERSION@">0.5.0</token>
 
   <xml name="macro_stdio">
     <stdio>
@@ -18,7 +18,7 @@
 
   <xml name="keras_optimizer_common_more" token_lr="0.001">
     <expand macro="keras_optimizer_common" lr="@LR@">
-      <param argument="epsilon" type="float" value="" label="epsilon" optional="true" help="Fuzz factor. If `None`, defaults to `K.epsilon()`"/>
+      <!--param argument="epsilon" type="float" value="" label="epsilon" optional="true" help="Fuzz factor. If `None`, defaults to `K.epsilon()`"/>-->
       <param argument="decay" type="float" value="0" optional="true" label="decay" help="Learning rate decay over each update."/>
       <yield/>
     </expand>
@@ -885,7 +885,7 @@
           <expand macro="keras_optimizer_common" lr="0.002">
             <param argument="beta_1" type="float" value="0.9" optional="true" label="beta_1" help="float, 0 &lt; beta &lt; 1. Generally close to 1."/>
             <param argument="beta_2" type="float" value="0.999" optional="true" label="beta_2" help="float, 0 &lt; beta &lt; 1. Generally close to 1."/>
-            <param argument="epsilon" type="float" value="" label="epsilon" optional="true" help="Fuzz factor. If `None`, defaults to `K.epsilon()`"/>
+            <!--param argument="epsilon" type="float" value="" label="epsilon" optional="true" help="Fuzz factor. If `None`, defaults to `K.epsilon()`"/>-->
             <param argument="schedule_decay" type="float" value="0.004" optional="true" label="schedule_decay" help="float, 0 &lt; beta &lt; 1."/>
           </expand>
         </when>
diff -r c33145a815ee -r 3f3c6dc38f3e keras_train_and_eval.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/keras_train_and_eval.py Mon Dec 16 05:39:20 2019 -0500
New file, 491 lines. It patches galaxy_ml's `_fit_and_score` into sklearn's `_search`/`_validation`, reads N_JOBS from GALAXY_SLOTS, keeps a local joblib cache directory, and defines NON_SEARCHABLE and ALLOWED_CALLBACKS constants. Main pieces:
- `_eval_swap_params()`: evaluates hyperparameter swap values with SafeEval and skips non-searchable parameters with a warning.
- `train_test_split_none()`: extends galaxy_ml's `train_test_split` so that None arrays pass through unchanged and the split can be made by group names.
- `_evaluate()`: computes (multi-metric) scores from y_true and predicted probabilities with a scorer dict.
- `main()`: fits the estimator (passing validation_data for Keras models), evaluates the held-out split via `estimator.evaluate`/`_predict_generator` or `predict_proba`/`predict` plus `_score`, optionally writes y_true and the predictions to tabular files, writes the sorted scores to outfile_result, clears the cache, and optionally pickles the trained estimator, saving Keras weights separately through `save_weights`.
- An argparse entry point exposing --inputs, --estimator, --infile1, --infile2, --outfile_result, --outfile_object, --outfile_weights, --outfile_y_true, --outfile_y_preds, --groups, --ref_seq, --intervals, --targets and --fasta_path.
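The `train_test_split_none()` helper lets None targets or group arrays flow through the split untouched. A simplified, illustrative sketch of that behaviour (not the tool's implementation, which additionally supports splitting by group names):

import numpy as np
from sklearn.model_selection import train_test_split

def split_allow_none(*arrays, **kwargs):
    # None inputs pass through unchanged; everything else is split as usual
    none_idx = [i for i, a in enumerate(arrays) if a is None]
    real = [a for a in arrays if a is not None]
    rval = train_test_split(*real, **kwargs)
    for i in none_idx:
        rval[i * 2:i * 2] = [None, None]   # re-insert (train, test) placeholders
    return rval

X = np.arange(20).reshape(10, 2)
y = np.arange(10)
X_tr, X_te, g_tr, g_te, y_tr, y_te = split_allow_none(
    X, None, y, test_size=0.3, random_state=0)
print(g_tr, g_te)   # None None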
diff -r c33145a815ee -r 3f3c6dc38f3e main_macros.xml
--- a/main_macros.xml Thu Nov 07 05:42:25 2019 -0500
+++ b/main_macros.xml Mon Dec 16 05:39:20 2019 -0500
Visible changes:
- The @VERSION@ token goes from 1.0.7.12 to 1.0.8.1, the @ENSEMBLE_VERSION@ token (0.2.0) is removed, and the Galaxy-ML requirement is bumped from 0.7.12 to 0.8.1.
- The `n_iter` macro is replaced by `n_iter_no_change` ("Number of iterations with no improvement to wait before early stopping").
- New preprocessor options QuantileTransformer, PowerTransformer and KBinsDiscretizer, with Advanced Options sections exposing, e.g., n_quantiles, output_distribution, ignore_implicit_zeros, subsample and random_state (QuantileTransformer) and the yeo-johnson or box-cox method (PowerTransformer).
- The `search_cv_estimator` macro (estimator input plus search-parameter builder) is removed, and the `refit` parameter in the search-CV options is commented out.
- New ensemble estimator options HistGradientBoostingClassifier and HistGradientBoostingRegressor.
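The three preprocessors added to the selector are standard scikit-learn transformers; a quick, illustrative sketch with typical settings on toy data:

import numpy as np
from sklearn.preprocessing import (QuantileTransformer, PowerTransformer,
                                   KBinsDiscretizer)

X = np.random.RandomState(42).lognormal(size=(100, 2))

qt = QuantileTransformer(n_quantiles=100, output_distribution='normal',
                         random_state=42)
pt = PowerTransformer(method='yeo-johnson', standardize=True)
kb = KBinsDiscretizer(n_bins=5, encode='ordinal', strategy='quantile')

print(qt.fit_transform(X)[:2])
print(pt.fit_transform(X)[:2])
print(kb.fit_transform(X)[:2])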
diff -r c33145a815ee -r 3f3c6dc38f3e ml_visualization_ex.py
--- a/ml_visualization_ex.py Thu Nov 07 05:42:25 2019 -0500
+++ b/ml_visualization_ex.py Mon Dec 16 05:39:20 2019 -0500
Visible changes:
- Adds matplotlib/matplotlib.pyplot and os imports plus a list of default plotly colors.
- New helpers visualize_pr_curve_plotly() and visualize_pr_curve_matplotlib() (precision-recall curves from y_true/y_score dataframes, with the average precision in the legend), plus a plotly ROC-curve counterpart.
- Each plot type now writes its own output — plotly.offline.plot(...) to output.html, or matplotlib savefig to output.svg — renames the file to `output` so it can be discovered via from_work_dir, and returns 0; the shared plotting code at the end of main() is removed.
- Plot layouts gain centered titles, a sans-serif font and a transparent plot background; keras_plot_model now uses a plain os.rename call.
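The new plotly output follows the usual precision-recall recipe: compute the curve with scikit-learn, build a Scatter trace, and write a standalone HTML file with plotly.offline.plot. An illustrative sketch on synthetic scores (the tool additionally renames output.html to `output` for Galaxy's from_work_dir):

import numpy as np
import plotly
import plotly.graph_objs as go
from sklearn.metrics import precision_recall_curve, average_precision_score

rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=200)
y_score = np.clip(y_true * 0.6 + rng.rand(200) * 0.7, 0, 1)

precision, recall, _ = precision_recall_curve(y_true, y_score)
ap = average_precision_score(y_true, y_score)

trace = go.Scatter(x=recall, y=precision, mode='lines',
                   name='PR curve (area = %.3f)' % ap)
layout = go.Layout(xaxis=dict(title='Recall'), yaxis=dict(title='Precision'),
                   title='Precision-Recall Curve')
fig = go.Figure(data=[trace], layout=layout)
plotly.offline.plot(fig, filename='output.html', auto_open=False)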
diff -r c33145a815ee -r 3f3c6dc38f3e model_prediction.py
--- a/model_prediction.py Thu Nov 07 05:42:25 2019 -0500
+++ b/model_prediction.py Mon Dec 16 05:39:20 2019 -0500
@@ -2,13 +2,11 @@
 import json
 import numpy as np
 import pandas as pd
-import tabix
 import warnings
 
 from scipy.io import mmread
 from sklearn.pipeline import Pipeline
 
-from galaxy_ml.externals.selene_sdk.sequences import Genome
 from galaxy_ml.utils import (load_model, read_columns,
                              get_module, try_get_attr)
 
@@ -138,45 +136,10 @@
         pred_data_generator = klass(
             ref_genome_path=ref_seq, vcf_path=vcf_path, **options)
 
-        pred_data_generator.fit()
+        pred_data_generator.set_processing_attrs()
 
         variants = pred_data_generator.variants
-        # TODO : remove the following block after galaxy-ml v0.7.13
-        blacklist_tabix = getattr(pred_data_generator.reference_genome_,
-                                  '_blacklist_tabix', None)
-        clean_variants = []
-        if blacklist_tabix:
-            start_radius = pred_data_generator.start_radius_
-            end_radius = pred_data_generator.end_radius_
 
-            for chrom, pos, name, ref, alt, strand in variants:
-                center = pos + len(ref) // 2
-                start = center - start_radius
-                end = center + end_radius
-
-                if isinstance(pred_data_generator.reference_genome_, Genome):
-                    if "chr" not in chrom:
-                        chrom = "chr" + chrom
-                    if "MT" in chrom:
-                        chrom = chrom[:-1]
-                try:
-                    rows = blacklist_tabix.query(chrom, start, end)
-                    found = 0
-                    for row in rows:
-                        found = 1
-                        break
-                    if found:
-                        continue
-                except tabix.TabixError:
-                    pass
-
-                clean_variants.append((chrom, pos, name, ref, alt, strand))
-        else:
-            clean_variants = variants
-
-        setattr(pred_data_generator, 'variants', clean_variants)
-
-        variants = np.array(clean_variants)
         # predict 1600 sample at once then write to file
         gen_flow = pred_data_generator.flow(batch_size=1600)
 
diff -r c33145a815ee -r 3f3c6dc38f3e pipeline.xml
--- a/pipeline.xml Thu Nov 07 05:42:25 2019 -0500
+++ b/pipeline.xml Mon Dec 16 05:39:20 2019 -0500
Visible changes:
- The tool description becomes "an all-in-one platform to build pipeline, single estimator, preprocessor and custom wrappers".
- The embedded script additionally imports load_model and sklearn.experimental.enable_hist_gradient_boosting (marked as a TODO to remove after scikit-learn v0.22).
- A new `sklearn.compose` branch loads a pickled regressor and a pickled transformer and builds a TransformedTargetRegressor as the final estimator.
- The `output_type`/Final_Estimator_Builder switch is removed: when only one step is configured, that component is pickled directly; otherwise an (imblearn-aware) pipeline is assembled with make_pipeline and pickled.
- The estimator selector gains a `sklearn.compose` module option (TransformedTargetRegressor with regressor and transformer dataset inputs); the tests drop the output_type parameter.
- The help text is rewritten to describe pipeline vs. single-estimator output, the supported preprocessing modules and final estimators, the custom estimators (GenomeOneHotEncoder, ProteinOnehotEncoder, IRAPSClassifier, BinarizeTargetClassifier, BinarizeTargetRegressor), the outputs (pickled pipeline/estimator and optional hyperparameter table), and the corresponding Galaxy-ML documentation links.
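The new sklearn.compose branch wraps a regressor so that the target is transformed before fitting and inverse-transformed at prediction time. A small, illustrative scikit-learn sketch of the kind of object the tool now assembles:

import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import QuantileTransformer

X = np.random.RandomState(0).rand(100, 3)
y = np.exp(X.sum(axis=1))

est = TransformedTargetRegressor(
    regressor=LinearRegression(),
    transformer=QuantileTransformer(n_quantiles=50,
                                    output_distribution='normal',
                                    random_state=0))
est.fit(X, y)
print(est.predict(X[:3]))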
diff -r c33145a815ee -r 3f3c6dc38f3e search_model_validation.py
--- a/search_model_validation.py Thu Nov 07 05:42:25 2019 -0500
+++ b/search_model_validation.py Mon Dec 16 05:39:20 2019 -0500
Visible changes:
- Imports are trimmed (xgboost, imblearn, mlxtend and the broad sklearn imports are dropped; os, sklearn.pipeline.Pipeline, clean_params and get_main_estimator are added); the cache directory now lives under the working directory and the ALLOWED_CALLBACKS constant is removed.
- main() is refactored: a new `_handle_X_y()` helper reads the feature/target datasets and the genomic ref_seq/intervals/targets/fasta inputs, and the train-test-split outer mode is deprecated (left commented out).
- After the search, cv_results_ is always sorted and written as a tabular file; the old best_estimator_ test-scoring branch is removed.
- When an output object is requested, the best estimator is passed through clean_params(), the main estimator is obtained with get_main_estimator(), Keras weights are saved separately if applicable, and the cleaned best_estimator_ is pickled (its repr is printed).
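The refactored flow always writes the sorted cv_results_ table and, on request, pickles the best estimator. An illustrative sketch of those two output steps with a plain scikit-learn search (file names are placeholders):

import pickle
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

X, y = load_iris(return_X_y=True)
searcher = GridSearchCV(RandomForestClassifier(random_state=42),
                        {'n_estimators': [10, 50]}, cv=3)
searcher.fit(X, y)

# dump the cross-validation results as a tab-separated table
cv_results = pd.DataFrame(searcher.cv_results_)
cv_results = cv_results[sorted(cv_results.columns)]
cv_results.to_csv('cv_results.tabular', sep='\t', header=True, index=False)

# pickle the refitted best estimator
with open('best_estimator.pkl', 'wb') as f:
    pickle.dump(searcher.best_estimator_, f, pickle.HIGHEST_PROTOCOL)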
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/RandomForestClassifier.zip
Binary file test-data/RandomForestClassifier.zip has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/StackingCVRegressor01.zip
Binary file test-data/StackingCVRegressor01.zip has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/StackingRegressor02.zip
Binary file test-data/StackingRegressor02.zip has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/StackingVoting03.zip
Binary file test-data/StackingVoting03.zip has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/abc_model01
Binary file test-data/abc_model01 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/abr_model01
Binary file test-data/abr_model01 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/best_estimator_.zip
Binary file test-data/best_estimator_.zip has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/brier_score_loss.txt
--- a/test-data/brier_score_loss.txt Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/brier_score_loss.txt Mon Dec 16 05:39:20 2019 -0500
@@ -1,2 +1,2 @@
 brier_score_loss : 
-0.5641025641025641
+0.24051282051282052
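For context, this reference value is the output of the Brier score metric, presumably computed with scikit-learn's brier_score_loss; an illustrative call:

from sklearn.metrics import brier_score_loss

y_true = [0, 1, 1, 0]
y_prob = [0.1, 0.9, 0.8, 0.3]
print(brier_score_loss(y_true, y_prob))   # 0.0375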
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/classification_report.txt
--- a/test-data/classification_report.txt Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/classification_report.txt Mon Dec 16 05:39:20 2019 -0500
@@ -5,7 +5,7 @@
            1       1.00      0.62      0.77        16
            2       0.60      1.00      0.75         9
 
-   micro avg       0.85      0.85      0.85        39
+    accuracy                           0.85        39
    macro avg       0.87      0.88      0.84        39
 weighted avg       0.91      0.85      0.85        39
 
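The expected report changes because newer scikit-learn releases (0.21+) print an `accuracy` row in place of the former `micro avg` row for single-label classification. An illustrative snippet that produces the new layout:

from sklearn.metrics import classification_report

y_true = [0, 1, 2, 2, 1, 0]
y_pred = [0, 2, 2, 2, 1, 0]
print(classification_report(y_true, y_pred))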
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/gbc_model01
Binary file test-data/gbc_model01 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/gbr_model01
Binary file test-data/gbr_model01 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/get_params05.tabular
--- a/test-data/get_params05.tabular Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/get_params05.tabular Mon Dec 16 05:39:20 2019 -0500
@@ -1,31 +1,18 @@
  Parameter Value
-* memory memory: None
-* steps "steps: [('randomforestregressor', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=None,
-           max_features='auto', max_leaf_nodes=None,
-           min_impurity_decrease=0.0, min_impurity_split=None,
-           min_samples_leaf=1, min_samples_split=2,
-           min_weight_fraction_leaf=0.0, n_estimators=100, n_jobs=1,
-           oob_score=False, random_state=42, verbose=0, warm_start=False))]"
-@ randomforestregressor "randomforestregressor: RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=None,
-           max_features='auto', max_leaf_nodes=None,
-           min_impurity_decrease=0.0, min_impurity_split=None,
-           min_samples_leaf=1, min_samples_split=2,
-           min_weight_fraction_leaf=0.0, n_estimators=100, n_jobs=1,
-           oob_score=False, random_state=42, verbose=0, warm_start=False)"
-@ randomforestregressor__bootstrap randomforestregressor__bootstrap: True
-@ randomforestregressor__criterion randomforestregressor__criterion: 'mse'
-@ randomforestregressor__max_depth randomforestregressor__max_depth: None
-@ randomforestregressor__max_features randomforestregressor__max_features: 'auto'
-@ randomforestregressor__max_leaf_nodes randomforestregressor__max_leaf_nodes: None
-@ randomforestregressor__min_impurity_decrease randomforestregressor__min_impurity_decrease: 0.0
-@ randomforestregressor__min_impurity_split randomforestregressor__min_impurity_split: None
-@ randomforestregressor__min_samples_leaf randomforestregressor__min_samples_leaf: 1
-@ randomforestregressor__min_samples_split randomforestregressor__min_samples_split: 2
-@ randomforestregressor__min_weight_fraction_leaf randomforestregressor__min_weight_fraction_leaf: 0.0
-@ randomforestregressor__n_estimators randomforestregressor__n_estimators: 100
-* randomforestregressor__n_jobs randomforestregressor__n_jobs: 1
-@ randomforestregressor__oob_score randomforestregressor__oob_score: False
-@ randomforestregressor__random_state randomforestregressor__random_state: 42
-* randomforestregressor__verbose randomforestregressor__verbose: 0
-@ randomforestregressor__warm_start randomforestregressor__warm_start: False
- Note: @, searchable params in searchcv too.
+@ bootstrap bootstrap: True
+@ criterion criterion: 'mse'
+@ max_depth max_depth: None
+@ max_features max_features: 'auto'
+@ max_leaf_nodes max_leaf_nodes: None
+@ min_impurity_decrease min_impurity_decrease: 0.0
+@ min_impurity_split min_impurity_split: None
+@ min_samples_leaf min_samples_leaf: 1
+@ min_samples_split min_samples_split: 2
+@ min_weight_fraction_leaf min_weight_fraction_leaf: 0.0
+@ n_estimators n_estimators: 100
+* n_jobs n_jobs: 1
+@ oob_score oob_score: False
+@ random_state random_state: 42
+* verbose verbose: 0
+@ warm_start warm_start: False
+ Note: @, params eligible for search in searchcv tool.
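The regenerated table now lists the parameters of the bare RandomForestRegressor instead of a one-step pipeline, so the randomforestregressor__ prefix disappears. An illustrative look at where the two name sets come from:

from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline

rfr = RandomForestRegressor(random_state=42)
print(sorted(rfr.get_params()))            # bootstrap, criterion, max_depth, ...

pipe = make_pipeline(RandomForestRegressor(random_state=42))
print(sorted(pipe.get_params(deep=True)))  # randomforestregressor__bootstrap, ...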
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/get_params12.tabular
--- a/test-data/get_params12.tabular Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/get_params12.tabular Mon Dec 16 05:39:20 2019 -0500
@@ -1,47 +1,32 @@
  Parameter Value
-* memory memory: None
-* steps "steps: [('rfe', RFE(estimator=XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
-       colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0,
-       max_depth=3, min_child_weight=1, missing=nan, n_estimators=100,
-       n_jobs=1, nthread=None, objective='reg:linear', random_state=0,
-       reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
-       silent=True, subsample=1),
-  n_features_to_select=None, step=1, verbose=0))]"
-@ rfe "rfe: RFE(estimator=XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
-       colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0,
-       max_depth=3, min_child_weight=1, missing=nan, n_estimators=100,
-       n_jobs=1, nthread=None, objective='reg:linear', random_state=0,
-       reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
-       silent=True, subsample=1),
-  n_features_to_select=None, step=1, verbose=0)"
-@ rfe__estimator__base_score rfe__estimator__base_score: 0.5
-@ rfe__estimator__booster rfe__estimator__booster: 'gbtree'
-@ rfe__estimator__colsample_bylevel rfe__estimator__colsample_bylevel: 1
-@ rfe__estimator__colsample_bytree rfe__estimator__colsample_bytree: 1
-@ rfe__estimator__gamma rfe__estimator__gamma: 0
-@ rfe__estimator__learning_rate rfe__estimator__learning_rate: 0.1
-@ rfe__estimator__max_delta_step rfe__estimator__max_delta_step: 0
-@ rfe__estimator__max_depth rfe__estimator__max_depth: 3
-@ rfe__estimator__min_child_weight rfe__estimator__min_child_weight: 1
-@ rfe__estimator__missing rfe__estimator__missing: nan
-@ rfe__estimator__n_estimators rfe__estimator__n_estimators: 100
-* rfe__estimator__n_jobs rfe__estimator__n_jobs: 1
-* rfe__estimator__nthread rfe__estimator__nthread: None
-@ rfe__estimator__objective rfe__estimator__objective: 'reg:linear'
-@ rfe__estimator__random_state rfe__estimator__random_state: 0
-@ rfe__estimator__reg_alpha rfe__estimator__reg_alpha: 0
-@ rfe__estimator__reg_lambda rfe__estimator__reg_lambda: 1
-@ rfe__estimator__scale_pos_weight rfe__estimator__scale_pos_weight: 1
-@ rfe__estimator__seed rfe__estimator__seed: None
-@ rfe__estimator__silent rfe__estimator__silent: True
-@ rfe__estimator__subsample rfe__estimator__subsample: 1
-@ rfe__estimator "rfe__estimator: XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
+@ estimator "estimator: XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
        colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0,
        max_depth=3, min_child_weight=1, missing=nan, n_estimators=100,
        n_jobs=1, nthread=None, objective='reg:linear', random_state=0,
        reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
        silent=True, subsample=1)"
-@ rfe__n_features_to_select rfe__n_features_to_select: None
-@ rfe__step rfe__step: 1
-* rfe__verbose rfe__verbose: 0
- Note: @, searchable params in searchcv too.
+@ n_features_to_select n_features_to_select: None
+* step step: 1
+* verbose verbose: 0
+@ estimator__base_score estimator__base_score: 0.5
+@ estimator__booster estimator__booster: 'gbtree'
+@ estimator__colsample_bylevel estimator__colsample_bylevel: 1
+@ estimator__colsample_bytree estimator__colsample_bytree: 1
+@ estimator__gamma estimator__gamma: 0
+@ estimator__learning_rate estimator__learning_rate: 0.1
+@ estimator__max_delta_step estimator__max_delta_step: 0
+@ estimator__max_depth estimator__max_depth: 3
+@ estimator__min_child_weight estimator__min_child_weight: 1
+@ estimator__missing estimator__missing: nan
+@ estimator__n_estimators estimator__n_estimators: 100
+* estimator__n_jobs estimator__n_jobs: 1
+* estimator__nthread estimator__nthread: None
+@ estimator__objective estimator__objective: 'reg:linear'
+@ estimator__random_state estimator__random_state: 0
+@ estimator__reg_alpha estimator__reg_alpha: 0
+@ estimator__reg_lambda estimator__reg_lambda: 1
+@ estimator__scale_pos_weight estimator__scale_pos_weight: 1
+@ estimator__seed estimator__seed: None
+@ estimator__silent estimator__silent: True
+@ estimator__subsample estimator__subsample: 1
+ Note: @, params eligible for search in searchcv tool.
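Similarly, a hedged sketch of where the estimator__ keys in the new fixture come from: calling get_params(deep=True) on an RFE selector exposes its own parameters plus the wrapped estimator's parameters under the estimator__ prefix, which is what makes them tunable (the @ rows) in the searchcv tool. XGBRegressor appears here only because it is the estimator in the fixture; any scikit-learn compatible regressor behaves the same way.

    # Hedged sketch: nested parameter names exposed by RFE via get_params(deep=True).
    from sklearn.feature_selection import RFE
    from xgboost import XGBRegressor

    selector = RFE(estimator=XGBRegressor(random_state=0), n_features_to_select=None, step=1)

    params = selector.get_params(deep=True)
    print('estimator__max_depth' in params)    # True -> searchable as estimator__max_depth in a param grid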
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/glm_model01
b
Binary file test-data/glm_model01 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/glm_model02
b
Binary file test-data/glm_model02 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/glm_model03
b
Binary file test-data/glm_model03 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/glm_model04
b
Binary file test-data/glm_model04 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/glm_model05
b
Binary file test-data/glm_model05 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/glm_model06
b
Binary file test-data/glm_model06 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/glm_model07
b
Binary file test-data/glm_model07 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/glm_model08
b
Binary file test-data/glm_model08 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/glm_result01
--- a/test-data/glm_result01 Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/glm_result01 Mon Dec 16 05:39:20 2019 -0500
b
@@ -1,5 +1,5 @@
-86.97021227350001 1.00532111569 -1.01739601979 -0.613139481654 0.641846874331 3703215242836.872
-91.2021798817 -0.6215229712070001 1.11914889596 0.390012184498 1.28956938152 3875943636708.156
--47.4101632272 -0.638416457964 -0.7327774684530001 -0.8640261049779999 -1.06109770116 -2071574726112.0168
-61.712804630200004 -1.0999480057700002 -0.739679672932 0.585657963012 1.4890682753600002 2642119730255.405
--206.998295124 0.130238853011 0.70574123041 1.3320656526399999 -1.3322092373799999 -8851040854159.11
+86.97021227350001 1.00532111569 -1.01739601979 -0.613139481654 0.641846874331 20479602419382.055
+91.2021798817 -0.6215229712070001 1.11914889596 0.390012184498 1.28956938152 21460309408632.004
+-47.4101632272 -0.638416457964 -0.7327774684530001 -0.8640261049779999 -1.06109770116 -11245419999724.842
+61.712804630200004 -1.0999480057700002 -0.739679672932 0.585657963012 1.4890682753600002 14574106078789.26
+-206.998295124 0.130238853011 0.70574123041 1.3320656526399999 -1.3322092373799999 -48782519807586.32
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/glm_result02
--- a/test-data/glm_result02 Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/glm_result02 Mon Dec 16 05:39:20 2019 -0500
b
@@ -1,5 +1,5 @@
 3.68258022948 2.82110345641 -3.9901407239999998 -1.9523364774 1
 0.015942057224 -0.7119585943469999 0.125502976978 -0.972218263337 0
-2.0869076882499997 0.929399321468 -2.1292408448400004 -1.9971402218799998 0
-1.4132105208399999 0.523750660422 -1.4210539291 -1.49298569451 0
+2.0869076882499997 0.929399321468 -2.1292408448400004 -1.9971402218799998 1
+1.4132105208399999 0.523750660422 -1.4210539291 -1.49298569451 1
 0.7683140439399999 1.38267855169 -0.989045048734 0.649504257894 1
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/glm_result08
--- a/test-data/glm_result08 Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/glm_result08 Mon Dec 16 05:39:20 2019 -0500
b
@@ -1,4 +1,4 @@
-3.68258022948 2.82110345641 -3.9901407239999998 -1.9523364774 0
+3.68258022948 2.82110345641 -3.9901407239999998 -1.9523364774 1
 0.015942057224 -0.7119585943469999 0.125502976978 -0.972218263337 0
 2.0869076882499997 0.929399321468 -2.1292408448400004 -1.9971402218799998 0
 1.4132105208399999 0.523750660422 -1.4210539291 -1.49298569451 0
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras01.json
--- a/test-data/keras01.json Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/keras01.json Mon Dec 16 05:39:20 2019 -0500
[
@@ -1,1 +1,90 @@
-{"class_name": "Sequential", "config": {"name": "sequential_1", "layers": [{"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "batch_input_shape": [null, 784], "dtype": "float32", "units": 32, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Activation", "config": {"name": "activation_1", "trainable": true, "activation": "relu"}}, {"class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "units": 10, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Activation", "config": {"name": "activation_2", "trainable": true, "activation": "softmax"}}]}, "keras_version": "2.2.4", "backend": "tensorflow"}
\ No newline at end of file
+{
+  "class_name": "Sequential",
+  "config": {
+    "name": "sequential_1",
+    "layers": [
+      {
+        "class_name": "Dense",
+        "config": {
+          "name": "dense_1",
+          "trainable": true,
+          "batch_input_shape": [
+            null,
+            784
+          ],
+          "dtype": "float32",
+          "units": 32,
+          "activation": "linear",
+          "use_bias": true,
+          "kernel_initializer": {
+            "class_name": "VarianceScaling",
+            "config": {
+              "scale": 1.0,
+              "mode": "fan_avg",
+              "distribution": "uniform",
+              "seed": null
+            }
+          },
+          "bias_initializer": {
+            "class_name": "Zeros",
+            "config": {}
+          },
+          "kernel_regularizer": null,
+          "bias_regularizer": null,
+          "activity_regularizer": null,
+          "kernel_constraint": null,
+          "bias_constraint": null
+        }
+      },
+      {
+        "class_name": "Activation",
+        "config": {
+          "name": "activation_1",
+          "trainable": true,
+          "dtype": "float32",
+          "activation": "relu"
+        }
+      },
+      {
+        "class_name": "Dense",
+        "config": {
+          "name": "dense_2",
+          "trainable": true,
+          "dtype": "float32",
+          "units": 10,
+          "activation": "linear",
+          "use_bias": true,
+          "kernel_initializer": {
+            "class_name": "VarianceScaling",
+            "config": {
+              "scale": 1.0,
+              "mode": "fan_avg",
+              "distribution": "uniform",
+              "seed": null
+            }
+          },
+          "bias_initializer": {
+            "class_name": "Zeros",
+            "config": {}
+          },
+          "kernel_regularizer": null,
+          "bias_regularizer": null,
+          "activity_regularizer": null,
+          "kernel_constraint": null,
+          "bias_constraint": null
+        }
+      },
+      {
+        "class_name": "Activation",
+        "config": {
+          "name": "activation_2",
+          "trainable": true,
+          "dtype": "float32",
+          "activation": "softmax"
+        }
+      }
+    ]
+  },
+  "keras_version": "2.3.1",
+  "backend": "tensorflow"
+}
\ No newline at end of file
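For reference, a hedged sketch (assuming Keras 2.3.x with the TensorFlow backend) of a model whose to_json() output has the same shape as keras01.json above. Exact layer names and the newly added per-layer "dtype" field depend on the Keras version, which is why the expected JSON had to be regenerated in this changeset.

    # Hedged sketch: a Sequential model whose serialized config resembles keras01.json.
    import json
    from keras.models import Sequential
    from keras.layers import Dense, Activation

    model = Sequential([
        Dense(32, input_shape=(784,)),   # dense_1, linear activation by default
        Activation('relu'),              # activation_1
        Dense(10),                       # dense_2
        Activation('softmax'),           # activation_2
    ])

    print(json.dumps(json.loads(model.to_json()), indent=2))   # pretty-printed, as in the new fixture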
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras02.json
--- a/test-data/keras02.json Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/keras02.json Mon Dec 16 05:39:20 2019 -0500
[
@@ -1,1 +1,385 @@
[truncated diff: the old file is a single-line JSON config of functional model "model_1" (main_input -> embedding_1 -> lstm_1 -> dense_1, plus aux_input -> concatenate_1 -> dense_2 ... dense_5) with keras_version "2.2.4"; the new file pretty-prints the same config over 385 lines, adds "dtype": "float32" to each layer config, and bumps keras_version to "2.3.1"; no newline at end of file.]

b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras04.json
--- a/test-data/keras04.json Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/keras04.json Mon Dec 16 05:39:20 2019 -0500
[
@@ -1,1 +1,90 @@
-{"class_name": "Sequential", "config": {"name": "sequential_1", "layers": [{"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "batch_input_shape": [null, 17], "dtype": "float32", "units": 32, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Activation", "config": {"name": "activation_1", "trainable": true, "activation": "linear"}}, {"class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "units": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Activation", "config": {"name": "activation_2", "trainable": true, "activation": "linear"}}]}, "keras_version": "2.2.4", "backend": "tensorflow"}
\ No newline at end of file
+{
+  "class_name": "Sequential",
+  "config": {
+    "name": "sequential_1",
+    "layers": [
+      {
+        "class_name": "Dense",
+        "config": {
+          "name": "dense_1",
+          "trainable": true,
+          "batch_input_shape": [
+            null,
+            17
+          ],
+          "dtype": "float32",
+          "units": 32,
+          "activation": "linear",
+          "use_bias": true,
+          "kernel_initializer": {
+            "class_name": "VarianceScaling",
+            "config": {
+              "scale": 1.0,
+              "mode": "fan_avg",
+              "distribution": "uniform",
+              "seed": null
+            }
+          },
+          "bias_initializer": {
+            "class_name": "Zeros",
+            "config": {}
+          },
+          "kernel_regularizer": null,
+          "bias_regularizer": null,
+          "activity_regularizer": null,
+          "kernel_constraint": null,
+          "bias_constraint": null
+        }
+      },
+      {
+        "class_name": "Activation",
+        "config": {
+          "name": "activation_1",
+          "trainable": true,
+          "dtype": "float32",
+          "activation": "linear"
+        }
+      },
+      {
+        "class_name": "Dense",
+        "config": {
+          "name": "dense_2",
+          "trainable": true,
+          "dtype": "float32",
+          "units": 1,
+          "activation": "linear",
+          "use_bias": true,
+          "kernel_initializer": {
+            "class_name": "VarianceScaling",
+            "config": {
+              "scale": 1.0,
+              "mode": "fan_avg",
+              "distribution": "uniform",
+              "seed": null
+            }
+          },
+          "bias_initializer": {
+            "class_name": "Zeros",
+            "config": {}
+          },
+          "kernel_regularizer": null,
+          "bias_regularizer": null,
+          "activity_regularizer": null,
+          "kernel_constraint": null,
+          "bias_constraint": null
+        }
+      },
+      {
+        "class_name": "Activation",
+        "config": {
+          "name": "activation_2",
+          "trainable": true,
+          "dtype": "float32",
+          "activation": "linear"
+        }
+      }
+    ]
+  },
+  "keras_version": "2.3.1",
+  "backend": "tensorflow"
+}
\ No newline at end of file
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras_batch_model01
b
Binary file test-data/keras_batch_model01 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras_batch_model02
b
Binary file test-data/keras_batch_model02 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras_batch_model04
b
Binary file test-data/keras_batch_model04 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras_batch_params01.tabular
--- a/test-data/keras_batch_params01.tabular Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/keras_batch_params01.tabular Mon Dec 16 05:39:20 2019 -0500
[
@@ -6,15 +6,14 @@
 @ callbacks callbacks: [{'callback_selection': {'callback_type': 'None'}}]
 @ class_positive_factor class_positive_factor: 1.0
 @ config config: {'name': 'sequential_1', 'layers': [{'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable
-@ data_batch_generator "data_batch_generator: FastaDNABatchGenerator(fasta_path='to_be_determined', seed=999,
-            seq_length=1000, shuffle=True)"
+@ data_batch_generator "data_batch_generator: FastaDNABatchGenerator(fasta_path='to_be_determined', seed=999, seq_length=1000,
+                       shuffle=True)"
 @ decay decay: 0.0
 @ epochs epochs: 100
-@ epsilon epsilon: None
 @ layers_0_Dense layers_0_Dense: {'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'batch_input_shape': [None,
-@ layers_1_Activation layers_1_Activation: {'class_name': 'Activation', 'config': {'name': 'activation_1', 'trainable': True, 'activation': 're
-@ layers_2_Dense layers_2_Dense: {'class_name': 'Dense', 'config': {'name': 'dense_2', 'trainable': True, 'units': 10, 'activation': 
-@ layers_3_Activation layers_3_Activation: {'class_name': 'Activation', 'config': {'name': 'activation_2', 'trainable': True, 'activation': 'so
+@ layers_1_Activation layers_1_Activation: {'class_name': 'Activation', 'config': {'name': 'activation_1', 'trainable': True, 'dtype': 'float32
+@ layers_2_Dense layers_2_Dense: {'class_name': 'Dense', 'config': {'name': 'dense_2', 'trainable': True, 'dtype': 'float32', 'units'
+@ layers_3_Activation layers_3_Activation: {'class_name': 'Activation', 'config': {'name': 'activation_2', 'trainable': True, 'dtype': 'float32
 @ loss loss: 'binary_crossentropy'
 @ lr lr: 0.01
 @ metrics metrics: ['acc']
@@ -60,12 +59,13 @@
 @ layers_0_Dense__config__units layers_0_Dense__config__units: 32
 @ layers_0_Dense__config__use_bias layers_0_Dense__config__use_bias: True
 * layers_1_Activation__class_name layers_1_Activation__class_name: 'Activation'
-@ layers_1_Activation__config layers_1_Activation__config: {'name': 'activation_1', 'trainable': True, 'activation': 'relu'}
+@ layers_1_Activation__config layers_1_Activation__config: {'name': 'activation_1', 'trainable': True, 'dtype': 'float32', 'activation': 'relu'}
 @ layers_1_Activation__config__activation layers_1_Activation__config__activation: 'relu'
+@ layers_1_Activation__config__dtype layers_1_Activation__config__dtype: 'float32'
 * layers_1_Activation__config__name layers_1_Activation__config__name: 'activation_1'
 @ layers_1_Activation__config__trainable layers_1_Activation__config__trainable: True
 * layers_2_Dense__class_name layers_2_Dense__class_name: 'Dense'
-@ layers_2_Dense__config layers_2_Dense__config: {'name': 'dense_2', 'trainable': True, 'units': 10, 'activation': 'linear', 'use_bias': True, 'kerne
+@ layers_2_Dense__config layers_2_Dense__config: {'name': 'dense_2', 'trainable': True, 'dtype': 'float32', 'units': 10, 'activation': 'linear', 'use
 @ layers_2_Dense__config__activation layers_2_Dense__config__activation: 'linear'
 @ layers_2_Dense__config__activity_regularizer layers_2_Dense__config__activity_regularizer: None
 @ layers_2_Dense__config__bias_constraint layers_2_Dense__config__bias_constraint: None
@@ -73,6 +73,7 @@
 * layers_2_Dense__config__bias_initializer__class_name layers_2_Dense__config__bias_initializer__class_name: 'Zeros'
 @ layers_2_Dense__config__bias_initializer__config layers_2_Dense__config__bias_initializer__config: {}
 @ layers_2_Dense__config__bias_regularizer layers_2_Dense__config__bias_regularizer: None
+@ layers_2_Dense__config__dtype layers_2_Dense__config__dtype: 'float32'
 @ layers_2_Dense__config__kernel_constraint layers_2_Dense__config__kernel_constraint: None
 @ layers_2_Dense__config__kernel_initializer layers_2_Dense__config__kernel_initializer: {'class_name': 'VarianceScaling', 'config': {'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'unifo
 * layers_2_Dense__config__kernel_initializer__class_name layers_2_Dense__config__kernel_initializer__class_name: 'VarianceScaling'
@@ -87,8 +88,9 @@
 @ layers_2_Dense__config__units layers_2_Dense__config__units: 10
 @ layers_2_Dense__config__use_bias layers_2_Dense__config__use_bias: True
 * layers_3_Activation__class_name layers_3_Activation__class_name: 'Activation'
-@ layers_3_Activation__config layers_3_Activation__config: {'name': 'activation_2', 'trainable': True, 'activation': 'softmax'}
+@ layers_3_Activation__config layers_3_Activation__config: {'name': 'activation_2', 'trainable': True, 'dtype': 'float32', 'activation': 'softmax'}
 @ layers_3_Activation__config__activation layers_3_Activation__config__activation: 'softmax'
+@ layers_3_Activation__config__dtype layers_3_Activation__config__dtype: 'float32'
 * layers_3_Activation__config__name layers_3_Activation__config__name: 'activation_2'
 @ layers_3_Activation__config__trainable layers_3_Activation__config__trainable: True
  Note: @, params eligible for search in searchcv tool.
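The layers_*__config__* rows above come from flattening each layer's nested config dict into double-underscore keys, the same convention scikit-learn uses for nested parameters. Below is a minimal, hypothetical sketch of that flattening; the actual implementation in keras_deep_learning.py may differ in details such as ordering, the @/* markers, and value truncation.

    # Hedged sketch: flatten a nested layer config into searchcv-style double-underscore keys.
    def flatten(prefix, obj):
        items = {}
        if isinstance(obj, dict):
            for key, value in obj.items():
                items.update(flatten('%s__%s' % (prefix, key), value))
        items[prefix] = obj
        return items

    layer = {'class_name': 'Activation',
             'config': {'name': 'activation_1', 'trainable': True,
                        'dtype': 'float32', 'activation': 'relu'}}

    for name, value in sorted(flatten('layers_1_Activation', layer).items()):
        print('@', name, '%s: %r' % (name, value))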
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras_batch_params04.tabular
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/keras_batch_params04.tabular Mon Dec 16 05:39:20 2019 -0500
[
@@ -0,0 +1,91 @@
+ Parameter Value
+@ amsgrad amsgrad: None
+@ batch_size batch_size: 32
+@ beta_1 beta_1: None
+@ beta_2 beta_2: None
+@ callbacks callbacks: [{'callback_selection': {'callback_type': 'None'}}]
+@ class_positive_factor class_positive_factor: 1.0
+@ config config: {'name': 'sequential_1', 'layers': [{'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable
+@ data_batch_generator data_batch_generator: None
+@ decay decay: 0.0
+@ epochs epochs: 100
+@ layers_0_Dense layers_0_Dense: {'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'batch_input_shape': [None,
+@ layers_1_Activation layers_1_Activation: {'class_name': 'Activation', 'config': {'name': 'activation_1', 'trainable': True, 'dtype': 'float32
+@ layers_2_Dense layers_2_Dense: {'class_name': 'Dense', 'config': {'name': 'dense_2', 'trainable': True, 'dtype': 'float32', 'units'
+@ layers_3_Activation layers_3_Activation: {'class_name': 'Activation', 'config': {'name': 'activation_2', 'trainable': True, 'dtype': 'float32
+@ loss loss: 'binary_crossentropy'
+@ lr lr: 0.01
+@ metrics metrics: ['acc']
+@ model_type model_type: 'sequential'
+@ momentum momentum: 0.0
+* n_jobs n_jobs: 1
+@ nesterov nesterov: False
+@ optimizer optimizer: 'sgd'
+@ prediction_steps prediction_steps: None
+@ rho rho: None
+@ schedule_decay schedule_decay: None
+@ seed seed: None
+@ steps_per_epoch steps_per_epoch: None
+@ validation_data validation_data: None
+@ validation_steps validation_steps: None
+@ verbose verbose: 0
+* layers_0_Dense__class_name layers_0_Dense__class_name: 'Dense'
+@ layers_0_Dense__config layers_0_Dense__config: {'name': 'dense_1', 'trainable': True, 'batch_input_shape': [None, 784], 'dtype': 'float32', 'units'
+@ layers_0_Dense__config__activation layers_0_Dense__config__activation: 'linear'
+@ layers_0_Dense__config__activity_regularizer layers_0_Dense__config__activity_regularizer: None
+@ layers_0_Dense__config__batch_input_shape layers_0_Dense__config__batch_input_shape: [None, 784]
+@ layers_0_Dense__config__bias_constraint layers_0_Dense__config__bias_constraint: None
+@ layers_0_Dense__config__bias_initializer layers_0_Dense__config__bias_initializer: {'class_name': 'Zeros', 'config': {}}
+* layers_0_Dense__config__bias_initializer__class_name layers_0_Dense__config__bias_initializer__class_name: 'Zeros'
+@ layers_0_Dense__config__bias_initializer__config layers_0_Dense__config__bias_initializer__config: {}
+@ layers_0_Dense__config__bias_regularizer layers_0_Dense__config__bias_regularizer: None
+@ layers_0_Dense__config__dtype layers_0_Dense__config__dtype: 'float32'
+@ layers_0_Dense__config__kernel_constraint layers_0_Dense__config__kernel_constraint: None
+@ layers_0_Dense__config__kernel_initializer layers_0_Dense__config__kernel_initializer: {'class_name': 'VarianceScaling', 'config': {'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'unifo
+* layers_0_Dense__config__kernel_initializer__class_name layers_0_Dense__config__kernel_initializer__class_name: 'VarianceScaling'
+@ layers_0_Dense__config__kernel_initializer__config layers_0_Dense__config__kernel_initializer__config: {'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'uniform', 'seed': None}
+@ layers_0_Dense__config__kernel_initializer__config__distribution layers_0_Dense__config__kernel_initializer__config__distribution: 'uniform'
+@ layers_0_Dense__config__kernel_initializer__config__mode layers_0_Dense__config__kernel_initializer__config__mode: 'fan_avg'
+@ layers_0_Dense__config__kernel_initializer__config__scale layers_0_Dense__config__kernel_initializer__config__scale: 1.0
+@ layers_0_Dense__config__kernel_initializer__config__seed layers_0_Dense__config__kernel_initializer__config__seed: None
+@ layers_0_Dense__config__kernel_regularizer layers_0_Dense__config__kernel_regularizer: None
+* layers_0_Dense__config__name layers_0_Dense__config__name: 'dense_1'
+@ layers_0_Dense__config__trainable layers_0_Dense__config__trainable: True
+@ layers_0_Dense__config__units layers_0_Dense__config__units: 32
+@ layers_0_Dense__config__use_bias layers_0_Dense__config__use_bias: True
+* layers_1_Activation__class_name layers_1_Activation__class_name: 'Activation'
+@ layers_1_Activation__config layers_1_Activation__config: {'name': 'activation_1', 'trainable': True, 'dtype': 'float32', 'activation': 'relu'}
+@ layers_1_Activation__config__activation layers_1_Activation__config__activation: 'relu'
+@ layers_1_Activation__config__dtype layers_1_Activation__config__dtype: 'float32'
+* layers_1_Activation__config__name layers_1_Activation__config__name: 'activation_1'
+@ layers_1_Activation__config__trainable layers_1_Activation__config__trainable: True
+* layers_2_Dense__class_name layers_2_Dense__class_name: 'Dense'
+@ layers_2_Dense__config layers_2_Dense__config: {'name': 'dense_2', 'trainable': True, 'dtype': 'float32', 'units': 10, 'activation': 'linear', 'use
+@ layers_2_Dense__config__activation layers_2_Dense__config__activation: 'linear'
+@ layers_2_Dense__config__activity_regularizer layers_2_Dense__config__activity_regularizer: None
+@ layers_2_Dense__config__bias_constraint layers_2_Dense__config__bias_constraint: None
+@ layers_2_Dense__config__bias_initializer layers_2_Dense__config__bias_initializer: {'class_name': 'Zeros', 'config': {}}
+* layers_2_Dense__config__bias_initializer__class_name layers_2_Dense__config__bias_initializer__class_name: 'Zeros'
+@ layers_2_Dense__config__bias_initializer__config layers_2_Dense__config__bias_initializer__config: {}
+@ layers_2_Dense__config__bias_regularizer layers_2_Dense__config__bias_regularizer: None
+@ layers_2_Dense__config__dtype layers_2_Dense__config__dtype: 'float32'
+@ layers_2_Dense__config__kernel_constraint layers_2_Dense__config__kernel_constraint: None
+@ layers_2_Dense__config__kernel_initializer layers_2_Dense__config__kernel_initializer: {'class_name': 'VarianceScaling', 'config': {'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'unifo
+* layers_2_Dense__config__kernel_initializer__class_name layers_2_Dense__config__kernel_initializer__class_name: 'VarianceScaling'
+@ layers_2_Dense__config__kernel_initializer__config layers_2_Dense__config__kernel_initializer__config: {'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'uniform', 'seed': None}
+@ layers_2_Dense__config__kernel_initializer__config__distribution layers_2_Dense__config__kernel_initializer__config__distribution: 'uniform'
+@ layers_2_Dense__config__kernel_initializer__config__mode layers_2_Dense__config__kernel_initializer__config__mode: 'fan_avg'
+@ layers_2_Dense__config__kernel_initializer__config__scale layers_2_Dense__config__kernel_initializer__config__scale: 1.0
+@ layers_2_Dense__config__kernel_initializer__config__seed layers_2_Dense__config__kernel_initializer__config__seed: None
+@ layers_2_Dense__config__kernel_regularizer layers_2_Dense__config__kernel_regularizer: None
+* layers_2_Dense__config__name layers_2_Dense__config__name: 'dense_2'
+@ layers_2_Dense__config__trainable layers_2_Dense__config__trainable: True
+@ layers_2_Dense__config__units layers_2_Dense__config__units: 10
+@ layers_2_Dense__config__use_bias layers_2_Dense__config__use_bias: True
+* layers_3_Activation__class_name layers_3_Activation__class_name: 'Activation'
+@ layers_3_Activation__config layers_3_Activation__config: {'name': 'activation_2', 'trainable': True, 'dtype': 'float32', 'activation': 'softmax'}
+@ layers_3_Activation__config__activation layers_3_Activation__config__activation: 'softmax'
+@ layers_3_Activation__config__dtype layers_3_Activation__config__dtype: 'float32'
+* layers_3_Activation__config__name layers_3_Activation__config__name: 'activation_2'
+@ layers_3_Activation__config__trainable layers_3_Activation__config__trainable: True
+ Note: @, params eligible for search in searchcv tool.
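In the table above, Adam/Nadam-specific settings (amsgrad, beta_1, beta_2, rho, schedule_decay) are None because the fixture's optimizer is 'sgd'; only lr, momentum, decay and nesterov are consumed. A hedged sketch of the corresponding optimizer construction, assuming Keras 2.3 argument names (some are renamed in later releases); the tiny model is only there to make the example self-contained.

    # Hedged sketch: the optimizer implied by optimizer: 'sgd', lr: 0.01, momentum: 0.0, nesterov: False.
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.optimizers import SGD

    model = Sequential([Dense(1, input_shape=(17,), activation='sigmoid')])
    opt = SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['acc'])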
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras_model01
b
Binary file test-data/keras_model01 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras_model02
b
Binary file test-data/keras_model02 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras_model04
b
Binary file test-data/keras_model04 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras_params04.tabular
--- a/test-data/keras_params04.tabular Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/keras_params04.tabular Mon Dec 16 05:39:20 2019 -0500
[
@@ -7,11 +7,10 @@
 @ config config: {'name': 'sequential_1', 'layers': [{'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable
 @ decay decay: 0.0
 @ epochs epochs: 100
-@ epsilon epsilon: None
 @ layers_0_Dense layers_0_Dense: {'class_name': 'Dense', 'config': {'name': 'dense_1', 'trainable': True, 'batch_input_shape': [None,
-@ layers_1_Activation layers_1_Activation: {'class_name': 'Activation', 'config': {'name': 'activation_1', 'trainable': True, 'activation': 'li
-@ layers_2_Dense layers_2_Dense: {'class_name': 'Dense', 'config': {'name': 'dense_2', 'trainable': True, 'units': 1, 'activation': '
-@ layers_3_Activation layers_3_Activation: {'class_name': 'Activation', 'config': {'name': 'activation_2', 'trainable': True, 'activation': 'li
+@ layers_1_Activation layers_1_Activation: {'class_name': 'Activation', 'config': {'name': 'activation_1', 'trainable': True, 'dtype': 'float32
+@ layers_2_Dense layers_2_Dense: {'class_name': 'Dense', 'config': {'name': 'dense_2', 'trainable': True, 'dtype': 'float32', 'units'
+@ layers_3_Activation layers_3_Activation: {'class_name': 'Activation', 'config': {'name': 'activation_2', 'trainable': True, 'dtype': 'float32
 @ loss loss: 'mean_squared_error'
 @ lr lr: 0.001
 @ metrics metrics: ['mse']
@@ -51,12 +50,13 @@
 @ layers_0_Dense__config__units layers_0_Dense__config__units: 32
 @ layers_0_Dense__config__use_bias layers_0_Dense__config__use_bias: True
 * layers_1_Activation__class_name layers_1_Activation__class_name: 'Activation'
-@ layers_1_Activation__config layers_1_Activation__config: {'name': 'activation_1', 'trainable': True, 'activation': 'linear'}
+@ layers_1_Activation__config layers_1_Activation__config: {'name': 'activation_1', 'trainable': True, 'dtype': 'float32', 'activation': 'linear'}
 @ layers_1_Activation__config__activation layers_1_Activation__config__activation: 'linear'
+@ layers_1_Activation__config__dtype layers_1_Activation__config__dtype: 'float32'
 * layers_1_Activation__config__name layers_1_Activation__config__name: 'activation_1'
 @ layers_1_Activation__config__trainable layers_1_Activation__config__trainable: True
 * layers_2_Dense__class_name layers_2_Dense__class_name: 'Dense'
-@ layers_2_Dense__config layers_2_Dense__config: {'name': 'dense_2', 'trainable': True, 'units': 1, 'activation': 'linear', 'use_bias': True, 'kernel
+@ layers_2_Dense__config layers_2_Dense__config: {'name': 'dense_2', 'trainable': True, 'dtype': 'float32', 'units': 1, 'activation': 'linear', 'use_
 @ layers_2_Dense__config__activation layers_2_Dense__config__activation: 'linear'
 @ layers_2_Dense__config__activity_regularizer layers_2_Dense__config__activity_regularizer: None
 @ layers_2_Dense__config__bias_constraint layers_2_Dense__config__bias_constraint: None
@@ -64,6 +64,7 @@
 * layers_2_Dense__config__bias_initializer__class_name layers_2_Dense__config__bias_initializer__class_name: 'Zeros'
 @ layers_2_Dense__config__bias_initializer__config layers_2_Dense__config__bias_initializer__config: {}
 @ layers_2_Dense__config__bias_regularizer layers_2_Dense__config__bias_regularizer: None
+@ layers_2_Dense__config__dtype layers_2_Dense__config__dtype: 'float32'
 @ layers_2_Dense__config__kernel_constraint layers_2_Dense__config__kernel_constraint: None
 @ layers_2_Dense__config__kernel_initializer layers_2_Dense__config__kernel_initializer: {'class_name': 'VarianceScaling', 'config': {'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'unifo
 * layers_2_Dense__config__kernel_initializer__class_name layers_2_Dense__config__kernel_initializer__class_name: 'VarianceScaling'
@@ -78,8 +79,9 @@
 @ layers_2_Dense__config__units layers_2_Dense__config__units: 1
 @ layers_2_Dense__config__use_bias layers_2_Dense__config__use_bias: True
 * layers_3_Activation__class_name layers_3_Activation__class_name: 'Activation'
-@ layers_3_Activation__config layers_3_Activation__config: {'name': 'activation_2', 'trainable': True, 'activation': 'linear'}
+@ layers_3_Activation__config layers_3_Activation__config: {'name': 'activation_2', 'trainable': True, 'dtype': 'float32', 'activation': 'linear'}
 @ layers_3_Activation__config__activation layers_3_Activation__config__activation: 'linear'
+@ layers_3_Activation__config__dtype layers_3_Activation__config__dtype: 'float32'
 * layers_3_Activation__config__name layers_3_Activation__config__name: 'activation_2'
 @ layers_3_Activation__config__trainable layers_3_Activation__config__trainable: True
  Note: @, params eligible for search in searchcv tool.
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras_prefitted01.zip
b
Binary file test-data/keras_prefitted01.zip has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras_save_weights01.h5
b
Binary file test-data/keras_save_weights01.h5 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/keras_train_eval_y_true02.tabular
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/keras_train_eval_y_true02.tabular Mon Dec 16 05:39:20 2019 -0500
b
@@ -0,0 +1,54 @@
+0
+54
+54
+41
+48
+46
+74
+57
+52
+54
+54
+45
+57
+54
+51
+68
+71
+68
+68
+40
+46
+79
+46
+49
+55
+68
+76
+85
+42
+79
+77
+80
+64
+59
+48
+67
+50
+77
+88
+76
+75
+66
+61
+89
+49
+59
+71
+60
+55
+77
+75
+54
+75
+60
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/lda_model01
b
Binary file test-data/lda_model01 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/lda_model02
b
Binary file test-data/lda_model02 has changed
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/ml_vis01.html
--- a/test-data/ml_vis01.html Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/ml_vis01.html Mon Dec 16 05:39:20 2019 -0500
[
@@ -1,14 +1,31 @@
[truncated diff: the embedded plotly.js library is upgraded from v1.39.4 (Copyright 2012-2018) to v1.51.1 (Copyright 2012-2019); the HTML wrapper gains a window.PlotlyConfig script, a wrapping <div>, and a {"responsive": true} plot config; the figure title remains "Feature Importances"; no newline at end of file.]
b
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/ml_vis02.html
--- a/test-data/ml_vis02.html Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/ml_vis02.html Mon Dec 16 05:39:20 2019 -0500
[
@@ -1,14 +1,31 @@
[truncated diff: same plotly.js v1.39.4 -> v1.51.1 upgrade and HTML wrapper changes as in ml_vis01.html; the figure is titled "Learning Curve" with x-axis "No. of samples" and y-axis "Performance Score"; no newline at end of file.]
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/ml_vis03.html
--- a/test-data/ml_vis03.html Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/ml_vis03.html Mon Dec 16 05:39:20 2019 -0500
(truncated diff: same plotly.js 1.39.4 -> 1.51.1 upgrade and HTML restructuring as above. Figure layout: title "Precision-Recall Curve", x-axis "Recall", y-axis "Precision".)
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/ml_vis04.html
--- a/test-data/ml_vis04.html Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/ml_vis04.html Mon Dec 16 05:39:20 2019 -0500
(truncated diff: same plotly.js 1.39.4 -> 1.51.1 upgrade and HTML restructuring as above. Figure layout: title "Receiver Operating Characteristic (ROC) Curve", x-axis "False Positive Rate", y-axis "True Positive Rate".)
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/ml_vis05.html
--- a/test-data/ml_vis05.html Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/ml_vis05.html Mon Dec 16 05:39:20 2019 -0500
(truncated diff: same plotly.js 1.39.4 -> 1.51.1 upgrade and HTML restructuring as above. Figure layout: untitled, x-axis "Number of features selected", y-axis "Cross validation score".)
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/ml_vis05.png
Binary file test-data/ml_vis05.png has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/model_fit01
Binary file test-data/model_fit01 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/model_fit02
Binary file test-data/model_fit02 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/model_fit02.h5
Binary file test-data/model_fit02.h5 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/named_steps.txt
--- a/test-data/named_steps.txt Thu Nov 07 05:42:25 2019 -0500
+++ b/test-data/named_steps.txt Mon Dec 16 05:39:20 2019 -0500
@@ -1,6 +1,6 @@
-{'preprocessing_1': SelectKBest(k=10, score_func=<function f_regression at 0x113310ea0>), 'estimator': XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
-       colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0,
-       max_depth=3, min_child_weight=1, missing=nan, n_estimators=100,
-       n_jobs=1, nthread=None, objective='reg:linear', random_state=10,
-       reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
-       silent=True, subsample=1)}
\ No newline at end of file
+{'preprocessing_1': SelectKBest(k=10, score_func=<function f_regression at 0x11b4ba8c8>), 'estimator': XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
+             colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0,
+             max_depth=3, min_child_weight=1, missing=nan, n_estimators=100,
+             n_jobs=1, nthread=None, objective='reg:linear', random_state=10,
+             reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
+             silent=True, subsample=1)}
\ No newline at end of file
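The named_steps.txt fixture changes only in the memory address of the f_regression score function and in the repr indentation; it is the printed named_steps mapping of a fitted Pipeline. For context, a minimal hedged sketch of how such a pipeline exposes that mapping (synthetic data, and xgboost must be installed):

    from sklearn.datasets import make_regression
    from sklearn.feature_selection import SelectKBest, f_regression
    from sklearn.pipeline import Pipeline
    from xgboost import XGBRegressor

    X, y = make_regression(n_samples=100, n_features=20, random_state=10)

    pipe = Pipeline([
        ('preprocessing_1', SelectKBest(score_func=f_regression, k=10)),
        ('estimator', XGBRegressor(random_state=10)),
    ])
    pipe.fit(X, y)

    # named_steps maps each step name to its fitted estimator; printing it
    # yields a repr like the fixture above (addresses will differ per run).
    print(pipe.named_steps)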
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/nn_model01
Binary file test-data/nn_model01 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/nn_model02
Binary file test-data/nn_model02 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/nn_model03
Binary file test-data/nn_model03 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline01
Binary file test-data/pipeline01 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline02
Binary file test-data/pipeline02 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline03
Binary file test-data/pipeline03 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline04
Binary file test-data/pipeline04 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline05
Binary file test-data/pipeline05 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline06
Binary file test-data/pipeline06 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline07
Binary file test-data/pipeline07 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline08
Binary file test-data/pipeline08 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline09
Binary file test-data/pipeline09 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline10
Binary file test-data/pipeline10 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline11
Binary file test-data/pipeline11 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline12
Binary file test-data/pipeline12 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline14
Binary file test-data/pipeline14 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline15
Binary file test-data/pipeline15 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline16
Binary file test-data/pipeline16 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline17
Binary file test-data/pipeline17 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline_params05.tabular
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/pipeline_params05.tabular Mon Dec 16 05:39:20 2019 -0500
@@ -0,0 +1,18 @@
+ Parameter Value
+@ bootstrap bootstrap: True
+@ criterion criterion: 'mse'
+@ max_depth max_depth: None
+@ max_features max_features: 'auto'
+@ max_leaf_nodes max_leaf_nodes: None
+@ min_impurity_decrease min_impurity_decrease: 0.0
+@ min_impurity_split min_impurity_split: None
+@ min_samples_leaf min_samples_leaf: 1
+@ min_samples_split min_samples_split: 2
+@ min_weight_fraction_leaf min_weight_fraction_leaf: 0.0
+@ n_estimators n_estimators: 100
+* n_jobs n_jobs: 1
+@ oob_score oob_score: False
+@ random_state random_state: 42
+* verbose verbose: 0
+@ warm_start warm_start: False
+ Note: @, params eligible for search in searchcv tool.
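pipeline_params05.tabular lists get_params() output for a RandomForestRegressor, with '@' marking parameters the searchcv tool can tune and '*' marking excluded ones such as n_jobs and verbose. A rough sketch of how such a table could be derived from get_params(); the marker logic and the blacklist tuple here are assumptions (loosely mirroring the NON_SEARCHABLE constant in train_test_eval.py below), not the Galaxy tool's exact code:

    from sklearn.ensemble import RandomForestRegressor

    # Assumed blacklist for illustration; the real tool's list may differ.
    NON_SEARCHABLE = ('n_jobs', 'pre_dispatch', 'memory', '_path',
                      'nthread', 'callbacks', 'verbose')

    est = RandomForestRegressor(n_estimators=100, random_state=42)
    print('\tParameter\tValue')
    for name, value in sorted(est.get_params().items()):
        # Parameters on the blacklist are flagged '*', everything else '@'.
        marker = '*' if name.endswith(NON_SEARCHABLE) else '@'
        print('%s\t%s\t%s: %r' % (marker, name, name, value))
    print('\tNote: @, params eligible for search in searchcv tool.')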
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/pipeline_params18
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/pipeline_params18 Mon Dec 16 05:39:20 2019 -0500
@@ -0,0 +1,89 @@
+ Parameter Value
+* memory memory: None
+@ powertransformer powertransformer: PowerTransformer(copy=True, method='yeo-johnson', standardize=True)
+* steps "steps: [('powertransformer', PowerTransformer(copy=True, method='yeo-johnson', standardize=True)), ('transformedtargetregressor', TransformedTargetRegressor(check_inverse=True, func=None, inverse_func=None,
+                           regressor=RandomForestRegressor(bootstrap=True,
+                                                           criterion='mse',
+                                                           max_depth=None,
+                                                           max_features='auto',
+                                                           max_leaf_nodes=None,
+                                                           min_impurity_decrease=0.0,
+                                                           min_impurity_split=None,
+                                                           min_samples_leaf=1,
+                                                           min_samples_split=2,
+                                                           min_weight_fraction_leaf=0.0,
+                                                           n_estimators='warn',
+                                                           n_jobs=1,
+                                                           oob_score=False,
+                                                           random_state=10,
+                                                           verbose=0,
+                                                           warm_start=False),
+                           transformer=QuantileTransformer(copy=True,
+                                                           ignore_implicit_zeros=False,
+                                                           n_quantiles=1000,
+                                                           output_distribution='uniform',
+                                                           random_state=10,
+                                                           subsample=100000)))]"
+@ transformedtargetregressor "transformedtargetregressor: TransformedTargetRegressor(check_inverse=True, func=None, inverse_func=None,
+                           regressor=RandomForestRegressor(bootstrap=True,
+                                                           criterion='mse',
+                                                           max_depth=None,
+                                                           max_features='auto',
+                                                           max_leaf_nodes=None,
+                                                           min_impurity_decrease=0.0,
+                                                           min_impurity_split=None,
+                                                           min_samples_leaf=1,
+                                                           min_samples_split=2,
+                                                           min_weight_fraction_leaf=0.0,
+                                                           n_estimators='warn',
+                                                           n_jobs=1,
+                                                           oob_score=False,
+                                                           random_state=10,
+                                                           verbose=0,
+                                                           warm_start=False),
+                           transformer=QuantileTransformer(copy=True,
+                                                           ignore_implicit_zeros=False,
+                                                           n_quantiles=1000,
+                                                           output_distribution='uniform',
+                                                           random_state=10,
+                                                           subsample=100000))"
+* verbose verbose: False
+@ powertransformer__copy powertransformer__copy: True
+@ powertransformer__method powertransformer__method: 'yeo-johnson'
+@ powertransformer__standardize powertransformer__standardize: True
+@ transformedtargetregressor__check_inverse transformedtargetregressor__check_inverse: True
+@ transformedtargetregressor__func transformedtargetregressor__func: None
+@ transformedtargetregressor__inverse_func transformedtargetregressor__inverse_func: None
+@ transformedtargetregressor__regressor "transformedtargetregressor__regressor: RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=None,
+                      max_features='auto', max_leaf_nodes=None,
+                      min_impurity_decrease=0.0, min_impurity_split=None,
+                      min_samples_leaf=1, min_samples_split=2,
+                      min_weight_fraction_leaf=0.0, n_estimators='warn',
+                      n_jobs=1, oob_score=False, random_state=10, verbose=0,
+                      warm_start=False)"
+@ transformedtargetregressor__regressor__bootstrap transformedtargetregressor__regressor__bootstrap: True
+@ transformedtargetregressor__regressor__criterion transformedtargetregressor__regressor__criterion: 'mse'
+@ transformedtargetregressor__regressor__max_depth transformedtargetregressor__regressor__max_depth: None
+@ transformedtargetregressor__regressor__max_features transformedtargetregressor__regressor__max_features: 'auto'
+@ transformedtargetregressor__regressor__max_leaf_nodes transformedtargetregressor__regressor__max_leaf_nodes: None
+@ transformedtargetregressor__regressor__min_impurity_decrease transformedtargetregressor__regressor__min_impurity_decrease: 0.0
+@ transformedtargetregressor__regressor__min_impurity_split transformedtargetregressor__regressor__min_impurity_split: None
+@ transformedtargetregressor__regressor__min_samples_leaf transformedtargetregressor__regressor__min_samples_leaf: 1
+@ transformedtargetregressor__regressor__min_samples_split transformedtargetregressor__regressor__min_samples_split: 2
+@ transformedtargetregressor__regressor__min_weight_fraction_leaf transformedtargetregressor__regressor__min_weight_fraction_leaf: 0.0
+@ transformedtargetregressor__regressor__n_estimators transformedtargetregressor__regressor__n_estimators: 'warn'
+* transformedtargetregressor__regressor__n_jobs transformedtargetregressor__regressor__n_jobs: 1
+@ transformedtargetregressor__regressor__oob_score transformedtargetregressor__regressor__oob_score: False
+@ transformedtargetregressor__regressor__random_state transformedtargetregressor__regressor__random_state: 10
+* transformedtargetregressor__regressor__verbose transformedtargetregressor__regressor__verbose: 0
+@ transformedtargetregressor__regressor__warm_start transformedtargetregressor__regressor__warm_start: False
+@ transformedtargetregressor__transformer "transformedtargetregressor__transformer: QuantileTransformer(copy=True, ignore_implicit_zeros=False, n_quantiles=1000,
+                    output_distribution='uniform', random_state=10,
+                    subsample=100000)"
+@ transformedtargetregressor__transformer__copy transformedtargetregressor__transformer__copy: True
+@ transformedtargetregressor__transformer__ignore_implicit_zeros transformedtargetregressor__transformer__ignore_implicit_zeros: False
+@ transformedtargetregressor__transformer__n_quantiles transformedtargetregressor__transformer__n_quantiles: 1000
+@ transformedtargetregressor__transformer__output_distribution transformedtargetregressor__transformer__output_distribution: 'uniform'
+@ transformedtargetregressor__transformer__random_state transformedtargetregressor__transformer__random_state: 10
+@ transformedtargetregressor__transformer__subsample transformedtargetregressor__transformer__subsample: 100000
+ Note: @, params eligible for search in searchcv tool.
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/prp_model03
Binary file test-data/prp_model03 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/prp_model05
Binary file test-data/prp_model05 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/prp_model08
Binary file test-data/prp_model08 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/prp_model09
Binary file test-data/prp_model09 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/qda_model01
Binary file test-data/qda_model01 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/rfc_model01
Binary file test-data/rfc_model01 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/rfr_model01
Binary file test-data/rfr_model01 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/searchCV01
Binary file test-data/searchCV01 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/searchCV02
Binary file test-data/searchCV02 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/train_test_eval_model01
Binary file test-data/train_test_eval_model01 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/train_test_eval_weights01.h5
Binary file test-data/train_test_eval_weights01.h5 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e test-data/train_test_eval_weights02.h5
Binary file test-data/train_test_eval_weights02.h5 has changed
diff -r c33145a815ee -r 3f3c6dc38f3e train_test_eval.py
--- a/train_test_eval.py Thu Nov 07 05:42:25 2019 -0500
+++ b/train_test_eval.py Mon Dec 16 05:39:20 2019 -0500
@@ -2,6 +2,7 @@
 import joblib
 import json
 import numpy as np
+import os
 import pandas as pd
 import pickle
 import warnings
@@ -29,8 +30,9 @@
 setattr(_search, '_fit_and_score', _fit_and_score)
 setattr(_validation, '_fit_and_score', _fit_and_score)
 
-N_JOBS = int(__import__('os').environ.get('GALAXY_SLOTS', 1))
-CACHE_DIR = './cached'
+N_JOBS = int(os.environ.get('GALAXY_SLOTS', 1))
+CACHE_DIR = os.path.join(os.getcwd(), 'cached')
+del os
 NON_SEARCHABLE = ('n_jobs', 'pre_dispatch', 'memory', '_path',
                   'nthread', 'callbacks')
 ALLOWED_CALLBACKS = ('EarlyStopping', 'TerminateOnNaN', 'ReduceLROnPlateau',
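The hunk above swaps the inline __import__('os') for a regular import, anchors the cache directory inside the job's working directory, and then deletes the os name again, presumably to leave the module-level namespace as it was before. A hedged sketch of how these two constants are typically consumed; the Memory/GridSearchCV wiring below is illustrative, not a copy of train_test_eval.py:

    import os

    from joblib import Memory
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import GridSearchCV
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler

    # Parallelism follows Galaxy's allocated slots, defaulting to 1 outside Galaxy.
    N_JOBS = int(os.environ.get('GALAXY_SLOTS', 1))
    # Cache fitted transformers inside the job working directory rather than './cached'.
    CACHE_DIR = os.path.join(os.getcwd(), 'cached')

    memory = Memory(location=CACHE_DIR, verbose=0)
    pipe = Pipeline(
        [('scaler', StandardScaler()), ('clf', RandomForestClassifier())],
        memory=memory,
    )
    searcher = GridSearchCV(
        pipe,
        param_grid={'clf__n_estimators': [50, 100]},
        n_jobs=N_JOBS,
    )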