Repository 'create_tool_recommendation_model'
hg clone https://toolshed.g2.bx.psu.edu/repos/bgruening/create_tool_recommendation_model

Changeset 6:e94dc7945639 (2022-10-16)
Previous changeset 5:4f7e6612906b (2022-05-06)
Commit message:
planemo upload for repository https://github.com/bgruening/galaxytools/tree/recommendation_training/tools/tool_recommendation_model commit 24bab7a797f53fe4bcc668b18ee0326625486164
modified:
create_tool_recommendation_model.xml
extract_workflow_connections.py
main.py
predict_tool_usage.py
prepare_data.py
test-data/test_tool_usage
utils.py
added:
test-data/test_workflow_connections
train_transformer.py
transformer_network.py
removed:
optimise_hyperparameters.py
test-data/test_workflows
diff -r 4f7e6612906b -r e94dc7945639 create_tool_recommendation_model.xml
--- a/create_tool_recommendation_model.xml Fri May 06 09:05:18 2022 +0000
+++ b/create_tool_recommendation_model.xml Sun Oct 16 11:52:10 2022 +0000
b'@@ -1,32 +1,32 @@\n-<tool id="create_tool_recommendation_model" name="Create a model to recommend tools" version="0.0.4">\n+<tool id="create_tool_recommendation_model" name="Create a model to recommend tools" version="0.0.5">\n     <description>using deep learning</description>\n     <requirements>\n-        <requirement type="package" version="3.9.7">python</requirement>\n-        <requirement type="package" version="2.7.0">tensorflow</requirement>\n-        <requirement type="package" version="2.7.0">keras</requirement>\n+        <requirement type="package" version="3.9.12">python</requirement>\n+        <requirement type="package" version="2.9.1">tensorflow</requirement>\n+        <requirement type="package" version="2.9.0">keras</requirement>\n         <requirement type="package" version="1.0.2">scikit-learn</requirement>\n+        <requirement type="package" version="1.4.2">pandas</requirement>\n         <requirement type="package" version="3.6.0">h5py</requirement>\n-        <requirement type="package" version="1.0.4">csvkit</requirement>\n-        <requirement type="package" version="0.2.5">hyperopt</requirement>\n     </requirements>\n     <version_command>echo "@VERSION@"</version_command>\n     <command detect_errors="aggressive">\n <![CDATA[\n         python \'$__tool_directory__/main.py\'\n+\n             --workflow_file \'$input_tabular_workflows\'\n             --tool_usage_file \'$input_tabular_tool_usage\'\n             --cutoff_date \'$data_parameters.input_cutoff_date\'\n             --maximum_path_length \'$data_parameters.input_maximum_path_length\'\n-            --n_epochs \'$training_parameters.n_epochs\'\n-            --optimize_n_epochs \'$training_parameters.optimize_n_epochs\'\n-            --max_evals \'$training_parameters.max_evals\'\n-            --test_share \'$training_parameters.test_share\'\n-            --batch_size \'$nn_parameters.batch_size\'\n-            --units \'$nn_parameters.units\'\n-            --embedding_size \'$nn_parameters.embedding_size\'\n+            --te_share \'$training_parameters.te_share\'\n+            --tr_batch_size \'$training_parameters.tr_batch_size\'\n+            --te_batch_size \'$training_parameters.te_batch_size\'\n+            --n_train_iter \'$training_parameters.n_train_iter\'\n+            --tr_logging_step \'$training_parameters.tr_logging_step\'\n+            --te_logging_step \'$training_parameters.te_logging_step\'\n+            --n_heads \'$nn_parameters.n_heads\'\n+            --n_embed_dim \'$nn_parameters.n_embed_dim\'\n+            --n_feed_forward_dim \'$nn_parameters.n_feed_forward_dim\'\n             --dropout \'$nn_parameters.dropout\'\n-            --spatial_dropout \'$nn_parameters.spatial_dropout\'\n-            --recurrent_dropout \'$nn_parameters.recurrent_dropout\'\n             --learning_rate \'$nn_parameters.learning_rate\'\n             --output_model \'$outfile_model\'\n ]]>\n@@ -34,37 +34,26 @@\n     <inputs>\n         <param name="input_tabular_workflows" type="data" format="tabular" label="Dataset containing workflows" help="Please provide Galaxy workflows as a tabular file."/>\n         <param name="input_tabular_tool_usage" type="data" format="tabular" label="Dataset containing usage frequencies of tools" help="Please provide tools usage frequencies as a tabular file."/>\n+\n         <section name="data_parameters" title="Data parameters" expanded="False">\n-\n             <param name="input_cutoff_date" type="text" value="2017-12-01" label="Cutoff date" help="Provide a date (in the 
past) in yyyy-mm-dd format. The earliest date from which usage of tools will be extracted. For example, 2017-12-01 specifies that the usage of tools from this date until the data extraction date is extracted. The usage of tools before this date is not considered."/>\n-\n             <param name="input_maximum_path_length" type="integer" value="25" label="Maximum number of tools in a tool sequence" help="Provide an integer between 1 and 25. A workflow is divided into unique paths and this number specifies the maximum number of tools a path can have. Path'..b'" ftype="tabular"/>\n-            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>\n-            <param name="embedding_size" value="1"/>\n+            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>\n+            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/> \n+            <param name="n_embed_dim" value="1,6"/>\n         </test>\n         <test expect_failure="true">\n-            <param name="input_tabular_workflows" value="test_workflows" ftype="tabular"/>\n+            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>\n             <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>\n-            <param name="dropout" value="0.1"/>\n+            <param name="n_feed_forward_dim" value="1,6"/>\n         </test>\n         <test expect_failure="true">\n-            <param name="input_tabular_workflows" value="test_workflows" ftype="tabular"/>\n-            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>\n-            <param name="spatial_dropout" value="0.1"/>\n+            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>\n+            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/> \n+            <param name="dropout" value="0.1,0.2"/>\n         </test>\n         <test expect_failure="true">\n-            <param name="input_tabular_workflows" value="test_workflows" ftype="tabular"/>\n+            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>\n             <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>\n-            <param name="recurrent_dropout" value="0.1"/>\n+            <param name="n_heads" value="0.1"/>\n         </test>\n          <test expect_failure="true">\n-            <param name="input_tabular_workflows" value="test_workflows" ftype="tabular"/>\n+            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>\n             <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>\n-            <param name="learning_rate" value="0.0001"/>\n+            <param name="learning_rate" value="0.0001,0.04"/>\n         </test>\n     </tests>\n     <help>\n@@ -188,7 +191,7 @@\n     - "max_evals": The hyperparameters of the neural network are tuned using a Bayesian optimisation approach and multiple configurations are sampled from different ranges of parameters. The number specified in this parameter is the number of configurations of hyperparameters evaluated to optimise them. 
Higher the number, the longer is the running time of the tool.\n     - "optimize_n_epochs": This number specifies how many iterations would the neural network executes to evaluate each sampled configuration.\n     - "n_epochs": Once the best configuration of hyperparameters has been found, the neural network takes this configuration and runs for "n_epochs" number of times minimising the error to produce a model at the end.\n-    - "test_share": It specifies the size of the test set. For example, if it is 0.5, then the test set is half of the entire data available. It should not be set to more than 0.5. This set is used for evaluating the precision on an unseen set.\n+    - "te_share": It specifies the size of the test set. For example, if it is 0.5, then the test set is half of the entire data available. It should not be set to more than 0.5. This set is used for evaluating the precision on an unseen set.\n     \n 3. Neural network parameters:\n     - "batch_size": The training of the neural network is done using batch learning in this work. The training data is divided into equal batches and for each epoch (a training iteration), all batches of data are trained one after another. A higher or lower value can unsettle the training. Therefore, this parameter should be optimised.\n'
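The help text above describes "te_share" as the fraction of the data held out for testing. As a minimal illustration (not the repository's code; prepare_data.py now imports scikit-learn's train_test_split, but the arrays and values below are made up), a share of 0.2 splits the encoded paths like this:

    import numpy as np
    from sklearn.model_selection import train_test_split

    # toy stand-ins for encoded tool sequences and their next-tool labels
    paths = np.arange(100).reshape(50, 2)
    labels = np.arange(50)

    te_share = 0.2  # same meaning as the tool's --te_share parameter
    train_data, test_data, train_labels, test_labels = train_test_split(
        paths, labels, test_size=te_share, shuffle=True
    )
    print(train_data.shape, test_data.shape)  # (40, 2) (10, 2)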
diff -r 4f7e6612906b -r e94dc7945639 extract_workflow_connections.py
--- a/extract_workflow_connections.py Fri May 06 09:05:18 2022 +0000
+++ b/extract_workflow_connections.py Sun Oct 16 11:52:10 2022 +0000
@@ -2,26 +2,26 @@
 Extract workflow paths from the tabular file containing
 input and output tools
 """
-
-import csv
 import random
 
 import utils
 
 
 class ExtractWorkflowConnections:
+
     def __init__(self):
         """ Init method. """
 
-    def collect_standard_connections(self, row):
-        published = row[8]
-        deleted = row[9]
-        has_errors = row[10]
-        if published == "t" and deleted == "f" and has_errors == "f":
-            return True
-        return False
+    def process_raw_files(self, wf_path, tool_popu_path, config):
+        """
+        Remove pipe from workflows and popularity tabular files
+        """
+        print("Removing pipe from tabular datasets...")
+        wf_frame = utils.remove_pipe(wf_path)
+        tool_popu_frame = utils.remove_pipe(tool_popu_path)
+        return wf_frame, tool_popu_frame
 
-    def read_tabular_file(self, raw_file_path):
+    def read_tabular_file(self, wf_dataframe, config):
         """
         Read tabular file and extract workflow connections
         """
@@ -32,17 +32,18 @@
         workflow_paths = list()
         unique_paths = dict()
         standard_connections = dict()
-        with open(raw_file_path, "rt") as workflow_connections_file:
-            workflow_connections = csv.reader(workflow_connections_file, delimiter="\t")
-            for index, row in enumerate(workflow_connections):
-                wf_id = str(row[0])
-                in_tool = row[3].strip()
-                out_tool = row[6].strip()
+        for index, row in wf_dataframe.iterrows():
+            row = row.tolist()
+            row = [str(item).strip() for item in row]
+            wf_id = str(row[0])
+            if row[1] > config["cutoff_date"]:
+                in_tool = row[3]
+                out_tool = row[6]
                 if wf_id not in workflows:
                     workflows[wf_id] = list()
                 if out_tool and in_tool and out_tool != in_tool:
                     workflows[wf_id].append((out_tool, in_tool))
-                    qc = self.collect_standard_connections(row)
+                    qc = self.__collect_standard_connections(row)
                     if qc:
                         i_t = utils.format_tool_id(in_tool)
                         o_t = utils.format_tool_id(out_tool)
@@ -54,15 +55,15 @@
         wf_ctr = 0
         for wf_id in workflows:
             wf_ctr += 1
-            workflow_parents[wf_id] = self.read_workflow(wf_id, workflows[wf_id])
+            workflow_parents[wf_id] = self.__read_workflow(wf_id, workflows[wf_id])
 
         for wf_id in workflow_parents:
             flow_paths = list()
             parents_graph = workflow_parents[wf_id]
-            roots, leaves = self.get_roots_leaves(parents_graph)
+            roots, leaves = self.__get_roots_leaves(parents_graph)
             for root in roots:
                 for leaf in leaves:
-                    paths = self.find_tool_paths_workflow(parents_graph, root, leaf)
+                    paths = self.__find_tool_paths_workflow(parents_graph, root, leaf)
                     # reverse the paths as they are computed from leaves to roots leaf
                     paths = [tool_path for tool_path in paths]
                     if len(paths) > 0:
@@ -84,13 +85,20 @@
         unique_paths = list(workflow_paths_dup.split("\n"))
         unique_paths = list(filter(None, unique_paths))
         random.shuffle(unique_paths)
+        print("unique_paths: {}".format(len(unique_paths)))
         no_dup_paths = list(set(unique_paths))
+        print("no_dup_paths: {}".format(len(no_dup_paths)))
+        return no_dup_paths, standard_connections
 
-        print("Finding compatible next tools...")
-        compatible_next_tools = self.set_compatible_next_tools(no_dup_paths)
-        return unique_paths, compatible_next_tools, standard_connections
+    def __collect_standard_connections(self, row):
+        published = row[8].strip()
+        deleted = row[9].strip()
+        has_errors = row[10].strip()
+        if published == "t" and deleted == "f" and has_errors == "f":
+            return True
+        return False
 
-    def set_compatible_next_tools(self, workflow_paths):
+    def __set_compatible_next_tools(self, workflow_paths):
         """
         Find next tools for each tool
         """
@@ -109,7 +117,7 @@
             next_tools[tool] = ",".join(list(set(next_tools[tool].split(","))))
         return next_tools
 
-    def read_workflow(self, wf_id, workflow_rows):
+    def __read_workflow(self, wf_id, workflow_rows):
         """
         Read all connections for a workflow
         """
@@ -123,7 +131,7 @@
                 tool_parents[out_tool].append(in_tool)
         return tool_parents
 
-    def get_roots_leaves(self, graph):
+    def __get_roots_leaves(self, graph):
         roots = list()
         leaves = list()
         all_parents = list()
@@ -135,7 +143,7 @@
         leaves = list(set(children).difference(set(all_parents)))
         return roots, leaves
 
-    def find_tool_paths_workflow(self, graph, start, end, path=[]):
+    def __find_tool_paths_workflow(self, graph, start, end, path=[]):
         path = path + [end]
         if start == end:
             return [path]
@@ -143,9 +151,7 @@
         if end in graph:
             for node in graph[end]:
                 if node not in path:
-                    new_tools_paths = self.find_tool_paths_workflow(
-                        graph, start, node, path
-                    )
+                    new_tools_paths = self.__find_tool_paths_workflow(graph, start, node, path)
                     for tool_path in new_tools_paths:
                         path_list.append(tool_path)
         return path_list
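The private helpers above walk each workflow's child-to-parents graph from its leaves back to its roots. A self-contained sketch of that traversal, using a hypothetical three-tool graph, shows how the comma-separated tool paths are produced:

    # child -> parents mapping for one workflow (toy data, not from the test files)
    tool_parents = {"Cut1": ["Grep1"], "Paste1": ["Cut1", "addValue"]}

    def get_roots_leaves(graph):
        # roots appear only as parents; leaves appear only as children
        all_parents = [p for parents in graph.values() for p in parents]
        children = list(graph.keys())
        roots = list(set(all_parents).difference(children))
        leaves = list(set(children).difference(all_parents))
        return roots, leaves

    def find_tool_paths(graph, start, end, path=None):
        # paths are built from the leaf back towards the root, as in the original
        path = (path or []) + [end]
        if start == end:
            return [path]
        found = []
        for node in graph.get(end, []):
            if node not in path:
                found.extend(find_tool_paths(graph, start, node, path))
        return found

    roots, leaves = get_roots_leaves(tool_parents)
    for root in roots:
        for leaf in leaves:
            for p in find_tool_paths(tool_parents, root, leaf):
                print(",".join(reversed(p)))  # e.g. Grep1,Cut1,Paste1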
diff -r 4f7e6612906b -r e94dc7945639 main.py
--- a/main.py Fri May 06 09:05:18 2022 +0000
+++ b/main.py Sun Oct 16 11:52:10 2022 +0000
b'@@ -1,260 +1,36 @@\n """\n Predict next tools in the Galaxy workflows\n-using machine learning (recurrent neural network)\n+using deep learning learning (Transformers)\n """\n-\n import argparse\n import time\n \n import extract_workflow_connections\n-import keras.callbacks as callbacks\n-import numpy as np\n-import optimise_hyperparameters\n import prepare_data\n-import utils\n-\n-\n-class PredictTool:\n-    def __init__(self, num_cpus):\n-        """ Init method. """\n-\n-    def find_train_best_network(\n-        self,\n-        network_config,\n-        reverse_dictionary,\n-        train_data,\n-        train_labels,\n-        test_data,\n-        test_labels,\n-        n_epochs,\n-        class_weights,\n-        usage_pred,\n-        standard_connections,\n-        tool_freq,\n-        tool_tr_samples,\n-    ):\n-        """\n-        Define recurrent neural network and train sequential data\n-        """\n-        # get tools with lowest representation\n-        lowest_tool_ids = utils.get_lowest_tools(tool_freq)\n-\n-        print("Start hyperparameter optimisation...")\n-        hyper_opt = optimise_hyperparameters.HyperparameterOptimisation()\n-        best_params, best_model = hyper_opt.train_model(\n-            network_config,\n-            reverse_dictionary,\n-            train_data,\n-            train_labels,\n-            test_data,\n-            test_labels,\n-            tool_tr_samples,\n-            class_weights,\n-        )\n-\n-        # define callbacks\n-        early_stopping = callbacks.EarlyStopping(\n-            monitor="loss",\n-            mode="min",\n-            verbose=1,\n-            min_delta=1e-1,\n-            restore_best_weights=True,\n-        )\n-        predict_callback_test = PredictCallback(\n-            test_data,\n-            test_labels,\n-            reverse_dictionary,\n-            n_epochs,\n-            usage_pred,\n-            standard_connections,\n-            lowest_tool_ids,\n-        )\n-\n-        callbacks_list = [predict_callback_test, early_stopping]\n-        batch_size = int(best_params["batch_size"])\n-\n-        print("Start training on the best model...")\n-        train_performance = dict()\n-        trained_model = best_model.fit_generator(\n-            utils.balanced_sample_generator(\n-                train_data,\n-                train_labels,\n-                batch_size,\n-                tool_tr_samples,\n-                reverse_dictionary,\n-            ),\n-            steps_per_epoch=len(train_data) // batch_size,\n-            epochs=n_epochs,\n-            callbacks=callbacks_list,\n-            validation_data=(test_data, test_labels),\n-            verbose=2,\n-            shuffle=True,\n-        )\n-        train_performance["validation_loss"] = np.array(\n-            trained_model.history["val_loss"]\n-        )\n-        train_performance["precision"] = predict_callback_test.precision\n-        train_performance["usage_weights"] = predict_callback_test.usage_weights\n-        train_performance[\n-            "published_precision"\n-        ] = predict_callback_test.published_precision\n-        train_performance[\n-            "lowest_pub_precision"\n-        ] = predict_callback_test.lowest_pub_precision\n-        train_performance[\n-            "lowest_norm_precision"\n-        ] = predict_callback_test.lowest_norm_precision\n-        train_performance["train_loss"] = np.array(trained_model.history["loss"])\n-        train_performance["model"] = best_model\n-        
train_performance["best_parameters"] = best_params\n-        return train_performance\n-\n-\n-class PredictCallback(callbacks.Callback):\n-    def __init__(\n-        self,\n-        test_data,\n-        test_labels,\n-        reverse_data_dictionary,\n-        n_epochs,\n-        usg_scores,\n-        standard_connections,\n-        lowest_tool_ids,\n-    ):\n-        self.test_data = test_data\n-        self.test_labels = test_labels\n-        self.reverse_data_dictionary = reverse_data_dictionary\n-        self.precision = list()\n-        self.usage_weights = list()\n-        self.publish'..b'pochs = int(args["n_epochs"])\n-    optimize_n_epochs = int(args["optimize_n_epochs"])\n-    max_evals = int(args["max_evals"])\n-    test_share = float(args["test_share"])\n-    batch_size = args["batch_size"]\n-    units = args["units"]\n-    embedding_size = args["embedding_size"]\n-    dropout = args["dropout"]\n-    spatial_dropout = args["spatial_dropout"]\n-    recurrent_dropout = args["recurrent_dropout"]\n-    learning_rate = args["learning_rate"]\n-    num_cpus = 16\n \n     config = {\n-        "cutoff_date": cutoff_date,\n-        "maximum_path_length": maximum_path_length,\n-        "n_epochs": n_epochs,\n-        "optimize_n_epochs": optimize_n_epochs,\n-        "max_evals": max_evals,\n-        "test_share": test_share,\n-        "batch_size": batch_size,\n-        "units": units,\n-        "embedding_size": embedding_size,\n-        "dropout": dropout,\n-        "spatial_dropout": spatial_dropout,\n-        "recurrent_dropout": recurrent_dropout,\n-        "learning_rate": learning_rate,\n+        \'cutoff_date\': cutoff_date,\n+        \'maximum_path_length\': maximum_path_length,\n+        \'n_train_iter\': n_train_iter,\n+        \'n_heads\': n_heads,\n+        \'feed_forward_dim\': feed_forward_dim,\n+        \'embedding_dim\': embedding_dim,\n+        \'dropout\': dropout,\n+        \'learning_rate\': learning_rate,\n+        \'te_share\': te_share,\n+        \'te_logging_step\': te_logging_step,\n+        \'tr_logging_step\': tr_logging_step,\n+        \'tr_batch_size\': tr_batch_size,\n+        \'te_batch_size\': te_batch_size,\n+        \'trained_model_path\': trained_model_path\n     }\n-\n+    print("Preprocessing workflows...")\n     # Extract and process workflows\n     connections = extract_workflow_connections.ExtractWorkflowConnections()\n-    (\n-        workflow_paths,\n-        compatible_next_tools,\n-        standard_connections,\n-    ) = connections.read_tabular_file(workflows_path)\n+    # Process raw workflow file\n+    wf_dataframe, usage_df = connections.process_raw_files(workflows_path, tool_usage_path, config)\n+    workflow_paths, pub_conn = connections.read_tabular_file(wf_dataframe, config)\n     # Process the paths from workflows\n     print("Dividing data...")\n-    data = prepare_data.PrepareData(maximum_path_length, test_share)\n-    (\n-        train_data,\n-        train_labels,\n-        test_data,\n-        test_labels,\n-        data_dictionary,\n-        reverse_dictionary,\n-        class_weights,\n-        usage_pred,\n-        train_tool_freq,\n-        tool_tr_samples,\n-    ) = data.get_data_labels_matrices(\n-        workflow_paths,\n-        tool_usage_path,\n-        cutoff_date,\n-        compatible_next_tools,\n-        standard_connections,\n-    )\n-    # find the best model and start training\n-    predict_tool = PredictTool(num_cpus)\n-    # start training with weighted classes\n-    print("Training with weighted classes and 
samples ...")\n-    results_weighted = predict_tool.find_train_best_network(\n-        config,\n-        reverse_dictionary,\n-        train_data,\n-        train_labels,\n-        test_data,\n-        test_labels,\n-        n_epochs,\n-        class_weights,\n-        usage_pred,\n-        standard_connections,\n-        train_tool_freq,\n-        tool_tr_samples,\n-    )\n-    utils.save_model(\n-        results_weighted,\n-        data_dictionary,\n-        compatible_next_tools,\n-        trained_model_path,\n-        class_weights,\n-        standard_connections,\n-    )\n+    data = prepare_data.PrepareData(maximum_path_length, te_share)\n+    train_data, train_labels, test_data, test_labels, f_dict, r_dict, c_wts, c_tools, tr_tool_freq = data.get_data_labels_matrices(workflow_paths, usage_df, cutoff_date, pub_conn)\n+    print(train_data.shape, train_labels.shape, test_data.shape, test_labels.shape)\n+    train_transformer.create_enc_transformer(train_data, train_labels, test_data, test_labels, f_dict, r_dict, c_wts, c_tools, pub_conn, tr_tool_freq, config)\n     end_time = time.time()\n     print("Program finished in %s seconds" % str(end_time - start_time))\n'
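main.py now delegates training to train_transformer.create_enc_transformer with the new n_heads, embedding_dim and feed_forward_dim settings; the added train_transformer.py and transformer_network.py are not expanded on this page. Purely as an illustration of how those three parameters usually combine in a Keras (TensorFlow 2.9) encoder block, with all shapes and defaults below assumed rather than taken from the repository:

    import tensorflow as tf
    from tensorflow.keras import layers

    def encoder_block(seq_len=25, n_embed_dim=64, n_heads=4, feed_forward_dim=128, dropout=0.2):
        # sequence of embedded tool ids in, same-shaped contextualised sequence out
        inputs = layers.Input(shape=(seq_len, n_embed_dim))
        attn = layers.MultiHeadAttention(num_heads=n_heads, key_dim=n_embed_dim)(inputs, inputs)
        x = layers.LayerNormalization(epsilon=1e-6)(inputs + layers.Dropout(dropout)(attn))
        ffn = layers.Dense(feed_forward_dim, activation="relu")(x)
        ffn = layers.Dense(n_embed_dim)(ffn)
        return tf.keras.Model(inputs, layers.LayerNormalization(epsilon=1e-6)(x + layers.Dropout(dropout)(ffn)))

    model = encoder_block()
    model.summary()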
diff -r 4f7e6612906b -r e94dc7945639 optimise_hyperparameters.py
--- a/optimise_hyperparameters.py Fri May 06 09:05:18 2022 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,147 +0,0 @@
-"""
-Find the optimal combination of hyperparameters
-"""
-
-import numpy as np
-import utils
-from hyperopt import fmin, hp, STATUS_OK, tpe, Trials
-from tensorflow.keras.callbacks import EarlyStopping
-from tensorflow.keras.layers import Dense, Dropout, Embedding, GRU, SpatialDropout1D
-from tensorflow.keras.models import Sequential
-from tensorflow.keras.optimizers import RMSprop
-
-
-class HyperparameterOptimisation:
-    def __init__(self):
-        """ Init method. """
-
-    def train_model(
-        self,
-        config,
-        reverse_dictionary,
-        train_data,
-        train_labels,
-        test_data,
-        test_labels,
-        tool_tr_samples,
-        class_weights,
-    ):
-        """
-        Train a model and report accuracy
-        """
-        # convert items to integer
-        l_batch_size = list(map(int, config["batch_size"].split(",")))
-        l_embedding_size = list(map(int, config["embedding_size"].split(",")))
-        l_units = list(map(int, config["units"].split(",")))
-
-        # convert items to float
-        l_learning_rate = list(map(float, config["learning_rate"].split(",")))
-        l_dropout = list(map(float, config["dropout"].split(",")))
-        l_spatial_dropout = list(map(float, config["spatial_dropout"].split(",")))
-        l_recurrent_dropout = list(map(float, config["recurrent_dropout"].split(",")))
-
-        optimize_n_epochs = int(config["optimize_n_epochs"])
-
-        # get dimensions
-        dimensions = len(reverse_dictionary) + 1
-        best_model_params = dict()
-        early_stopping = EarlyStopping(
-            monitor="val_loss",
-            mode="min",
-            verbose=1,
-            min_delta=1e-1,
-            restore_best_weights=True,
-        )
-
-        # specify the search space for finding the best combination of parameters using Bayesian optimisation
-        params = {
-            "embedding_size": hp.quniform(
-                "embedding_size", l_embedding_size[0], l_embedding_size[1], 1
-            ),
-            "units": hp.quniform("units", l_units[0], l_units[1], 1),
-            "batch_size": hp.quniform(
-                "batch_size", l_batch_size[0], l_batch_size[1], 1
-            ),
-            "learning_rate": hp.loguniform(
-                "learning_rate", np.log(l_learning_rate[0]), np.log(l_learning_rate[1])
-            ),
-            "dropout": hp.uniform("dropout", l_dropout[0], l_dropout[1]),
-            "spatial_dropout": hp.uniform(
-                "spatial_dropout", l_spatial_dropout[0], l_spatial_dropout[1]
-            ),
-            "recurrent_dropout": hp.uniform(
-                "recurrent_dropout", l_recurrent_dropout[0], l_recurrent_dropout[1]
-            ),
-        }
-
-        def create_model(params):
-            model = Sequential()
-            model.add(
-                Embedding(dimensions, int(params["embedding_size"]), mask_zero=True)
-            )
-            model.add(SpatialDropout1D(params["spatial_dropout"]))
-            model.add(
-                GRU(
-                    int(params["units"]),
-                    dropout=params["dropout"],
-                    recurrent_dropout=params["recurrent_dropout"],
-                    return_sequences=True,
-                    activation="elu",
-                )
-            )
-            model.add(Dropout(params["dropout"]))
-            model.add(
-                GRU(
-                    int(params["units"]),
-                    dropout=params["dropout"],
-                    recurrent_dropout=params["recurrent_dropout"],
-                    return_sequences=False,
-                    activation="elu",
-                )
-            )
-            model.add(Dropout(params["dropout"]))
-            model.add(Dense(2 * dimensions, activation="sigmoid"))
-            optimizer_rms = RMSprop(lr=params["learning_rate"])
-            batch_size = int(params["batch_size"])
-            model.compile(
-                loss=utils.weighted_loss(class_weights), optimizer=optimizer_rms
-            )
-            print(model.summary())
-            model_fit = model.fit(
-                utils.balanced_sample_generator(
-                    train_data,
-                    train_labels,
-                    batch_size,
-                    tool_tr_samples,
-                    reverse_dictionary,
-                ),
-                steps_per_epoch=len(train_data) // batch_size,
-                epochs=optimize_n_epochs,
-                callbacks=[early_stopping],
-                validation_data=(test_data, test_labels),
-                verbose=2,
-                shuffle=True,
-            )
-            return {
-                "loss": model_fit.history["val_loss"][-1],
-                "status": STATUS_OK,
-                "model": model,
-            }
-
-        # minimize the objective function using the set of parameters above
-        trials = Trials()
-        learned_params = fmin(
-            create_model,
-            params,
-            trials=trials,
-            algo=tpe.suggest,
-            max_evals=int(config["max_evals"]),
-        )
-        best_model = trials.results[np.argmin([r["loss"] for r in trials.results])][
-            "model"
-        ]
-        # set the best params with respective values
-        for item in learned_params:
-            item_val = learned_params[item]
-            best_model_params[item] = item_val
-        return best_model_params, best_model
diff -r 4f7e6612906b -r e94dc7945639 predict_tool_usage.py
--- a/predict_tool_usage.py Fri May 06 09:05:18 2022 +0000
+++ b/predict_tool_usage.py Sun Oct 16 11:52:10 2022 +0000
@@ -3,9 +3,6 @@
 """
 
 import collections
-import csv
-import os
-import warnings
 
 import numpy as np
 import utils
@@ -13,40 +10,36 @@
 from sklearn.pipeline import Pipeline
 from sklearn.svm import SVR
 
-warnings.filterwarnings("ignore")
-
-main_path = os.getcwd()
-
 
 class ToolPopularity:
+
     def __init__(self):
         """ Init method. """
 
-    def extract_tool_usage(self, tool_usage_file, cutoff_date, dictionary):
+    def extract_tool_usage(self, tool_usage_df, cutoff_date, dictionary):
         """
         Extract the tool usage over time for each tool
         """
         tool_usage_dict = dict()
         all_dates = list()
         all_tool_list = list(dictionary.keys())
-        with open(tool_usage_file, "rt") as usage_file:
-            tool_usage = csv.reader(usage_file, delimiter="\t")
-            for index, row in enumerate(tool_usage):
-                row = [item.strip() for item in row]
-                if (str(row[1]).strip() > cutoff_date) is True:
-                    tool_id = utils.format_tool_id(row[0])
-                    if tool_id in all_tool_list:
-                        all_dates.append(row[1])
-                        if tool_id not in tool_usage_dict:
-                            tool_usage_dict[tool_id] = dict()
-                            tool_usage_dict[tool_id][row[1]] = int(row[2])
+        for index, row in tool_usage_df.iterrows():
+            row = row.tolist()
+            row = [str(item).strip() for item in row]
+            if (row[1] > cutoff_date) is True:
+                tool_id = utils.format_tool_id(row[0])
+                if tool_id in all_tool_list:
+                    all_dates.append(row[1])
+                    if tool_id not in tool_usage_dict:
+                        tool_usage_dict[tool_id] = dict()
+                        tool_usage_dict[tool_id][row[1]] = int(float(row[2]))
+                    else:
+                        curr_date = row[1]
+                        # merge the usage of different version of tools into one
+                        if curr_date in tool_usage_dict[tool_id]:
+                            tool_usage_dict[tool_id][curr_date] += int(float(row[2]))
                         else:
-                            curr_date = row[1]
-                            # merge the usage of different version of tools into one
-                            if curr_date in tool_usage_dict[tool_id]:
-                                tool_usage_dict[tool_id][curr_date] += int(row[2])
-                            else:
-                                tool_usage_dict[tool_id][curr_date] = int(row[2])
+                            tool_usage_dict[tool_id][curr_date] = int(float(row[2]))
         # get unique dates
         unique_dates = list(set(all_dates))
         for tool in tool_usage_dict:
@@ -66,25 +59,17 @@
         """
         epsilon = 0.0
         cv = 5
-        s_typ = "neg_mean_absolute_error"
+        s_typ = 'neg_mean_absolute_error'
         n_jobs = 4
         s_error = 1
         tr_score = False
         try:
-            pipe = Pipeline(steps=[("regressor", SVR(gamma="scale"))])
+            pipe = Pipeline(steps=[('regressor', SVR(gamma='scale'))])
             param_grid = {
-                "regressor__kernel": ["rbf", "poly", "linear"],
-                "regressor__degree": [2, 3],
+                'regressor__kernel': ['rbf', 'poly', 'linear'],
+                'regressor__degree': [2, 3]
             }
-            search = GridSearchCV(
-                pipe,
-                param_grid,
-                cv=cv,
-                scoring=s_typ,
-                n_jobs=n_jobs,
-                error_score=s_error,
-                return_train_score=tr_score,
-            )
+            search = GridSearchCV(pipe, param_grid, cv=cv, scoring=s_typ, n_jobs=n_jobs, error_score=s_error, return_train_score=tr_score)
             search.fit(x_reshaped, y_reshaped.ravel())
             model = search.best_estimator_
             # set the next time point to get prediction for
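The grid search above is unchanged in substance, only reformatted onto one line. A self-contained sketch with a made-up usage series shows how the same pipeline and grid predict the next time point for a single tool (the smaller cv value is only because the toy series is short):

    import numpy as np
    from sklearn.model_selection import GridSearchCV
    from sklearn.pipeline import Pipeline
    from sklearn.svm import SVR

    # made-up monthly usage counts for one tool, indexed 0..5
    usage = np.array([120.0, 135.0, 150.0, 170.0, 160.0, 180.0])
    x = np.arange(len(usage)).reshape(-1, 1)

    pipe = Pipeline(steps=[('regressor', SVR(gamma='scale'))])
    param_grid = {
        'regressor__kernel': ['rbf', 'poly', 'linear'],
        'regressor__degree': [2, 3]
    }
    search = GridSearchCV(pipe, param_grid, cv=3, scoring='neg_mean_absolute_error',
                          n_jobs=1, error_score=1, return_train_score=False)
    search.fit(x, usage)
    next_point = np.array([[len(usage)]])  # the next time point to predict
    print(search.best_estimator_.predict(next_point))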
diff -r 4f7e6612906b -r e94dc7945639 prepare_data.py
--- a/prepare_data.py Fri May 06 09:05:18 2022 +0000
+++ b/prepare_data.py Sun Oct 16 11:52:10 2022 +0000
b'@@ -5,16 +5,15 @@\n """\n \n import collections\n-import os\n import random\n \n import numpy as np\n import predict_tool_usage\n-\n-main_path = os.getcwd()\n+from sklearn.model_selection import train_test_split\n \n \n class PrepareData:\n+\n     def __init__(self, max_seq_length, test_data_share):\n         """ Init method. """\n         self.max_tool_sequence_len = max_seq_length\n@@ -26,7 +25,7 @@\n         """\n         tokens = list()\n         raw_paths = workflow_paths\n-        raw_paths = [x.replace("\\n", "") for x in raw_paths]\n+        raw_paths = [x.replace("\\n", \'\') for x in raw_paths]\n         for item in raw_paths:\n             split_items = item.split(",")\n             for token in split_items:\n@@ -34,12 +33,7 @@\n                     tokens.append(token)\n         tokens = list(set(tokens))\n         tokens = np.array(tokens)\n-        tokens = np.reshape(\n-            tokens,\n-            [\n-                -1,\n-            ],\n-        )\n+        tokens = np.reshape(tokens, [-1, ])\n         return tokens, raw_paths\n \n     def create_new_dict(self, new_data_dict):\n@@ -62,116 +56,110 @@\n         """\n         count = collections.Counter(words).most_common()\n         dictionary = dict()\n-        for word, _ in count:\n+        for index, (word, _) in enumerate(count):\n+            word = word.lstrip()\n+            word = word.rstrip()\n             dictionary[word] = len(dictionary) + 1\n-            word = word.strip()\n-        dictionary, reverse_dictionary = self.assemble_dictionary(\n-            dictionary, old_data_dictionary\n-        )\n+        dictionary, reverse_dictionary = self.assemble_dictionary(dictionary, old_data_dictionary)\n         return dictionary, reverse_dictionary\n \n     def decompose_paths(self, paths, dictionary):\n         """\n         Decompose the paths to variable length sub-paths keeping the first tool fixed\n         """\n+        max_len = 0\n         sub_paths_pos = list()\n         for index, item in enumerate(paths):\n             tools = item.split(",")\n             len_tools = len(tools)\n-            if len_tools <= self.max_tool_sequence_len:\n-                for window in range(1, len_tools):\n-                    sequence = tools[0: window + 1]\n-                    tools_pos = [\n-                        str(dictionary[str(tool_item)]) for tool_item in sequence\n-                    ]\n-                    if len(tools_pos) > 1:\n-                        sub_paths_pos.append(",".join(tools_pos))\n+            if len_tools > max_len:\n+                max_len = len_tools\n+            if len_tools < self.max_tool_sequence_len:\n+                sequence = tools[0: len_tools]\n+                tools_pos = [str(dictionary[str(tool_item)]) for tool_item in sequence]\n+                if len(tools_pos) > 1:\n+                    sub_paths_pos.append(",".join(tools_pos))\n         sub_paths_pos = list(set(sub_paths_pos))\n+        print("Max length of tools: ", max_len)\n         return sub_paths_pos\n \n-    def prepare_paths_labels_dictionary(\n-        self, dictionary, reverse_dictionary, paths, compatible_next_tools\n-    ):\n-        """\n-        Create a dictionary of sequences with their labels for training and test paths\n-        """\n-        paths_labels = dict()\n-        random.shuffle(paths)\n-        for item in paths:\n-            if item and item not in "":\n-                tools = item.split(",")\n-                label = tools[-1]\n-                train_tools = tools[: len(tools) 
- 1]\n-                last_but_one_name = reverse_dictionary[int(train_tools[-1])]\n-                try:\n-                    compatible_tools = compatible_next_tools[last_but_one_name].split(\n-                        ","\n-                    )\n-                except Exception:\n-                    continue\n-                if len(compatible_tools) > 0:\n-                    compatible_tools_ids = [\n-                        str(dictionary[x]) for x in compatible_tools\n-                    ]\n-                    compatible_tools_ids.append(labe'..b'st_tool)]] += 1\n-        return last_tool_freq\n+        sorted_dict = dict(sorted(last_tool_freq.items(), key=lambda kv: kv[1], reverse=True))\n+        return sorted_dict\n \n     def get_toolid_samples(self, train_data, l_tool_freq):\n         l_tool_tr_samples = dict()\n@@ -248,22 +261,13 @@\n                     l_tool_tr_samples[last_tool_id].append(index)\n         return l_tool_tr_samples\n \n-    def get_data_labels_matrices(\n-        self,\n-        workflow_paths,\n-        tool_usage_path,\n-        cutoff_date,\n-        compatible_next_tools,\n-        standard_connections,\n-        old_data_dictionary={},\n-    ):\n+    def get_data_labels_matrices(self, workflow_paths, usage_df, cutoff_date, standard_connections, old_data_dictionary={}):\n         """\n         Convert the training and test paths into corresponding numpy matrices\n         """\n         processed_data, raw_paths = self.process_workflow_paths(workflow_paths)\n-        dictionary, rev_dict = self.create_data_dictionary(\n-            processed_data, old_data_dictionary\n-        )\n+        dictionary, rev_dict = self.create_data_dictionary(processed_data, old_data_dictionary)\n+\n         num_classes = len(dictionary)\n \n         print("Raw paths: %d" % len(raw_paths))\n@@ -274,50 +278,26 @@\n         random.shuffle(all_unique_paths)\n \n         print("Creating dictionaries...")\n-        multilabels_paths = self.prepare_paths_labels_dictionary(\n-            dictionary, rev_dict, all_unique_paths, compatible_next_tools\n-        )\n+        multilabels_paths, compatible_tools, d_size = self.prepare_input_target_paths(dictionary, rev_dict, all_unique_paths)\n \n-        print("Complete data: %d" % len(multilabels_paths))\n-        train_paths_dict, test_paths_dict = self.split_test_train_data(\n-            multilabels_paths\n-        )\n-\n-        print("Train data: %d" % len(train_paths_dict))\n-        print("Test data: %d" % len(test_paths_dict))\n+        print("Complete data: %d" % d_size)\n \n         print("Padding train and test data...")\n-        # pad training and test data with leading zeros\n-        test_data, test_labels = self.pad_paths(\n-            test_paths_dict, num_classes, standard_connections, rev_dict\n-        )\n-        train_data, train_labels = self.pad_paths(\n-            train_paths_dict, num_classes, standard_connections, rev_dict\n-        )\n+        # pad training and test data with trailing zeros\n+        train_data, train_labels, test_data, test_labels = self.pad_paths_one_tool_target(multilabels_paths, compatible_tools, d_size, rev_dict, dictionary)\n+\n+        print("Train data: ", train_data.shape)\n+        print("Test data: ", test_data.shape)\n \n         print("Estimating sample frequency...")\n-        l_tool_freq = self.get_train_last_tool_freq(train_paths_dict, rev_dict)\n-        l_tool_tr_samples = self.get_toolid_samples(train_data, l_tool_freq)\n+        tr_tool_freq = 
self.get_train_tool_labels_freq(train_labels, rev_dict)\n \n         # Predict tools usage\n         print("Predicting tools\' usage...")\n         usage_pred = predict_tool_usage.ToolPopularity()\n-        usage = usage_pred.extract_tool_usage(tool_usage_path, cutoff_date, dictionary)\n+        usage = usage_pred.extract_tool_usage(usage_df, cutoff_date, dictionary)\n         tool_usage_prediction = usage_pred.get_pupularity_prediction(usage)\n         t_pred_usage = self.get_predicted_usage(dictionary, tool_usage_prediction)\n-\n         # get class weights using the predicted usage for each tool\n         class_weights = self.assign_class_weights(num_classes, t_pred_usage)\n-\n-        return (\n-            train_data,\n-            train_labels,\n-            test_data,\n-            test_labels,\n-            dictionary,\n-            rev_dict,\n-            class_weights,\n-            t_pred_usage,\n-            l_tool_freq,\n-            l_tool_tr_samples,\n-        )\n+        return train_data, train_labels, test_data, test_labels, dictionary, rev_dict, class_weights, compatible_tools, tr_tool_freq\n'
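The hunk above switches to trailing-zero padding, with the last tool of each sub-path used as the target. A simplified, hypothetical illustration of that encoding follows; the real pad_paths_one_tool_target additionally receives the compatible-tools mapping and returns both train and test matrices:

    import numpy as np

    max_len = 25  # the tool's "maximum number of tools in a tool sequence"
    paths = ["3,7,12", "5,9"]  # hypothetical tool-id sub-paths

    data = np.zeros((len(paths), max_len), dtype=np.int64)
    labels = np.zeros(len(paths), dtype=np.int64)
    for i, p in enumerate(paths):
        ids = [int(t) for t in p.split(",")]
        data[i, :len(ids) - 1] = ids[:-1]  # the prefix; the rest stays 0 (trailing padding)
        labels[i] = ids[-1]                # last tool of the path becomes the target
    print(data[:, :5])
    print(labels)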
diff -r 4f7e6612906b -r e94dc7945639 test-data/test_tool_usage
--- a/test-data/test_tool_usage Fri May 06 09:05:18 2022 +0000
+++ b/test-data/test_tool_usage Sun Oct 16 11:52:10 2022 +0000
b'@@ -1,93 +1,2001 @@\n- toolshed.g2.bx.psu.edu/repos/devteam/column_maker/Add_a_column1/1.6                                                                                 \t2022-02-01\t50054\n- upload1                                                                                                                                             \t2022-02-01\t43169\n- toolshed.g2.bx.psu.edu/repos/bgruening/text_processing/tp_find_and_replace/1.1.3                                                                    \t2022-02-01\t39139\n- Cut1                                                                                                                                                \t2022-02-01\t29046\n- toolshed.g2.bx.psu.edu/repos/iuc/snpsift/snpSift_extractFields/4.3+t.galaxy0                                                                        \t2022-02-01\t27481\n- toolshed.g2.bx.psu.edu/repos/iuc/featurecounts/featurecounts/2.0.1+galaxy2                                                                          \t2022-02-01\t17579\n- toolshed.g2.bx.psu.edu/repos/bgruening/align_it/ctb_alignit/1.0.4+galaxy0                                                                           \t2022-02-01\t15902\n- toolshed.g2.bx.psu.edu/repos/devteam/fastqc/fastqc/0.73+galaxy0                                                                                     \t2022-02-01\t14479\n- toolshed.g2.bx.psu.edu/repos/iuc/lofreq_call/lofreq_call/2.1.5+galaxy1                                                                              \t2022-02-01\t13513\n- toolshed.g2.bx.psu.edu/repos/iuc/rgrnastar/rna_star/2.7.8a+galaxy0                                                                                  \t2022-02-01\t13197\n- toolshed.g2.bx.psu.edu/repos/iuc/bcftools_annotate/bcftools_annotate/1.10                                                                           \t2022-02-01\t13176\n- toolshed.g2.bx.psu.edu/repos/iuc/lofreq_filter/lofreq_filter/2.1.5+galaxy0                                                                          \t2022-02-01\t12931\n- Filter1                                                                                                                                             \t2022-02-01\t12490\n- __UNZIP_COLLECTION__                                                                                                                                \t2022-02-01\t12042\n- toolshed.g2.bx.psu.edu/repos/iuc/datamash_ops/datamash_ops/1.1.0                                                                                    \t2022-02-01\t11810\n- toolshed.g2.bx.psu.edu/repos/bgruening/text_processing/tp_replace_in_line/1.1.2                                                                     \t2022-02-01\t11502\n- toolshed.g2.bx.psu.edu/repos/devteam/vcfvcfintersect/vcfvcfintersect/1.0.0_rc3+galaxy0                                                              \t2022-02-01\t11487\n- toolshed.g2.bx.psu.edu/repos/devteam/samtools_stats/samtools_stats/2.0.2+galaxy2                                                                    \t2022-02-01\t10705\n- toolshed.g2.bx.psu.edu/repos/iuc/snpeff_sars_cov_2/snpeff_sars_cov_2/4.5covid19                                                                     \t2022-02-01\t10569\n- toolshed.g2.bx.psu.edu/repos/iuc/lofreq_indelqual/lofreq_indelqual/2.1.5+galaxy0                                                                    \t2022-02-01\t10545\n- toolshed.g2.bx.psu.edu/repos/iuc/lofreq_viterbi/lofreq_viterbi/2.1.5+galaxy0                                  
                                      \t2022-02-01\t10543\n- toolshed.g2.bx.psu.edu/repos/iuc/samtools_view/samtools_view/1.9+galaxy2                                                                            \t2022-02-01\t10456\n- toolshed.g2.bx.psu.edu/repos/bgruening/replace_column_by_key_value_file/replace_column_with_key_value_file/0.2                                      \t2022-02-01\t9307\n- toolshed.g2.bx.psu.edu/repos/iuc/qualimap_bamqc/qualimap_bamqc/2.2.2d+galaxy3                                     '..b'_nspdk/nspdk_sparse/9.2.2                                                                            | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/iuc/busco/busco/3.0.2+galaxy1                                                                                          | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/iuc/seqtk/seqtk_mergepe/1.3.1                                                                                          | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/devteam/vcfbedintersect/vcfbedintersect/1.0.0_rc3+galaxy0                                                              | 2020-09-01 |1.0\n+ qual_stats_boxplot                                                                                                                                  | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/peptide_shaker/1.11.0                                                                            | 2020-09-01 |1.0\n+ interactive_tool_askomics                                                                                                                           | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/ebi-gxa/seurat_scale_data/seurat_scale_data/2.3.1+galaxy1                                                              | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/ebi-gxa/seurat_export_cellbrowser/seurat_export_cellbrowser/2.3.1+galaxy0                                              | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/bgruening/sklearn_model_validation/sklearn_model_validation/1.0.8.2                                                    | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/ebi-gxa/scanpy_filter_genes/scanpy_filter_genes/1.4.3+galaxy9                                                          | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/iuc/mothur_chimera_slayer/mothur_chimera_slayer/1.39.5.0                                                               | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/bgruening/autodock_vina_prepare_ligand/prepare_ligand/1.5.7+galaxy0                                                    | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/bgruening/sklearn_train_test_split/sklearn_train_test_split/1.0.8.2                                                    | 2020-09-01 |1.0\n+ tombo_text_output                                                                                                                                   | 2020-09-01 |1.0\n+ hgv_linkToGProfile                                                                                                                                  | 2020-09-01 |1.0\n+ CONVERTER_wiggle_to_interval_0                                                                                                                      | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/iuc/valet/valet/1.0.0                                                                                                  | 2020-09-01 |1.0\n+ 
toolshed.g2.bx.psu.edu/repos/nml/sistr_cmd/sistr_cmd/1.0.2                                                                                          | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/devteam/vcfaddinfo/vcfaddinfo/1.0.0_rc1+galaxy0                                                                        | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/iuc/ngsutils_bam_filter/ngsutils_bam_filter/0.5.7.0                                                                    | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/devteam/fastqtofasta/fastq_to_fasta_python/1.0.0                                                                       | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/earlhaminst/ensembl_get_sequences/get_sequences/0.1.2                                                                  | 2020-09-01 |1.0\n+ toolshed.g2.bx.psu.edu/repos/bgruening/rxdock_rbcavity/rxdock_rbcavity/0.1.5                                                                        | 2020-09-01 |1.0\n+(51301 rows)||\n'
diff -r 4f7e6612906b -r e94dc7945639 test-data/test_workflow_connections
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/test_workflow_connections Sun Oct 16 11:52:10 2022 +0000
b'@@ -0,0 +1,1101 @@\n+ wf_id  | wf_updated |  in_id  |                                                                in_tool                                                                 |                  in_tool_v                   | out_id  |                                                                out_tool                                                                |                  out_tool_v                  | published | deleted | has_errors \n+--------+------------+---------+----------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------+---------+----------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------+-----------+---------+------------||||||||||\n+      3 | 2013-02-07 |5.0| Grep1                                                                                                                                  | 1.0.1                                        |7.0| Remove beginning1                                                                                                                      | 1.0.0                                        | f         | t       | f\n+      3 | 2013-02-07 |6.0| Cut1                                                                                                                                   | 1.0.1                                        |8.0| addValue                                                                                                                               | 1.0.0                                        | f         | t       | f\n+      3 | 2013-02-07 |7.0| Remove beginning1                                                                                                                      | 1.0.0                                        |6.0| Cut1                                                                                                                                   | 1.0.1                                        | f         | t       | f\n+      3 | 2013-02-07 |7.0| Remove beginning1                                                                                                                      | 1.0.0                                        |9.0| Cut1                                                                                                                                   | 1.0.1                                        | f         | t       | f\n+      3 | 2013-02-07 |8.0| addValue                                                                                                                               | 1.0.0                                        |11.0| Paste1                                                                                                                                 | 1.0.0                                        | f         | t       | f\n+      3 | 2013-02-07 |9.0| Cut1                                                                                                                                   | 1.0.1                                        |11.0| Paste1                                                                                                                                 | 1.0.0                                        | f         | t       | f\n+      3 | 2013-02-07 |11.0| Paste1                                          
                                                                                       | 1.0.0                                        |10.0| addValue                                                                                                                               | 1.0.0                                        | f         | t       | f\n+      3 | 2013-02-07 |12.0|                                                                              '..b'                       | f         | f       | f\n+ 157077 | 2022-08-08 |2419704.0|                                                                                                                                        |                                              |2419708.0| toolshed.g2.bx.psu.edu/repos/nml/spades/spades/3.12.0+galaxy1                                                                          | 3.12.0+galaxy1                               | f         | f       | f\n+ 157077 | 2022-08-08 |2419705.0|                                                                                                                                        |                                              |2419707.0| toolshed.g2.bx.psu.edu/repos/iuc/unicycler/unicycler/0.4.6.0                                                                           | 0.4.6.0                                      | f         | f       | f\n+ 157077 | 2022-08-08 |2419705.0|                                                                                                                                        |                                              |2419708.0| toolshed.g2.bx.psu.edu/repos/nml/spades/spades/3.12.0+galaxy1                                                                          | 3.12.0+galaxy1                               | f         | f       | f\n+ 157077 | 2022-08-08 |2419706.0|                                                                                                                                        |                                              |2419707.0| toolshed.g2.bx.psu.edu/repos/iuc/unicycler/unicycler/0.4.6.0                                                                           | 0.4.6.0                                      | f         | f       | f\n+ 157077 | 2022-08-08 |2419706.0|                                                                                                                                        |                                              |2419708.0| toolshed.g2.bx.psu.edu/repos/nml/spades/spades/3.12.0+galaxy1                                                                          | 3.12.0+galaxy1                               | f         | f       | f\n+ 157077 | 2022-08-08 |2419707.0| toolshed.g2.bx.psu.edu/repos/iuc/unicycler/unicycler/0.4.6.0                                                                           | 0.4.6.0                                      |2419709.0| toolshed.g2.bx.psu.edu/repos/iuc/bandage/bandage_info/0.8.1+galaxy1                                                                    | 0.8.1+galaxy1                                | f         | f       | f\n+ 157077 | 2022-08-08 |2419707.0| toolshed.g2.bx.psu.edu/repos/iuc/unicycler/unicycler/0.4.6.0                                                                           | 0.4.6.0                                      |2419710.0| toolshed.g2.bx.psu.edu/repos/iuc/bandage/bandage_image/0.8.1+galaxy2                                                                   | 0.8.1+galaxy2                                | f         
| f       | f\n+ 157077 | 2022-08-08 |2419708.0| toolshed.g2.bx.psu.edu/repos/nml/spades/spades/3.12.0+galaxy1                                                                          | 3.12.0+galaxy1                               |2419711.0| toolshed.g2.bx.psu.edu/repos/iuc/bandage/bandage_image/0.8.1+galaxy2                                                                   | 0.8.1+galaxy2                                | f         | f       | f\n+ 157077 | 2022-08-08 |2419708.0| toolshed.g2.bx.psu.edu/repos/nml/spades/spades/3.12.0+galaxy1                                                                          | 3.12.0+galaxy1                               |2419712.0| toolshed.g2.bx.psu.edu/repos/iuc/bandage/bandage_info/0.8.1+galaxy1                                                                    | 0.8.1+galaxy1                                | f         | f       | f\n+(2980814 rows)||||||||||\n'
b
diff -r 4f7e6612906b -r e94dc7945639 test-data/test_workflows
--- a/test-data/test_workflows Fri May 06 09:05:18 2022 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
b
b'@@ -1,499 +0,0 @@\n-3\t2013-02-07\t7\t Remove beginning1                                                                                                                      \t 1.0.0                                        \t6\t Cut1                                                                                                                                   \t 1.0.1                                        \t f         \t t       \t f\n-3\t2013-02-07\t7\t Remove beginning1                                                                                                                      \t 1.0.0                                        \t9\t Cut1                                                                                                                                   \t 1.0.1                                        \t f         \t t       \t f\n-3\t2013-02-07\t8\t addValue                                                                                                                               \t 1.0.0                                        \t11\t Paste1                                                                                                                                 \t 1.0.0                                        \t f         \t t       \t f\n-3\t2013-02-07\t9\t Cut1                                                                                                                                   \t 1.0.1                                        \t11\t Paste1                                                                                                                                 \t 1.0.0                                        \t f         \t t       \t f\n-3\t2013-02-07\t11\t Paste1                                                                                                                                 \t 1.0.0                                        \t10\t addValue                                                                                                                               \t 1.0.0                                        \t f         \t t       \t f\n-3\t2013-02-07\t12\t                                                                                                                                        \t                                              \t5\t Grep1                                                                                                                                  \t 1.0.1                                        \t f         \t t       \t f\n-4\t2013-02-07\t13\t cat1                                                                                                                                   \t 1.0.0                                        \t22\t barchart_gnuplot                                                                                                                       \t 1.0.0                                        \t f         \t t       \t f\n-4\t2013-02-07\t14\t bedtools_intersectBed                                                                                                                  \t                                              \t16\t wc_gnu                                                                                                                                 \t 1.0.0                                        \t f         \t t       \t f\n-4\t2013-02-07\t15\t bedtools_intersectBed                                                                                                           
       \t                                              \t17\t sort1                                                                                                                                  \t 1.0.1                                        \t f         \t t       \t f\n-4\t2013-02-07\t16\t wc_gnu                                                                                                                                 \t 1.0.0                                        \t18\t addValue                                                          '..b'0                                        \t276\t mergeCols1                                                                                                                             \t 1.0.1                                        \t f         \t f       \t f\n-31\t2013-02-18\t276\t mergeCols1                                                                                                                             \t 1.0.1                                        \t277\t Cut1                                                                                                                                   \t 1.0.1                                        \t f         \t f       \t f\n-31\t2013-02-18\t274\t                                                                                                                                        \t                                              \t275\t addValue                                                                                                                               \t 1.0.0                                        \t f         \t f       \t f\n-31\t2013-02-18\t275\t addValue                                                                                                                               \t 1.0.0                                        \t276\t mergeCols1                                                                                                                             \t 1.0.1                                        \t f         \t f       \t f\n-31\t2013-02-18\t276\t mergeCols1                                                                                                                             \t 1.0.1                                        \t277\t Cut1                                                                                                                                   \t 1.0.1                                        \t f         \t f       \t f\n-31\t2013-02-18\t274\t                                                                                                                                        \t                                              \t275\t addValue                                                                                                                               \t 1.0.0                                        \t f         \t f       \t f\n-31\t2013-02-18\t275\t addValue                                                                                                                               \t 1.0.0                                        \t276\t mergeCols1                                                                                                                             \t 1.0.1                                        \t f         \t f       \t f\n-31\t2013-02-18\t276\t mergeCols1                                                                                                                             \t 1.0.1        
                                \t277\t Cut1                                                                                                                                   \t 1.0.1                                        \t f         \t f       \t f\n-31\t2013-02-18\t274\t                                                                                                                                        \t                                              \t275\t addValue                                                                                                                               \t 1.0.0                                        \t f         \t f       \t f\n-31\t2013-02-18\t275\t addValue                                                                                                                               \t 1.0.0                                        \t276\t mergeCols1                                                                                                                             \t 1.0.1                                        \t f         \t f       \t f\n'
b
diff -r 4f7e6612906b -r e94dc7945639 train_transformer.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/train_transformer.py Sun Oct 16 11:52:10 2022 +0000
[
@@ -0,0 +1,73 @@
+import tensorflow as tf
+import transformer_network
+import utils
+from tensorflow.keras.layers import (Dense, Dropout, GlobalAveragePooling1D,
+                                     Input)
+from tensorflow.keras.models import Model
+
+
+def create_model(vocab_size, config):
+    embed_dim = config["embedding_dim"]
+    ff_dim = config["feed_forward_dim"]
+    max_len = config["maximum_path_length"]
+    dropout = config["dropout"]
+
+    inputs = Input(shape=(max_len,))
+    embedding_layer = transformer_network.TokenAndPositionEmbedding(max_len, vocab_size, embed_dim)
+    x = embedding_layer(inputs)
+    transformer_block = transformer_network.TransformerBlock(embed_dim, config["n_heads"], ff_dim)
+    x, weights = transformer_block(x)
+    x = GlobalAveragePooling1D()(x)
+    x = Dropout(dropout)(x)
+    x = Dense(ff_dim, activation="relu")(x)
+    x = Dropout(dropout)(x)
+    outputs = Dense(vocab_size, activation="sigmoid")(x)
+    return Model(inputs=inputs, outputs=[outputs, weights])
+
+
+def create_enc_transformer(train_data, train_labels, test_data, test_labels, f_dict, r_dict, c_wts, c_tools, pub_conn, tr_t_freq, config):
+    print("Train transformer...")
+    vocab_size = len(f_dict) + 1
+
+    enc_optimizer = tf.keras.optimizers.Adam(learning_rate=config["learning_rate"])
+
+    model = create_model(vocab_size, config)
+
+    u_tr_y_labels, u_tr_y_labels_dict = utils.get_u_tr_labels(train_labels)
+    u_te_y_labels, u_te_y_labels_dict = utils.get_u_tr_labels(test_labels)
+
+    trained_on_labels = [int(item) for item in list(u_tr_y_labels_dict.keys())]
+
+    epo_tr_batch_loss = list()
+    epo_tr_batch_acc = list()
+    all_sel_tool_ids = list()
+
+    te_lowest_t_ids = utils.get_low_freq_te_samples(test_data, test_labels, tr_t_freq)
+    tr_log_step = config["tr_logging_step"]
+    te_log_step = config["te_logging_step"]
+    n_train_steps = config["n_train_iter"]
+    te_batch_size = config["te_batch_size"]
+    tr_batch_size = config["tr_batch_size"]
+    sel_tools = list()
+    for batch in range(n_train_steps):
+        x_train, y_train, sel_tools = utils.sample_balanced_tr_y(train_data, train_labels, u_tr_y_labels_dict, tr_batch_size, tr_t_freq, sel_tools)
+        all_sel_tool_ids.extend(sel_tools)
+        with tf.GradientTape() as model_tape:
+            prediction, att_weights = model(x_train, training=True)
+            tr_loss, tr_cat_loss = utils.compute_loss(y_train, prediction)
+            tr_acc = tf.reduce_mean(utils.compute_acc(y_train, prediction))
+        trainable_vars = model.trainable_variables
+        model_gradients = model_tape.gradient(tr_loss, trainable_vars)
+        enc_optimizer.apply_gradients(zip(model_gradients, trainable_vars))
+        epo_tr_batch_loss.append(tr_loss.numpy())
+        epo_tr_batch_acc.append(tr_acc.numpy())
+        if (batch + 1) % tr_log_step == 0:
+            print("Total train data size: ", train_data.shape, train_labels.shape)
+            print("Batch train data size: ", x_train.shape, y_train.shape)
+            print("At Step {}/{} training loss:".format(str(batch + 1), str(n_train_steps)))
+            print(tr_loss.numpy())
+        if (batch + 1) % te_log_step == 0:
+            print("Predicting on test data...")
+            utils.validate_model(test_data, test_labels, te_batch_size, model, f_dict, r_dict, u_te_y_labels_dict, trained_on_labels, te_lowest_t_ids)
+    print("Saving model after training for {} steps".format(n_train_steps))
+    utils.save_model_file(model, r_dict, c_wts, c_tools, pub_conn, config["trained_model_path"])
b
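For orientation, train_transformer.py is the new training entry point: create_model assembles an encoder-only network (token-plus-position embedding, one TransformerBlock, global average pooling, and a sigmoid output over the tool vocabulary), while create_enc_transformer runs a custom tf.GradientTape loop that draws class-balanced batches with utils.sample_balanced_tr_y, logs the training loss every tr_logging_step iterations, validates every te_logging_step iterations, and finally writes the weights and lookup dictionaries with utils.save_model_file. The snippet below is a minimal, hypothetical smoke test of create_model alone, assuming the repository files are importable; the configuration keys mirror the ones read above, but the vocabulary size, batch size, and concrete values are illustrative assumptions, not values taken from this changeset.

# Hypothetical smoke test for create_model (not part of the changeset).
import numpy as np
import train_transformer  # assumes the repository files are on the Python path

config = {
    "embedding_dim": 64,        # --n_embed_dim
    "feed_forward_dim": 128,    # --n_feed_forward_dim
    "maximum_path_length": 25,  # --maximum_path_length
    "dropout": 0.2,             # --dropout
    "n_heads": 4,               # --n_heads
}
vocab_size = 500  # illustrative; in the tool this is len(f_dict) + 1

model = train_transformer.create_model(vocab_size, config)
# a batch of 8 padded tool paths with random token ids
dummy_paths = np.random.randint(0, vocab_size, size=(8, config["maximum_path_length"])).astype("float32")
predictions, attention = model(dummy_paths, training=False)
print(predictions.shape)  # (8, vocab_size): per-tool recommendation scores
print(attention.shape)    # (8, n_heads, max_len, max_len): self-attention weights

Because create_model returns the attention weights as a second model output, the training loop above can unpack prediction and att_weights from a single forward pass without any extra plumbing.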
diff -r 4f7e6612906b -r e94dc7945639 transformer_network.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/transformer_network.py Sun Oct 16 11:52:10 2022 +0000
[
@@ -0,0 +1,39 @@
+import tensorflow as tf
+from tensorflow.keras.layers import (Dense, Dropout, Embedding, Layer,
+                                     LayerNormalization, MultiHeadAttention)
+from tensorflow.keras.models import Sequential
+
+
+class TransformerBlock(Layer):
+    def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
+        super(TransformerBlock, self).__init__()
+        self.att = MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim, dropout=rate)
+        self.ffn = Sequential(
+            [Dense(ff_dim, activation="relu"), Dense(embed_dim)]
+        )
+        self.layernorm1 = LayerNormalization(epsilon=1e-6)
+        self.layernorm2 = LayerNormalization(epsilon=1e-6)
+        self.dropout1 = Dropout(rate)
+        self.dropout2 = Dropout(rate)
+
+    def call(self, inputs, training):
+        attn_output, attention_scores = self.att(inputs, inputs, inputs, return_attention_scores=True, training=training)
+        attn_output = self.dropout1(attn_output, training=training)
+        out1 = self.layernorm1(inputs + attn_output)
+        ffn_output = self.ffn(out1)
+        ffn_output = self.dropout2(ffn_output, training=training)
+        return self.layernorm2(out1 + ffn_output), attention_scores
+
+
+class TokenAndPositionEmbedding(Layer):
+    def __init__(self, maxlen, vocab_size, embed_dim):
+        super(TokenAndPositionEmbedding, self).__init__()
+        self.token_emb = Embedding(input_dim=vocab_size, output_dim=embed_dim, mask_zero=True)
+        self.pos_emb = Embedding(input_dim=maxlen, output_dim=embed_dim, mask_zero=True)
+
+    def call(self, x):
+        maxlen = tf.shape(x)[-1]
+        positions = tf.range(start=0, limit=maxlen, delta=1)
+        positions = self.pos_emb(positions)
+        x = self.token_emb(x)
+        return x + positions
b
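Both layers are conventional Keras components: TokenAndPositionEmbedding adds a learned positional embedding to the token embedding (index 0 is treated as padding via mask_zero=True), and TransformerBlock applies multi-head self-attention followed by a two-layer feed-forward network, each wrapped in dropout and a residual layer normalisation, returning the raw attention scores alongside the transformed sequence. A small, hypothetical shape check, assuming only that transformer_network.py is importable and TensorFlow 2.9 is installed (all sizes below are arbitrary):

# Hypothetical shape check for the new layers (illustrative sizes).
import tensorflow as tf
import transformer_network

batch, max_len, vocab_size, embed_dim = 4, 25, 500, 64
token_ids = tf.random.uniform((batch, max_len), maxval=vocab_size, dtype=tf.int32)

emb = transformer_network.TokenAndPositionEmbedding(max_len, vocab_size, embed_dim)
block = transformer_network.TransformerBlock(embed_dim, num_heads=2, ff_dim=128)

x = emb(token_ids)                      # (4, 25, 64): token + position embeddings
out, scores = block(x, training=False)  # (4, 25, 64) and (4, 2, 25, 25)
print(out.shape, scores.shape)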
diff -r 4f7e6612906b -r e94dc7945639 utils.py
--- a/utils.py Fri May 06 09:05:18 2022 +0000
+++ b/utils.py Sun Oct 16 11:52:10 2022 +0000
[
b'@@ -1,11 +1,15 @@\n import json\n+import os\n import random\n \n import h5py\n import numpy as np\n+import pandas as pd\n import tensorflow as tf\n-from numpy.random import choice\n-from tensorflow.keras import backend\n+\n+binary_ce = tf.keras.losses.BinaryCrossentropy()\n+binary_acc = tf.keras.metrics.BinaryAccuracy()\n+categorical_ce = tf.keras.metrics.CategoricalCrossentropy(from_logits=True)\n \n \n def read_file(file_path):\n@@ -17,6 +21,43 @@\n     return file_content\n \n \n+def write_file(file_path, content):\n+    """\n+    Write a file\n+    """\n+    remove_file(file_path)\n+    with open(file_path, "w") as json_file:\n+        json_file.write(json.dumps(content))\n+\n+\n+def save_h5_data(inp, tar, filename):\n+    hf_file = h5py.File(filename, \'w\')\n+    hf_file.create_dataset("input", data=inp)\n+    hf_file.create_dataset("target", data=tar)\n+    hf_file.close()\n+\n+\n+def get_low_freq_te_samples(te_data, te_target, tr_freq_dict):\n+    lowest_tool_te_ids = list()\n+    lowest_t_ids = get_lowest_tools(tr_freq_dict)\n+    for i, te_labels in enumerate(te_target):\n+        tools_pos = np.where(te_labels > 0)[0]\n+        tools_pos = [str(int(item)) for item in tools_pos]\n+        intersection = list(set(tools_pos).intersection(set(lowest_t_ids)))\n+        if len(intersection) > 0:\n+            lowest_tool_te_ids.append(i)\n+            lowest_t_ids = [item for item in lowest_t_ids if item not in intersection]\n+    return lowest_tool_te_ids\n+\n+\n+def save_processed_workflows(file_path, unique_paths):\n+    workflow_paths_unique = ""\n+    for path in unique_paths:\n+        workflow_paths_unique += path + "\\n"\n+    with open(file_path, "w") as workflows_file:\n+        workflows_file.write(workflow_paths_unique)\n+\n+\n def format_tool_id(tool_link):\n     """\n     Extract tool id from tool link\n@@ -26,158 +67,166 @@\n     return tool_id\n \n \n-def set_trained_model(dump_file, model_values):\n-    """\n-    Create an h5 file with the trained weights and associated dicts\n-    """\n-    hf_file = h5py.File(dump_file, "w")\n-    for key in model_values:\n-        value = model_values[key]\n-        if key == "model_weights":\n-            for idx, item in enumerate(value):\n-                w_key = "weight_" + str(idx)\n-                if w_key in hf_file:\n-                    hf_file.modify(w_key, item)\n-                else:\n-                    hf_file.create_dataset(w_key, data=item)\n-        else:\n-            if key in hf_file:\n-                hf_file.modify(key, json.dumps(value))\n-            else:\n-                hf_file.create_dataset(key, data=json.dumps(value))\n+def save_model_file(model, r_dict, c_wts, c_tools, s_conn, model_file):\n+    model.save_weights(model_file, save_format="h5")\n+    hf_file = h5py.File(model_file, \'r+\')\n+    model_values = {\n+        "reverse_dict": r_dict,\n+        "class_weights": c_wts,\n+        "compatible_tools": c_tools,\n+        "standard_connections": s_conn\n+    }\n+    for k in model_values:\n+        hf_file.create_dataset(k, data=json.dumps(model_values[k]))\n     hf_file.close()\n \n \n-def weighted_loss(class_weights):\n-    """\n-    Create a weighted loss function. 
Penalise the misclassification\n-    of classes more with the higher usage\n-    """\n-    weight_values = list(class_weights.values())\n-    weight_values.extend(weight_values)\n+def remove_file(file_path):\n+    if os.path.exists(file_path):\n+        os.remove(file_path)\n+\n \n-    def weighted_binary_crossentropy(y_true, y_pred):\n-        # add another dimension to compute dot product\n-        expanded_weights = tf.expand_dims(weight_values, axis=-1)\n-        bce = backend.binary_crossentropy(y_true, y_pred)\n-        return backend.dot(bce, expanded_weights)\n-\n-    return weighted_binary_crossentropy\n+def verify_oversampling_freq(oversampled_tr_data, rev_dict):\n+    """\n+    Compute the frequency of tool sequences after oversampling\n+    """\n+    freq_dict = dict()\n+    freq_dict_names = dict()\n+    for tr_data in oversampled_tr_data:\n+        t_pos = np.where(tr_data > 0)[0'..b' te_batch_size, model, f_dict, r_dict, ulabels_te_dict, tr_labels, lowest_t_ids):\n+    te_x_batch, y_train_batch, _ = sample_balanced_te_y(te_x, te_y, ulabels_te_dict, te_batch_size)\n+    print("Total test data size: ", te_x.shape, te_y.shape)\n+    print("Batch test data size: ", te_x_batch.shape, y_train_batch.shape)\n+    te_pred_batch, _ = model(te_x_batch, training=False)\n+    test_err, _ = compute_loss(y_train_batch, te_pred_batch)\n+    print("Test loss:")\n+    print(test_err.numpy())\n+    print("Test finished")\n \n \n def get_lowest_tools(l_tool_freq, fraction=0.25):\n@@ -187,98 +236,7 @@\n     return lowest_ids\n \n \n-def verify_model(\n-    model,\n-    x,\n-    y,\n-    reverse_data_dictionary,\n-    usage_scores,\n-    standard_conn,\n-    lowest_tool_ids,\n-    topk_list=[1, 2, 3],\n-):\n-    """\n-    Verify the model on test data\n-    """\n-    print("Evaluating performance on test data...")\n-    print("Test data size: %d" % len(y))\n-    size = y.shape[0]\n-    precision = np.zeros([len(y), len(topk_list)])\n-    usage_weights = np.zeros([len(y), len(topk_list)])\n-    epo_pub_prec = np.zeros([len(y), len(topk_list)])\n-    epo_lowest_tools_pub_prec = list()\n-    epo_lowest_tools_norm_prec = list()\n-    lowest_counter = 0\n-    # loop over all the test samples and find prediction precision\n-    for i in range(size):\n-        lowest_pub_topk = list()\n-        lowest_norm_topk = list()\n-        actual_classes_pos = np.where(y[i] > 0)[0]\n-        test_sample = x[i, :]\n-        last_tool_id = str(int(test_sample[-1]))\n-        for index, abs_topk in enumerate(topk_list):\n-            (\n-                usg_wt_score,\n-                absolute_precision,\n-                pub_prec,\n-                lowest_p_prec,\n-                lowest_n_prec,\n-            ) = compute_precision(\n-                model,\n-                test_sample,\n-                y,\n-                reverse_data_dictionary,\n-                usage_scores,\n-                actual_classes_pos,\n-                abs_topk,\n-                standard_conn,\n-                last_tool_id,\n-                lowest_tool_ids,\n-            )\n-            precision[i][index] = absolute_precision\n-            usage_weights[i][index] = usg_wt_score\n-            epo_pub_prec[i][index] = pub_prec\n-            lowest_pub_topk.append(lowest_p_prec)\n-            lowest_norm_topk.append(lowest_n_prec)\n-        epo_lowest_tools_pub_prec.append(lowest_pub_topk)\n-        epo_lowest_tools_norm_prec.append(lowest_norm_topk)\n-        if last_tool_id in lowest_tool_ids:\n-            lowest_counter += 
1\n-    mean_precision = np.mean(precision, axis=0)\n-    mean_usage = np.mean(usage_weights, axis=0)\n-    mean_pub_prec = np.nanmean(epo_pub_prec, axis=0)\n-    mean_lowest_pub_prec = np.nanmean(epo_lowest_tools_pub_prec, axis=0)\n-    mean_lowest_norm_prec = np.nanmean(epo_lowest_tools_norm_prec, axis=0)\n-    return (\n-        mean_usage,\n-        mean_precision,\n-        mean_pub_prec,\n-        mean_lowest_pub_prec,\n-        mean_lowest_norm_prec,\n-        lowest_counter,\n-    )\n-\n-\n-def save_model(\n-    results,\n-    data_dictionary,\n-    compatible_next_tools,\n-    trained_model_path,\n-    class_weights,\n-    standard_connections,\n-):\n-    # save files\n-    trained_model = results["model"]\n-    best_model_parameters = results["best_parameters"]\n-    model_config = trained_model.to_json()\n-    model_weights = trained_model.get_weights()\n-    model_values = {\n-        "data_dictionary": data_dictionary,\n-        "model_config": model_config,\n-        "best_parameters": best_model_parameters,\n-        "model_weights": model_weights,\n-        "compatible_tools": compatible_next_tools,\n-        "class_weights": class_weights,\n-        "standard_connections": standard_connections,\n-    }\n-    set_trained_model(trained_model_path, model_values)\n+def remove_pipe(file_path):\n+    dataframe = pd.read_csv(file_path, sep="|", header=None)\n+    dataframe = dataframe[1:len(dataframe.index) - 1]\n+    return dataframe[1:]\n'
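The rewritten utils.py (shown truncated above) drops the hand-written weighted binary cross-entropy and manual weight serialisation in favour of Keras built-ins (tf.keras.losses.BinaryCrossentropy and tf.keras.metrics.BinaryAccuracy) and a save_model_file helper that calls model.save_weights(..., save_format="h5") and then appends the reverse dictionary, class weights, compatible tools, and standard connections as JSON-encoded datasets to the same HDF5 file. It also gains remove_pipe, a pandas-based reader for pipe-separated database dumps such as the test_workflow_connections file above, which appears to strip the leading header lines and the trailing "(N rows)" footer. A hypothetical usage sketch (the file path is only an example):

# Hypothetical usage of the new pandas-based reader (path is illustrative).
import utils

connections = utils.remove_pipe("test-data/test_workflow_connections")
print(connections.shape)  # remaining data rows x pipe-separated columns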