# HG changeset patch
# User bgruening
# Date 1569408160 14400
# Node ID 12764915e1c5e2000af40d8406fce9fde3e26f41
# Parent 9bf25dbe00adb261f3dac74da53430ae94e840ee
"planemo upload for repository https://github.com/bgruening/galaxytools/tree/recommendation_training/tools/tool_recommendation_model commit edeb85d311990eabd65f3c4576fbeabc6d9165c9"

diff -r 9bf25dbe00ad -r 12764915e1c5 create_tool_recommendation_model.xml
--- a/create_tool_recommendation_model.xml	Wed Aug 28 07:19:38 2019 -0400
+++ b/create_tool_recommendation_model.xml	Wed Sep 25 06:42:40 2019 -0400
@@ -31,7 +31,6 @@
         --learning_rate '$nn_parameters.learning_rate'
         --activation_recurrent '$nn_parameters.activation_recurrent'
         --activation_output '$nn_parameters.activation_output'
-        --loss_type '$nn_parameters.loss_type'
         --output_model '$outfile_model'
 ]]>
@@ -59,7 +58,6 @@
-
@@ -138,7 +136,6 @@
 - "learning_rate": The learning rate specifies the speed of learning. A higher value ensures fast learning (the optimiser may diverge) and a lower value causes slow learning (may not reach the optimum). This parameter should be optimised as well.
 - "activation_recurrent": Activations are mathematical functions to transform input into output. This takes the name of an activation function from the list of Keras activations (https://keras.io/activations/) for recurrent layers.
 - "activation_output": This takes the activation for transforming the input of the last layer to the output of the neural network. It is also taken from Keras activations (https://keras.io/activations/).
-- "loss_type": This is also a mathematical function which computes the error between true and predicted outputs. An optimizer uses this loss function to compute error and minimize it. It is taken from the list of Keras optimisers (https://keras.io/optimizers/).
 
 -----
 
diff -r 9bf25dbe00ad -r 12764915e1c5 main.py
--- a/main.py	Wed Aug 28 07:19:38 2019 -0400
+++ b/main.py	Wed Sep 25 06:42:40 2019 -0400
@@ -112,7 +112,6 @@
     arg_parser.add_argument("-lr", "--learning_rate", required=True, help="learning rate")
     arg_parser.add_argument("-ar", "--activation_recurrent", required=True, help="activation function for recurrent layers")
     arg_parser.add_argument("-ao", "--activation_output", required=True, help="activation function for output layers")
-    arg_parser.add_argument("-lt", "--loss_type", required=True, help="type of the loss/error function")
     # get argument values
     args = vars(arg_parser.parse_args())
     tool_usage_path = args["tool_usage_file"]
@@ -134,7 +133,6 @@
     learning_rate = args["learning_rate"]
     activation_recurrent = args["activation_recurrent"]
     activation_output = args["activation_output"]
-    loss_type = args["loss_type"]
 
     config = {
         'cutoff_date': cutoff_date,
@@ -152,8 +150,7 @@
         'recurrent_dropout': recurrent_dropout,
         'learning_rate': learning_rate,
         'activation_recurrent': activation_recurrent,
-        'activation_output': activation_output,
-        'loss_type': loss_type
+        'activation_output': activation_output
     }
 
     # Extract and process workflows
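
For context, here is a minimal sketch (not taken from this repository) of what the change implies downstream: after this patch the loss function is no longer a user-supplied "--loss_type" option, so the model-building code would fix the loss in code while activation_recurrent, activation_output and learning_rate still come from the parsed config dict built in main.py. The build_model function, the GRU architecture, the layer sizes, the RMSprop optimizer and the "binary_crossentropy" loss below are all illustrative assumptions, not the repository's actual implementation.

from keras.models import Sequential
from keras.layers import Embedding, GRU, Dense
from keras.optimizers import RMSprop

def build_model(vocab_size, config):
    """Assemble a small recurrent model from the remaining config keys."""
    model = Sequential()
    # Map tool ids to dense vectors; the embedding size (128) is an assumption.
    model.add(Embedding(vocab_size, 128))
    # Recurrent layer driven by the user-supplied activation name.
    model.add(GRU(128, activation=config["activation_recurrent"]))
    # Output layer driven by the user-supplied output activation.
    model.add(Dense(vocab_size, activation=config["activation_output"]))
    # The loss is now hard-coded here instead of being read from --loss_type;
    # "binary_crossentropy" is an assumed choice for illustration.
    model.compile(
        loss="binary_crossentropy",
        # argparse values arrive as strings, hence the float() conversion.
        optimizer=RMSprop(lr=float(config["learning_rate"])),
    )
    return model

The point of the sketch is the design change, not the specific layers: hard-coding the loss removes one invalid-input failure mode from the Galaxy tool form, at the cost of no longer letting users experiment with other Keras losses.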