<tool id="create_tool_recommendation_model" name="Create a model to recommend tools" version="0.0.5">
    <description>using deep learning</description>
    <requirements>
        <requirement type="package" version="3.9.12">python</requirement>
        <requirement type="package" version="2.9.1">tensorflow</requirement>
        <requirement type="package" version="2.9.0">keras</requirement>
        <requirement type="package" version="1.0.2">scikit-learn</requirement>
        <requirement type="package" version="1.4.2">pandas</requirement>
        <requirement type="package" version="3.6.0">h5py</requirement>
    </requirements>
    <version_command>echo "0.0.5"</version_command>
    <command detect_errors="aggressive">
<![CDATA[
        python '$__tool_directory__/main.py'

            --workflow_file '$input_tabular_workflows'
            --tool_usage_file '$input_tabular_tool_usage'
            --cutoff_date '$data_parameters.input_cutoff_date'
            --maximum_path_length '$data_parameters.input_maximum_path_length'
            --te_share '$training_parameters.te_share'
            --tr_batch_size '$training_parameters.tr_batch_size'
            --te_batch_size '$training_parameters.te_batch_size'
            --n_train_iter '$training_parameters.n_train_iter'
            --tr_logging_step '$training_parameters.tr_logging_step'
            --te_logging_step '$training_parameters.te_logging_step'
            --n_heads '$nn_parameters.n_heads'
            --n_embed_dim '$nn_parameters.n_embed_dim'
            --n_feed_forward_dim '$nn_parameters.n_feed_forward_dim'
            --dropout '$nn_parameters.dropout'
            --learning_rate '$nn_parameters.learning_rate'
            --output_model '$outfile_model'
]]>
    </command>
    <inputs>
        <param name="input_tabular_workflows" type="data" format="tabular" label="Dataset containing workflows" help="Please provide Galaxy workflows as a tabular file."/>
        <param name="input_tabular_tool_usage" type="data" format="tabular" label="Dataset containing usage frequencies of tools" help="Please provide tools usage frequencies as a tabular file."/>

        <section name="data_parameters" title="Data parameters" expanded="False">
            <param name="input_cutoff_date" type="text" value="2017-12-01" label="Cutoff date" help="Provide a date (in the past) in yyyy-mm-dd format. The earliest date from which usage of tools will be extracted. For example, 2017-12-01 specifies that the usage of tools from this date until the data extraction date is extracted. The usage of tools before this date is not considered."/>
            <param name="input_maximum_path_length" type="integer" value="25" label="Maximum number of tools in a tool sequence" help="Provide an integer between 1 and 25. A workflow is divided into unique paths and this number specifies the maximum number of tools a path can have. Paths longer than this number are ignored and are not included in the deep learning training."/>
        </section>
        <section name="training_parameters" title="Training parameters" expanded="False">
            <param name="n_train_iter" type="integer" value="20" label="Maximum number of evaluations of different configurations of parameters" help="Provide an integer. Different combinations of parameters are sampled and optimized to find the best one. This number specifies the number of different configurations sampled and tested."/>
            <param name="tr_batch_size" type="integer" value="128" label="Training batch size" help="" />
            <param name="te_batch_size" type="integer" value="128" label="Test batch size" help=""/>
            <param name="tr_logging_step" type="integer" value="5" label="Training log step" help="" />
            <param name="te_logging_step" type="integer" value="5" label="Test log step" help="" />
            <param name="te_share" type="text" value="0.2" label="Share of the test data" help="Provide a real number between 0.0 and 1.0. This set of data is used to look through the prediction accuracy on unseen data after neural network training on an optimised configuration of parameters."/>
        </section>

        <section name="nn_parameters" title="Neural network parameters" expanded="False">
            <param name="n_heads" type="integer" value="4" label="Number of heads in Multihead attention network"/>
            <param name="n_embed_dim" type="integer" value="128" label="Embedding dimensions"/>
            <param name="n_feed_forward_dim" type="text" value="128" label="Number of units for feed forward network" help=""/>
            <param name="dropout" type="text" value="0.2" label="Amount of dropout between neural network layers. E.g. 0.2 means 20%" help=""/>
            <param name="learning_rate" type="text" value="0.001" label="Learning rate" help=""/>
        </section>
    </inputs>
    <outputs>
        <data format="h5" name="outfile_model" label="Model to recommend tools in Galaxy"></data>
    </outputs>
    <tests>
        <test>
            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>
            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/> 
            <param name="tr_batch_size" value="4"/>
            <param name="te_batch_size" value="4"/>
            <param name="n_train_iter" value="20"/>
            <param name="tr_logging_step" value="10"/>
            <param name="te_logging_step" value="10"/>
            <param name="te_share" value="0.4"/>
            <output name="outfile_model">
                <assert_contents>
                    <has_h5_keys keys="class_weights,compatible_tools,dense_2,dense_3,dropout_2,dropout_3,global_average_pooling1d,input_1,reverse_dict,standard_connections,token_and_position_embedding,top_level_model_weights,transformer_block"/>
                </assert_contents>
            </output>
        </test>
        <test>
            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>
            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/> 
            <param name="tr_batch_size" value="4"/>
            <param name="te_batch_size" value="4"/>
            <param name="n_train_iter" value="20"/>
            <param name="tr_logging_step" value="10"/>
            <param name="te_logging_step" value="10"/>
            <param name="te_share" value="0.4"/>
            <param name="n_heads" value="2"/>
            <param name="n_embed_dim" value="32"/>
            <param name="n_feed_forward_dim" value="32"/>
            <param name="dropout" value="0.05"/>
            <param name="learning_rate" value="0.0001"/>
            <output name="outfile_model">
                <assert_contents>
                    <has_h5_keys keys="class_weights,compatible_tools,dense_2,dense_3,dropout_2,dropout_3,global_average_pooling1d,input_1,reverse_dict,standard_connections,token_and_position_embedding,top_level_model_weights,transformer_block"/>
                </assert_contents>
            </output>
        </test>
        
        <test expect_failure="true">
            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>
            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>
            <param name="te_share" value="0.0,0.1"/>
        </test>
        <test expect_failure="true">
            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>
            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/> 
            <param name="tr_batch_size" value="1,9"/>
        </test>
        <test expect_failure="true">
            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>
            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>
            <param name="te_batch_size" value="1,1"/>
        </test>
        <test expect_failure="true">
            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>
            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/> 
            <param name="tr_logging_step" value="1,6"/>
        </test>
        <test expect_failure="true">
            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>
            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/> 
            <param name="n_embed_dim" value="1,6"/>
        </test>
        <test expect_failure="true">
            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>
            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>
            <param name="n_feed_forward_dim" value="1,6"/>
        </test>
        <test expect_failure="true">
            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>
            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/> 
            <param name="dropout" value="0.1,0.2"/>
        </test>
        <test expect_failure="true">
            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>
            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>
            <param name="n_heads" value="0.1"/>
        </test>
        <test expect_failure="true">
            <param name="input_tabular_workflows" value="test_workflow_connections" ftype="tabular"/>
            <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>
            <param name="learning_rate" value="0.0001,0.04"/>
        </test>
    </tests>
    <help>
        <![CDATA[
**What it does**

This tool creates a model to recommend tools in Galaxy by learning the connections of tools in workflows. The model is saved as an HDF5 file containing the tool dictionary together with the configuration and weights of the neural network. A transformer-style neural network (token and position embedding followed by multi-head attention) is used to learn higher-order dependencies in the tool connections of workflows. The tool takes two tabular files as input - one containing workflow connections and another containing usage frequencies of tools. Several data, training and neural network parameters can be set; once training has finished, the resulting model can be used to recommend tools in Galaxy. Further details about the input data and the parameters are given below.

-----

**Input files**

There are two input files:

1. The first file ("dataset containing workflows") contains tool connections for workflows in a tabular format. The workflows are arranged as pairs of tool connections. Each row is a pair of tool connections in a workflow as shown below:

 ==========  ================   ==========  =============   =============   ===========  =============  ==============  ==============  ===========  =============
 **wf_id**   **wf_updated**      **in_id**   **in_tool**    **in_tool_v**   **out_id**    **out_tool**  **out_tool_v**   **published**  **deleted**  **has_error**
 ----------  ----------------   ----------  -------------   -------------   -----------  -------------  --------------  --------------  -----------  -------------
     3         2013-02-07            7        Cut1              1.0.0            5            Grep1          1.0.1              f             t            f
 ==========  ================   ==========  =============   =============   ===========  =============  ==============  ==============  ===========  =============

The first column (wf_id) is the workflow id, the second (wf_updated) is the last-updated timestamp, the third (in_id) is the id of the tool that is the input of the tool connection, the fourth (in_tool) is the name of the input tool, the fifth (in_tool_v) is the version of the input tool, the sixth (out_id) is the id of the output tool of the tool connection, the seventh (out_tool) is the name of the output tool and the eighth (out_tool_v) is the version of the output tool. The tool connections (rows) of each workflow are used to recreate the workflow as a directed acyclic graph, and the unique tool sequences (paths) of each workflow are extracted from it. These tool sequences are then used to learn higher-order dependencies with the neural network in order to recommend tools. The last three columns (published, deleted, has_error) indicate whether a workflow is published, deleted or has errors; together they are useful for judging whether a workflow is of good quality.
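
As a rough illustration of this step (a minimal sketch only; the actual logic in main.py may differ, and the helper function below is hypothetical), the tool connections of one workflow can be turned into linear tool paths by walking the graph they describe::

    # Sketch: turn the (in_tool, out_tool) pairs of one workflow into linear
    # tool paths by walking the directed graph they describe.
    from collections import defaultdict

    def extract_paths(connections):
        graph = defaultdict(list)
        outputs = set()
        for in_tool, out_tool in connections:
            graph[in_tool].append(out_tool)
            outputs.add(out_tool)
        # Paths start at tools that are never the output of a connection.
        starts = [tool for tool in graph if tool not in outputs]
        paths = []

        def walk(tool, path):
            path = path + [tool]
            if not graph[tool]:              # no outgoing connection: path ends here
                paths.append(path)
            for next_tool in graph[tool]:
                walk(next_tool, path)

        for start in starts:
            walk(start, [])
        return paths

    print(extract_paths([("Cut1", "Grep1"), ("Grep1", "Filter1")]))
    # [['Cut1', 'Grep1', 'Filter1']]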

2. The second file ("dataset containing usage frequencies of tools") is also a tabular file containing the usage frequencies of tools for a period of time. It has 3 columns:

    ============================================================================================  ==========  ===   
    upload1                                                                                       2019-03-01  176
    toolshed.g2.bx.psu.edu/repos/devteam/fastqc/fastqc/0.72                                       2019-03-01  97
    toolshed.g2.bx.psu.edu/repos/bgruening/deeptools_bam_coverage/deeptools_bam_coverage/3.0.2.0  2019-03-01  67
    ============================================================================================  ==========  ===

The first column is the id of the tool, the second is the month and the last one is the number of times the tool has been used in that month. For example, if this data is collected for one year, then each tool appears in this list at most 12 times (once for every month in which its usage is > 0). This data captures the usage pattern of a tool, i.e. whether it has been used often over the last year (high frequency in recent months) or hardly at all (low frequency in recent months). These frequencies are then used as weights for the tools during neural network training: tools with a high recent usage frequency carry more weight than tools with a low one, which helps phase out tools that are no longer being used from the recommendations.
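
For illustration only (the actual weighting scheme used by main.py may differ; the file name and the log-scaling below are assumptions), the usage file can be aggregated into one weight per tool like this::

    # Sketch: aggregate the monthly usage counts into one weight per tool.
    import numpy as np
    import pandas as pd

    usage = pd.read_csv("tool_usage.tsv", sep="\t",
                        names=["tool_id", "month", "count"])
    # Keep only the usage recorded after the cutoff date.
    usage = usage[usage["month"] >= "2017-12-01"]
    totals = usage.groupby("tool_id")["count"].sum()
    # Log-scale so that a handful of very popular tools does not dominate,
    # then normalise to (0, 1] for use as weights during training.
    weights = np.log(1.0 + totals)
    weights = (weights / weights.max()).to_dict()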

-----


**Parameters**

There are multiple parameters which can be set. They are divided into 3 categories:

1. Data parameters
    - "input_cutoff_date": It is used to set the earliest date from which the usage frequencies of tools should be considered. The format of the date is YYYY-MM-DD. This date should be in the past.
    - "input_maximum_path_length": This takes an integer and specifies the maximum size of a tool sequence extracted from any workflow. Any tool sequence of length larger than this number is not included in the dataset for training.
    
2. Training parameters
    - "n_train_iter": the number of training iterations, i.e. how long the neural network is trained while minimising the error. A higher number usually gives a better model but increases the running time of the tool.
    - "tr_batch_size" and "te_batch_size": the number of samples in each batch of training and test data, respectively, fed to the network at each iteration.
    - "tr_logging_step" and "te_logging_step": how often (in iterations) the performance on the training and test data is evaluated and logged.
    - "te_share": the share of the data that is held out as the test set. For example, 0.2 means that 20% of the data is used only to evaluate the prediction accuracy on unseen data and is never used for training. A minimal sketch of such a split is shown below.
    
3. Neural network parameters:
    - "n_heads": the number of attention heads in the multi-head attention layer of the transformer block.
    - "n_embed_dim": the size of the embedding vector learned for each tool; this size is the same for all tools. A value that is too small may underfit and a value that is too large may overfit.
    - "n_feed_forward_dim": the number of units in the feed-forward layer of the transformer block. A higher number means stronger learning (with a risk of overfitting) and a lower number means weaker learning (with a risk of underfitting).
    - "dropout": the fraction of units that is randomly "dropped" between neural network layers during training (e.g. 0.2 means 20%). Dropout imposes randomness on the network and helps to avoid or reduce overfitting.
    - "learning_rate": the speed of learning. A higher value makes learning faster but the optimiser may diverge; a lower value makes learning slower and the optimum may not be reached. See the sketch after this list for how these parameters could fit together.

-----


**Output file**

The output file (model) is an HDF5 file (http://docs.h5py.org/en/latest/high/file.html) containing multiple attributes such as the dictionary of tools, the configuration and weights of each layer of the neural network, the weights of all tools and so on. After the tool has finished executing, the model can be downloaded and placed at "/galaxy/database/" inside a Galaxy instance. To show the recommended tools in Galaxy (i.e. to enable the UI integration), the following change should be made to the "galaxy.yml" file:

    - Enable the property "enable_tool_recommendation" and set it to "true", as shown in the example below.
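
    For example (the surrounding structure of galaxy.yml is assumed here; only the option name comes from this tool's documentation)::

        galaxy:
          enable_tool_recommendation: true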

        ]]>
    </help>
    <citations>
        <citation type="bibtex">
            @ARTICLE{anuprulez_galaxytools,
                Author = {Anup Kumar and Björn Grüning},
                keywords = {bioinformatics, recommendation system, deep learning},
                title = {{Tool recommendation system for Galaxy}},
                url = {https://github.com/bgruening/galaxytools}
            }
        </citation>
    </citations>
</tool>