<tool id="sklearn_svm_classifier" name="Support vector machines (SVMs)" version="@VERSION@" profile="20.05">
    <description>for classification</description>
    <macros>
        <import>main_macros.xml</import>
        <!-- macro name="class_weight" argument="class_weight"-->
    </macros>
    <expand macro="python_requirements" />
    <expand macro="macro_stdio" />
    <version_command>echo "@VERSION@"</version_command>
    <command><![CDATA[
    python '$svc_script' '$inputs'
]]>
    </command>
    <configfiles>
        <inputs name="inputs" />
        <configfile name="svc_script">
            <![CDATA[
import sys
import json
import sklearn.svm
import pandas
import pickle

from galaxy_ml.utils import load_model, get_X_y


input_json_path = sys.argv[1]
with open(input_json_path, "r") as param_handler:
    params = json.load(param_handler)

#if $selected_tasks.selected_task == "load":

header = 'infer' if params["selected_tasks"]["header"] else None
data = pandas.read_csv("$selected_tasks.infile_data", sep='\t', header=header, index_col=None, parse_dates=True, encoding=None)

with open("$infile_model", 'rb') as model_handler:
    classifier_object = load_model(model_handler)

prediction = classifier_object.predict(data)
prediction_df = pandas.DataFrame(prediction)
res = pandas.concat([data, prediction_df], axis=1)
res.to_csv(path_or_buf="$outfile_predict", sep="\t", index=False)

#else:

X, y = get_X_y(params, "$selected_tasks.selected_algorithms.input_options.infile1", "$selected_tasks.selected_algorithms.input_options.infile2")

options = params["selected_tasks"]["selected_algorithms"]["options"]
selected_algorithm = params["selected_tasks"]["selected_algorithms"]["selected_algorithm"]

if selected_algorithm != "LinearSVC":
    if options["kernel"]:
        options["kernel"] = str(options["kernel"])

my_class = getattr(sklearn.svm, selected_algorithm)
classifier_object = my_class(**options)
classifier_object.fit(X, y)

with open("$outfile_fit", 'wb') as out_handler:
    pickle.dump(classifier_object, out_handler)

#end if

]]>
        </configfile>
    </configfiles>
    <inputs>
        <expand macro="sl_Conditional" model="zip">
            <param name="selected_algorithm" type="select" label="Classifier type">
                <option value="SVC">C-Support Vector Classification</option>
                <option value="NuSVC">Nu-Support Vector Classification</option>
                <option value="LinearSVC">Linear Support Vector Classification</option>
            </param>
            <when value="SVC">
                <expand macro="sl_mixed_input" />
                <expand macro="svc_advanced_options">
                    <expand macro="C" />
                </expand>
            </when>
            <when value="NuSVC">
                <expand macro="sl_mixed_input" />
                <expand macro="svc_advanced_options">
                    <param argument="nu" type="float" optional="true" value="0.5" label="Nu control parameter" help="Controls the number of support vectors. Should be in the interval (0, 1]. " />
                </expand>
            </when>
            <when value="LinearSVC">
                <expand macro="sl_mixed_input" />
                <section name="options" title="Advanced Options" expanded="False">
                    <expand macro="C" />
                    <expand macro="tol" default_value="0.001" help_text="Tolerance for stopping criterion. " />
                    <expand macro="random_state" help_text="Integer number. The seed of the pseudo random number generator to use when shuffling the data for probability estimation. A fixed seed allows reproducible results." />
                    <!--expand macro="class_weight"/-->
                    <param argument="max_iter" type="integer" optional="true" value="1000" label="Maximum number of iterations" help="The maximum number of iterations to be run." />
                    <param argument="loss" type="select" label="Loss function" help="Specifies the loss function. ''squared_hinge'' is the square of the hinge loss.">
                        <option value="squared_hinge" selected="true">Squared hinge</option>
                        <option value="hinge">Hinge</option>
                    </param>
                    <param argument="penalty" type="select" label="Penalization norm" help=" ">
                        <option value="l1">l1</option>
                        <option value="l2" selected="true">l2</option>
                    </param>
                    <param argument="dual" type="boolean" optional="true" truevalue="booltrue" falsevalue="boolflase" checked="true" label="Use the shrinking heuristic" help="Select the algorithm to either solve the dual or primal optimization problem. Prefer dual=False when n_samples > n_features." />
                    <param argument="multi_class" type="select" label="Multi-class strategy" help="Determines the multi-class strategy if y contains more than two classes.">
                        <option value="ovr" selected="true">ovr</option>
                        <option value="crammer_singer">crammer_singer</option>
                    </param>
                    <param argument="fit_intercept" type="boolean" optional="true" truevalue="booltrue" falsevalue="boolflase" checked="true" label="Calculate the intercept for this model" help="If set to false, data is expected to be already centered." />
                    <param argument="intercept_scaling" type="float" optional="true" value="1" label="Add synthetic feature to the instance vector" help=" " />
                </section>
            </when>
        </expand>
    </inputs>
    <expand macro="output" />
    <tests>
        <test>
            <param name="infile1" value="train_set.tabular" ftype="tabular" />
            <param name="infile2" value="train_set.tabular" ftype="tabular" />
            <param name="header1" value="True" />
            <param name="header2" value="True" />
            <param name="col1" value="1,2,3,4" />
            <param name="col2" value="5" />
            <param name="selected_task" value="train" />
            <param name="selected_algorithm" value="SVC" />
            <param name="random_state" value="5" />
            <output name="outfile_fit" file="svc_model01" compare="sim_size" />
        </test>
        <test>
            <param name="infile1" value="train_set.tabular" ftype="tabular" />
            <param name="infile2" value="train_set.tabular" ftype="tabular" />
            <param name="header1" value="True" />
            <param name="header2" value="True" />
            <param name="col1" value="1,2,3,4" />
            <param name="col2" value="5" />
            <param name="selected_task" value="train" />
            <param name="selected_algorithm" value="NuSVC" />
            <param name="random_state" value="5" />
            <output name="outfile_fit" file="svc_model02" compare="sim_size" />
        </test>
        <test>
            <param name="infile1" value="train_set.tabular" ftype="tabular" />
            <param name="infile2" value="train_set.tabular" ftype="tabular" />
            <param name="header1" value="True" />
            <param name="header2" value="True" />
            <param name="col1" value="1,2,3,4" />
            <param name="col2" value="5" />
            <param name="selected_task" value="train" />
            <param name="selected_algorithm" value="LinearSVC" />
            <param name="random_state" value="5" />
            <output name="outfile_fit" file="svc_model03" compare="sim_size" />
        </test>
        <test>
            <param name="infile_model" value="svc_model01" ftype="zip" />
            <param name="infile_data" value="test_set.tabular" ftype="tabular" />
            <param name="header" value="True" />
            <param name="selected_task" value="load" />
            <output name="outfile_predict" file="svc_prediction_result01.tabular" />
        </test>
        <test>
            <param name="infile_model" value="svc_model02" ftype="zip" />
            <param name="infile_data" value="test_set.tabular" ftype="tabular" />
            <param name="header" value="True" />
            <param name="selected_task" value="load" />
            <output name="outfile_predict" file="svc_prediction_result02.tabular" />
        </test>
        <test>
            <param name="infile_model" value="svc_model03" ftype="zip" />
            <param name="infile_data" value="test_set.tabular" ftype="tabular" />
            <param name="header" value="True" />
            <param name="selected_task" value="load" />
            <output name="outfile_predict" file="svc_prediction_result03.tabular" />
        </test>
        <!-- The following test is expected to fail, it is testing the whitelist/blacklist filtering.
        It loads a pickle with malicious content that we do not accept. -->
        <test expect_failure="true">
            <param name="infile_model" value="pickle_blacklist" ftype="zip" />
            <param name="infile_data" value="test_set.tabular" ftype="tabular" />
            <param name="header" value="True" />
            <param name="selected_task" value="load" />
        </test>
    </tests>
    <help><![CDATA[
**What it does**

This tool implements Support Vector Machine (SVM) classification algorithms.
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outlier detection.

**The advantages of support vector machines are:**

        1- Effective in high dimensional spaces.

        2- Still effective in cases where the number of dimensions is greater than the number of samples.

        3- Uses a subset of training points in the decision function (called support vectors), so it is also memory efficient.

        4- Versatile: different Kernel functions can be specified for the decision function. Common kernels are provided, but it is also possible to specify custom kernels.

**The disadvantages of support vector machines include:**

        1- If the number of features is much greater than the number of samples, the method is likely to give poor performance.

        2- SVMs do not directly provide probability estimates; these are calculated using an expensive five-fold cross-validation.

For more information, see http://scikit-learn.org/stable/modules/svm.html
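
**Example**

The training and prediction steps performed by this tool correspond roughly to the following scikit-learn calls (a minimal sketch; the file names, column selection and SVC parameters are placeholders, not values fixed by this tool)::

    import pandas
    from sklearn.svm import SVC

    # Load a tabular training set; here the first four columns are features, the fifth is the label.
    train = pandas.read_csv("train_set.tabular", sep="\t")
    X, y = train.iloc[:, 0:4], train.iloc[:, 4]

    # Fit a C-Support Vector Classifier with example settings.
    classifier = SVC(C=1.0, kernel="rbf", random_state=5)
    classifier.fit(X, y)

    # Predict labels for a test set with the same feature columns.
    test = pandas.read_csv("test_set.tabular", sep="\t")
    predictions = classifier.predict(test.iloc[:, 0:4])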

    ]]>
    </help>
    <expand macro="sklearn_citation" />
</tool>