comparison fitted_model_eval.py @ 0:af2624d5ab32 draft

"planemo upload for repository https://github.com/bgruening/galaxytools/tree/master/tools/sklearn commit ea12f973df4b97a2691d9e4ce6bf6fae59d57717"
author bgruening
date Sat, 01 May 2021 01:24:32 +0000
parents
children 9349ed2749c6
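# This script loads a fitted (optionally Pipeline-wrapped) estimator, reads a
# test dataset (tabular or sparse), scores the estimator with one or more
# metrics, and writes the scores to a tabular output file.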
import argparse
import json
import warnings

import pandas as pd
from galaxy_ml.utils import get_scoring, load_model, read_columns
from scipy.io import mmread
from sklearn.metrics.scorer import _check_multimetric_scoring
from sklearn.model_selection._validation import _score
from sklearn.pipeline import Pipeline


def _get_X_y(params, infile1, infile2):
    """read from inputs and output X and y

    Parameters
    ----------
    params : dict
        Tool inputs parameter
    infile1 : str
        File path to dataset containing features
    infile2 : str
        File path to dataset containing target values

    """
    # store read dataframe object
    loaded_df = {}

    input_type = params["input_options"]["selected_input"]
    # tabular input
    if input_type == "tabular":
        header = "infer" if params["input_options"]["header1"] else None
        column_option = params["input_options"]["column_selector_options_1"][
            "selected_column_selector_option"
        ]
        if column_option in [
            "by_index_number",
            "all_but_by_index_number",
            "by_header_name",
            "all_but_by_header_name",
        ]:
            c = params["input_options"]["column_selector_options_1"]["col1"]
        else:
            c = None

        df_key = infile1 + repr(header)
        df = pd.read_csv(infile1, sep="\t", header=header, parse_dates=True)
        loaded_df[df_key] = df

        X = read_columns(df, c=c, c_option=column_option).astype(float)
    # sparse input
    elif input_type == "sparse":
        X = mmread(open(infile1, "r"))

    # Get target y
    header = "infer" if params["input_options"]["header2"] else None
    column_option = params["input_options"]["column_selector_options_2"][
        "selected_column_selector_option2"
    ]
    if column_option in [
        "by_index_number",
        "all_but_by_index_number",
        "by_header_name",
        "all_but_by_header_name",
    ]:
        c = params["input_options"]["column_selector_options_2"]["col2"]
    else:
        c = None

    df_key = infile2 + repr(header)
    if df_key in loaded_df:
        infile2 = loaded_df[df_key]
    else:
        infile2 = pd.read_csv(infile2, sep="\t", header=header, parse_dates=True)
        loaded_df[df_key] = infile2

    y = read_columns(
        infile2, c=c, c_option=column_option, sep="\t", header=header, parse_dates=True
    )
    if len(y.shape) == 2 and y.shape[1] == 1:
        y = y.ravel()

    return X, y


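# Illustrative sketch (not part of the tool itself): the `params` dict passed
# to _get_X_y() is the parsed Galaxy tool JSON. For a tabular input it is
# expected to contain roughly the following keys; the values shown here are
# made up:
#
#   {
#       "input_options": {
#           "selected_input": "tabular",
#           "header1": True,
#           "column_selector_options_1": {
#               "selected_column_selector_option": "all_but_by_header_name",
#               "col1": ["target"],
#           },
#           "header2": True,
#           "column_selector_options_2": {
#               "selected_column_selector_option2": "by_header_name",
#               "col2": ["target"],
#           },
#       },
#       "scoring": {"primary_scoring": "accuracy", "secondary_scoring": None},
#   }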
def main(
    inputs,
    infile_estimator,
    outfile_eval,
    infile_weights=None,
    infile1=None,
    infile2=None,
):
    """
    Parameters
    ----------
    inputs : str
        File path to galaxy tool parameter

    infile_estimator : str
        File path to trained estimator input

    outfile_eval : str
        File path to save the evaluation results, tabular

    infile_weights : str
        File path to weights input

    infile1 : str
        File path to dataset containing features

    infile2 : str
        File path to dataset containing target values
    """
    warnings.filterwarnings("ignore")

    with open(inputs, "r") as param_handler:
        params = json.load(param_handler)

    X_test, y_test = _get_X_y(params, infile1, infile2)

    # load model
    with open(infile_estimator, "rb") as est_handler:
        estimator = load_model(est_handler)

    # if the estimator is a Pipeline, the final step holds the actual model
    main_est = estimator
    if isinstance(estimator, Pipeline):
        main_est = estimator.steps[-1][-1]
    # models that store architecture and weights separately (exposing both
    # `config` and `load_weights`) need the separate weights dataset
    if hasattr(main_est, "config") and hasattr(main_est, "load_weights"):
        if not infile_weights or infile_weights == "None":
            raise ValueError(
                "The selected model skeleton asks for weights, "
                "but no dataset for weights was provided!"
            )
        main_est.load_weights(infile_weights)
    # handle scorer, convert to scorer dict
    # Check if scoring is specified
    scoring = params["scoring"]
    if scoring is not None:
        # get_scoring() expects secondary_scoring to be a comma-separated string (not a list)
        # Check if secondary_scoring is specified
        secondary_scoring = scoring.get("secondary_scoring", None)
        if secondary_scoring is not None:
            # If secondary_scoring is specified, convert the list into a comma-separated string
            scoring["secondary_scoring"] = ",".join(scoring["secondary_scoring"])

    scorer = get_scoring(scoring)
    scorer, _ = _check_multimetric_scoring(estimator, scoring=scorer)
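    # Illustrative note (an assumption about the scikit-learn version this tool
    # targets): _check_multimetric_scoring() returns a (scorers, is_multimetric)
    # tuple, where `scorers` maps metric names to scorer callables, roughly
    #   {"accuracy": make_scorer(accuracy_score), ...}
    # so `scorer` below is a dict suitable for multi-metric scoring.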

    if hasattr(estimator, "evaluate"):
        scores = estimator.evaluate(
            X_test, y_test=y_test, scorer=scorer, is_multimetric=True
        )
    else:
        scores = _score(estimator, X_test, y_test, scorer, is_multimetric=True)

    # handle output
    for name, score in scores.items():
        scores[name] = [score]
    df = pd.DataFrame(scores)
    df = df[sorted(df.columns)]
    df.to_csv(path_or_buf=outfile_eval, sep="\t", header=True, index=False)
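    # The tabular output holds a single row of scores, one column per metric,
    # with columns sorted by metric name, e.g. (illustrative values only):
    #   accuracy    f1_macro
    #   0.95        0.93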


if __name__ == "__main__":
    aparser = argparse.ArgumentParser()
    aparser.add_argument("-i", "--inputs", dest="inputs", required=True)
    aparser.add_argument("-e", "--infile_estimator", dest="infile_estimator")
    aparser.add_argument("-w", "--infile_weights", dest="infile_weights")
    aparser.add_argument("-X", "--infile1", dest="infile1")
    aparser.add_argument("-y", "--infile2", dest="infile2")
    aparser.add_argument("-O", "--outfile_eval", dest="outfile_eval")
    args = aparser.parse_args()

    main(
        args.inputs,
        args.infile_estimator,
        args.outfile_eval,
        infile_weights=args.infile_weights,
        infile1=args.infile1,
        infile2=args.infile2,
    )
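
# Illustrative invocation (file names are made up; in Galaxy these paths are
# filled in by the tool wrapper):
#   python fitted_model_eval.py \
#       -i inputs.json \
#       -e fitted_estimator \
#       -X test_features.tabular \
#       -y test_targets.tabular \
#       -O evaluation_scores.tabular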