Mercurial > repos > shellac > sam_consensus_v3
comparison env/lib/python3.9/site-packages/gxformat2/export.py @ 0:4f3585e2f14b draft default tip
"planemo upload commit 60cee0fc7c0cda8592644e1aad72851dec82c959"
author | shellac |
---|---|
date | Mon, 22 Mar 2021 18:12:50 +0000 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
-1:000000000000 | 0:4f3585e2f14b |
---|---|
1 """Functionality for converting a standard Galaxy workflow into a format 2 workflow.""" | |
2 import argparse | |
3 import json | |
4 import sys | |
5 from collections import OrderedDict | |
6 | |
7 from ._labels import Labels | |
8 from .model import native_input_to_format2_type | |
9 from .yaml import ordered_dump | |
10 | |
# Help text for the command-line interface built by _parser().
SCRIPT_DESCRIPTION = """
Convert a native Galaxy workflow description into a Format 2 description.
"""
14 | |
15 | |
16 def _copy_common_properties(from_native_step, to_format2_step): | |
17 annotation = from_native_step.get("annotation", "") | |
18 if annotation: | |
19 to_format2_step["doc"] = annotation | |
20 position = from_native_step.get("position", None) | |
21 if position: | |
22 to_format2_step["position"] = position | |
23 | |
24 | |
def from_galaxy_native(format2_dict, tool_interface=None, json_wrapper=False):
    """Convert native .ga workflow definition to a format2 workflow.

    This is highly experimental and currently broken.

    :param format2_dict: a native (.ga) workflow dictionary. NOTE: it is
        mutated in place — ``name`` is popped off when present.
    :param tool_interface: unused here except to be forwarded to recursive
        subworkflow conversions — TODO confirm intended use.
    :param json_wrapper: when True, return ``{"yaml_content": <yaml str>}``
        instead of the converted dictionary itself.
    :returns: an ``OrderedDict`` format2 workflow (or the wrapper above).
    :raises NotImplementedError: for step module types other than the
        input types, ``pause``, ``subworkflow``, and ``tool``.
    """
    data = OrderedDict()
    data['class'] = 'GalaxyWorkflow'
    _copy_common_properties(format2_dict, data)
    # Native "name" maps to format2 "label"; removed from the source dict.
    if "name" in format2_dict:
        data["label"] = format2_dict.pop("name")
    for top_level_key in ['tags', 'uuid', 'report']:
        value = format2_dict.get(top_level_key)
        if value:
            data[top_level_key] = value

    native_steps = format2_dict.get("steps")

    # Map native step id (as str) -> step label (possibly None). If every
    # step is labeled, steps are emitted as a dict keyed by label below.
    label_map = {}
    all_labeled = True
    for key, step in native_steps.items():
        label = step.get("label")
        if not label:
            all_labeled = False
        label_map[str(key)] = label

    inputs = OrderedDict()
    outputs = OrderedDict()
    steps = []

    labels = Labels()

    # For each step, rebuild the form and encode the state
    for step in native_steps.values():
        # Workflow outputs are collected from every step type before the
        # per-module-type handling.
        for workflow_output in step.get("workflow_outputs", []):
            source = _to_source(workflow_output, label_map, output_id=step["id"])
            output_id = labels.ensure_new_output_label(workflow_output.get("label"))
            outputs[output_id] = {"outputSource": source}

        module_type = step.get("type")
        if module_type in ['data_input', 'data_collection_input', 'parameter_input']:
            step_id = step["label"]  # TODO: auto-label
            input_dict = {}
            tool_state = _tool_state(step)
            input_dict['type'] = native_input_to_format2_type(step, tool_state)
            # Selected tool-state keys carry over verbatim onto the input.
            for tool_state_key in ['optional', 'format', 'default', 'restrictions', 'suggestions', 'restrictOnConnections']:
                if tool_state_key in tool_state:
                    input_dict[tool_state_key] = tool_state[tool_state_key]

            _copy_common_properties(step, input_dict)
            # If we are only copying property - use the CWL-style short-hand
            if len(input_dict) == 1:
                inputs[step_id] = input_dict["type"]
            else:
                inputs[step_id] = input_dict
            continue

        if module_type == "pause":
            step_dict = OrderedDict()
            optional_props = ['label']
            _copy_common_properties(step, step_dict)
            _copy_properties(step, step_dict, optional_props=optional_props)
            _convert_input_connections(step, step_dict, label_map)
            step_dict["type"] = "pause"
            steps.append(step_dict)
            continue

        if module_type == 'subworkflow':
            step_dict = OrderedDict()
            optional_props = ['label']
            _copy_common_properties(step, step_dict)
            _copy_properties(step, step_dict, optional_props=optional_props)
            _convert_input_connections(step, step_dict, label_map)
            _convert_post_job_actions(step, step_dict)
            subworkflow_native_dict = step["subworkflow"]
            # Recursively convert the embedded native subworkflow; the inner
            # result is always a plain dict (json_wrapper=False).
            subworkflow = from_galaxy_native(subworkflow_native_dict, tool_interface=tool_interface, json_wrapper=False)
            step_dict["run"] = subworkflow
            steps.append(step_dict)
            continue

        if module_type != 'tool':
            raise NotImplementedError("Unhandled module type %s" % module_type)

        # Remaining case: a regular tool step.
        step_dict = OrderedDict()
        optional_props = ['label', 'tool_shed_repository']
        required_props = ['tool_id', 'tool_version']
        _copy_properties(step, step_dict, optional_props, required_props)
        _copy_common_properties(step, step_dict)

        tool_state = _tool_state(step)
        # Strip native UI bookkeeping keys that have no format2 equivalent.
        tool_state.pop("__page__", None)
        tool_state.pop("__rerun_remap_job_id__", None)
        step_dict['tool_state'] = tool_state

        _convert_input_connections(step, step_dict, label_map)
        _convert_post_job_actions(step, step_dict)
        steps.append(step_dict)

    data['inputs'] = inputs
    data['outputs'] = outputs

    # Steps become a label-keyed mapping only when every step had a label;
    # otherwise they stay an ordered list.
    if all_labeled:
        steps_dict = OrderedDict()
        for step in steps:
            label = step.pop("label")
            steps_dict[label] = step
        data['steps'] = steps_dict
    else:
        data['steps'] = steps

    if json_wrapper:
        return {
            "yaml_content": ordered_dump(data)
        }

    return data
140 | |
141 | |
142 def _tool_state(step): | |
143 tool_state = json.loads(step['tool_state']) | |
144 return tool_state | |
145 | |
146 | |
147 def _copy_properties(from_native_step, to_format2_step, optional_props=[], required_props=[]): | |
148 for prop in optional_props: | |
149 value = from_native_step.get(prop) | |
150 if value: | |
151 to_format2_step[prop] = value | |
152 for prop in required_props: | |
153 value = from_native_step.get(prop) | |
154 to_format2_step[prop] = value | |
155 | |
156 | |
def _convert_input_connections(from_native_step, to_format2_step, label_map):
    """Translate native ``input_connections`` into a format2 ``in`` mapping.

    Existing entries from the native step's ``in`` dict are preserved; each
    connection definition is rewritten as ``{"source": <label[/output]>}``.
    The sentinel input name ``__NO_INPUT_OUTPUT_NAME__`` denotes a bare
    step-order dependency and becomes ``$step`` with the sentinel suffix
    stripped from its source.
    """
    sentinel = "__NO_INPUT_OUTPUT_NAME__"
    in_dict = from_native_step.get("in", {}).copy()
    for input_name, input_defs in from_native_step['input_connections'].items():
        definitions = input_defs if isinstance(input_defs, list) else [input_defs]
        for definition in definitions:
            source = _to_source(definition, label_map)
            if input_name == sentinel:
                input_name = "$step"
                suffix = "/" + sentinel
                assert source.endswith(suffix)
                source = source[:-len(suffix)]
            in_dict[input_name] = {
                "source": source
            }
    to_format2_step["in"] = in_dict
173 | |
174 | |
175 def _convert_post_job_actions(from_native_step, to_format2_step): | |
176 | |
177 def _ensure_output_def(key): | |
178 if "outputs" in to_format2_step: | |
179 to_format2_step["out"] = to_format2_step.pop("outputs") | |
180 elif "out" not in to_format2_step: | |
181 to_format2_step["out"] = {} | |
182 | |
183 outputs_dict = to_format2_step["out"] | |
184 if key not in outputs_dict: | |
185 outputs_dict[key] = {} | |
186 return outputs_dict[key] | |
187 | |
188 if "post_job_actions" in from_native_step: | |
189 post_job_actions = from_native_step["post_job_actions"].copy() | |
190 to_remove_keys = [] | |
191 | |
192 for post_job_action_key, post_job_action_value in post_job_actions.items(): | |
193 action_type = post_job_action_value["action_type"] | |
194 output_name = post_job_action_value.get("output_name") | |
195 action_args = post_job_action_value.get("action_arguments", {}) | |
196 | |
197 handled = True | |
198 if action_type == "RenameDatasetAction": | |
199 output_dict = _ensure_output_def(output_name) | |
200 output_dict["rename"] = action_args["newname"] | |
201 handled = True | |
202 elif action_type == "HideDatasetAction": | |
203 output_dict = _ensure_output_def(output_name) | |
204 output_dict["hide"] = True | |
205 handled = True | |
206 elif action_type == "DeleteIntermediatesAction": | |
207 output_dict = _ensure_output_def(output_name) | |
208 output_dict["delete_intermediate_datasets"] = True | |
209 elif action_type == "ChangeDatatypeAction": | |
210 output_dict = _ensure_output_def(output_name) | |
211 output_dict['change_datatype'] = action_args | |
212 handled = True | |
213 elif action_type == "TagDatasetAction": | |
214 output_dict = _ensure_output_def(output_name) | |
215 output_dict["add_tags"] = action_args["tags"].split(",") | |
216 elif action_type == "RemoveTagDatasetAction": | |
217 output_dict = _ensure_output_def(output_name) | |
218 output_dict["remove_tags"] = action_args["tags"].split(",") | |
219 elif action_type == "ColumnSetAction": | |
220 output_dict = _ensure_output_def(output_name) | |
221 output_dict["set_columns"] = action_args | |
222 else: | |
223 handled = False | |
224 | |
225 if handled: | |
226 to_remove_keys.append(post_job_action_key) | |
227 | |
228 for to_remove in to_remove_keys: | |
229 del post_job_actions[to_remove] | |
230 | |
231 if post_job_actions: | |
232 to_format2_step["post_job_actions"] = post_job_actions | |
233 | |
234 | |
235 def _to_source(has_output_name, label_map, output_id=None): | |
236 output_id = output_id if output_id is not None else has_output_name['id'] | |
237 output_id = str(output_id) | |
238 output_name = has_output_name['output_name'] | |
239 output_label = label_map.get(output_id) or output_id | |
240 if output_name == "output": | |
241 source = output_label | |
242 else: | |
243 source = "%s/%s" % (output_label, output_name) | |
244 return source | |
245 | |
246 | |
def main(argv=None):
    """Entry point for script to convert native workflows to Format 2.

    Reads the native .ga JSON at INPUT, converts it, and writes YAML to
    OUTPUT (defaulting to ``INPUT + ".gxwf.yml"``).
    """
    argv = sys.argv[1:] if argv is None else argv
    args = _parser().parse_args(argv)

    input_path = args.input_path
    output_path = args.output_path if args.output_path else input_path + ".gxwf.yml"

    with open(input_path, "r") as fh:
        native_workflow_dict = json.load(fh)

    converted = from_galaxy_native(native_workflow_dict)
    with open(output_path, "w") as fh:
        ordered_dump(converted, fh)
262 | |
263 | |
def _parser():
    """Build the argument parser for the conversion command-line script."""
    parser = argparse.ArgumentParser(description=SCRIPT_DESCRIPTION)
    parser.add_argument(
        'input_path', metavar='INPUT', type=str,
        help='input workflow path (.ga)')
    # OUTPUT is optional; main() derives a default from INPUT when omitted.
    parser.add_argument(
        'output_path', metavar='OUTPUT', type=str, nargs="?",
        help='output workflow path (.gxfw.yml)')
    return parser
271 | |
272 | |
# Explicit public API of this module.
__all__ = (
    'from_galaxy_native',
    'main',
)