toolfactory/galaxy-tool-test @ 121:2050b2475ae5 draft

Uploaded
author fubar
date Thu, 07 Jan 2021 09:24:17 +0000
parents d4d88d393285
children
#!/usr/bin/env python

import argparse
import datetime as dt
import json
import logging
import os
import sys
import tempfile
from collections import namedtuple
from concurrent.futures import thread, ThreadPoolExecutor

import yaml

from galaxy.tool_util.verify.interactor import (
    DictClientTestConfig,
    GalaxyInteractorApi,
    verify_tool,
)

DESCRIPTION = """Script to quickly run a tool test against a running Galaxy instance."""
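# Example invocation (a sketch: the URL, API key, and tool id below are placeholders;
# every flag used here is defined in _arg_parser() further down):
#
#   python galaxy-tool-test -u http://localhost:8080 -k <api-key> \
#       -t my_tool_id -j tool_test_output.json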
DEFAULT_SUITE_NAME = "Galaxy Tool Tests"
ALL_TESTS = -1
ALL_TOOLS = "*"
ALL_VERSION = "*"
LATEST_VERSION = None


TestReference = namedtuple("TestReference", ["tool_id", "tool_version", "test_index"])
TestException = namedtuple("TestException", ["tool_id", "exception", "was_recorded"])


class Results:
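    """Collect tool test results and exceptions and render them as a JSON report."""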

    def __init__(self, default_suitename, test_json, append=False):
        self.test_json = test_json or "-"
        test_results = []
        test_exceptions = []
        suitename = default_suitename
        if append:
            assert test_json != "-"
            with open(test_json) as f:
                previous_results = json.load(f)
                test_results = previous_results["tests"]
                if "suitename" in previous_results:
                    suitename = previous_results["suitename"]
        self.test_results = test_results
        self.test_exceptions = test_exceptions
        self.suitename = suitename

    def register_result(self, result):
        self.test_results.append(result)

    def register_exception(self, test_exception):
        self.test_exceptions.append(test_exception)

    def already_successful(self, test_reference):
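        """Return True if a previously recorded result for this test reference was a success."""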
        test_id = _test_id_for_reference(test_reference)
        for test_result in self.test_results:
            if test_result.get('id') != test_id:
                continue

            has_data = test_result.get('has_data', False)
            if has_data:
                test_data = test_result.get("data", {})
                if 'status' in test_data and test_data['status'] == 'success':
                    return True

        return False

    def write(self):
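        """Summarize recorded results and write the JSON report to test_json (or stdout for "-")."""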
        tests = sorted(self.test_results, key=lambda el: el['id'])
        n_passed, n_failures, n_skips = 0, 0, 0
        n_errors = len([e for e in self.test_exceptions if not e.was_recorded])
        for test in tests:
            has_data = test.get('has_data', False)
            if has_data:
                test_data = test.get("data", {})
                if 'status' not in test_data:
                    raise Exception(f"Test result data {test_data} doesn't contain a status key.")
                status = test_data['status']
                if status == "success":
                    n_passed += 1
                elif status == "error":
                    n_errors += 1
                elif status == "skip":
                    n_skips += 1
                elif status == "failure":
                    n_failures += 1
        report_obj = {
            'version': '0.1',
            'suitename': self.suitename,
            'results': {
                'total': n_passed + n_failures + n_skips + n_errors,
                'errors': n_errors,
                'failures': n_failures,
                'skips': n_skips,
            },
            'tests': tests,
        }
        if self.test_json == "-":
            print(json.dumps(report_obj))
        else:
            with open(self.test_json, "w") as f:
                json.dump(report_obj, f)

    def info_message(self):
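        """Return a human-readable summary of passed, failed, skipped, and errored tests."""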
        messages = []
        passed_tests = self._tests_with_status('success')
        messages.append("Passed tool tests ({}): {}".format(
            len(passed_tests),
            [t["id"] for t in passed_tests]
        ))
        failed_tests = self._tests_with_status('failure')
        messages.append("Failed tool tests ({}): {}".format(
            len(failed_tests),
            [t["id"] for t in failed_tests]
        ))
        skipped_tests = self._tests_with_status('skip')
        messages.append("Skipped tool tests ({}): {}".format(
            len(skipped_tests),
            [t["id"] for t in skipped_tests]
        ))
        errored_tests = self._tests_with_status('error')
        messages.append("Errored tool tests ({}): {}".format(
            len(errored_tests),
            [t["id"] for t in errored_tests]
        ))
        return "\n".join(messages)

    @property
    def success_count(self):
        return len(self._tests_with_status('success'))

    @property
    def skip_count(self):
        return len(self._tests_with_status('skip'))

    @property
    def error_count(self):
        return len(self._tests_with_status('error')) + len(self.test_exceptions)

    @property
    def failure_count(self):
        return len(self._tests_with_status('failure'))

    def _tests_with_status(self, status):
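        """Return the recorded test results whose data status matches ``status``."""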
        return [t for t in self.test_results if t.get("data", {}).get("status") == status]


def test_tools(
    galaxy_interactor,
    test_references,
    results,
    log=None,
    parallel_tests=1,
    history_per_test_case=False,
    no_history_cleanup=False,
    retries=0,
    verify_kwds=None,
):
    """Run through tool tests and write report.

    Refactor this into Galaxy in 21.01.
    """
    verify_kwds = (verify_kwds or {}).copy()
    tool_test_start = dt.datetime.now()
    history_created = False
    if history_per_test_case:
        test_history = None
    else:
        history_created = True
        test_history = galaxy_interactor.new_history(history_name=f"History for {results.suitename}")
    verify_kwds.update({
        "no_history_cleanup": no_history_cleanup,
        "test_history": test_history,
    })
    with ThreadPoolExecutor(max_workers=parallel_tests) as executor:
        try:
            for test_reference in test_references:
                _test_tool(
                    executor=executor,
                    test_reference=test_reference,
                    results=results,
                    galaxy_interactor=galaxy_interactor,
                    log=log,
                    retries=retries,
                    verify_kwds=verify_kwds,
                )
        finally:
            # Always write report, even if test was cancelled.
            try:
                executor.shutdown(wait=True)
            except KeyboardInterrupt:
                executor._threads.clear()
                thread._threads_queues.clear()
            results.write()
            if log:
                log.info("Report written to '%s'", os.path.abspath(results.test_json))
                log.info(results.info_message())
                log.info("Total tool test time: {}".format(dt.datetime.now() - tool_test_start))
            if history_created and not no_history_cleanup:
                galaxy_interactor.delete_history(test_history)


def _test_id_for_reference(test_reference):
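    """Build a stable test id of the form "<tool_id>/<tool_version>-<test_index>"."""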
    tool_id = test_reference.tool_id
    tool_version = test_reference.tool_version
    test_index = test_reference.test_index

    if tool_version and tool_id.endswith("/" + tool_version):
        tool_id = tool_id[:-len("/" + tool_version)]

    label_base = tool_id
    if tool_version:
        label_base += "/" + str(tool_version)

    test_id = label_base + "-" + str(test_index)
    return test_id


def _test_tool(
    executor,
    test_reference,
    results,
    galaxy_interactor,
    log,
    retries,
    verify_kwds,
):
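    """Submit one tool test to the executor, recording its result (and any exception) in results."""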
    tool_id = test_reference.tool_id
    tool_version = test_reference.tool_version
    test_index = test_reference.test_index
    # If given a tool_id with a version suffix, strip it off so we can treat tool_version
    # correctly at least in client_test_config.
    if tool_version and tool_id.endswith("/" + tool_version):
        tool_id = tool_id[:-len("/" + tool_version)]

    test_id = _test_id_for_reference(test_reference)

    def run_test():
        run_retries = retries
        job_data = None
        job_exception = None

        def register(job_data_):
            nonlocal job_data
            job_data = job_data_

        try:
            while run_retries >= 0:
                job_exception = None
                try:
                    if log:
                        log.info("Executing test '%s'", test_id)
                    verify_tool(
                        tool_id, galaxy_interactor, test_index=test_index, tool_version=tool_version,
                        register_job_data=register, **verify_kwds
                    )
                    if log:
                        log.info("Test '%s' passed", test_id)
                    break
                except Exception as e:
                    if log:
                        log.warning("Test '%s' failed", test_id, exc_info=True)

                    job_exception = e
                    run_retries -= 1
        finally:
            if job_data is not None:
                results.register_result({
                    "id": test_id,
                    "has_data": True,
                    "data": job_data,
                })
            if job_exception is not None:
                was_recorded = job_data is not None
                test_exception = TestException(tool_id, job_exception, was_recorded)
                results.register_exception(test_exception)

    executor.submit(run_test)


def build_case_references(
    galaxy_interactor,
    tool_id=ALL_TOOLS,
    tool_version=LATEST_VERSION,
    test_index=ALL_TESTS,
    page_size=0,
    page_number=0,
    check_against=None,
    log=None,
):
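    """Build the list of TestReference tuples to run, optionally filtered and paginated."""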
    test_references = []
    if tool_id == ALL_TOOLS:
        tests_summary = galaxy_interactor.get_tests_summary()
        for tool_id, tool_versions_dict in tests_summary.items():
            for tool_version, summary in tool_versions_dict.items():
                for test_index in range(summary["count"]):
                    test_reference = TestReference(tool_id, tool_version, test_index)
                    test_references.append(test_reference)
    else:
        assert tool_id
        tool_test_dicts = galaxy_interactor.get_tool_tests(tool_id, tool_version=tool_version) or {}
        for i, tool_test_dict in enumerate(tool_test_dicts):
            this_tool_version = tool_test_dict.get("tool_version", tool_version)
            this_test_index = i
            if test_index == ALL_TESTS or i == test_index:
                test_reference = TestReference(tool_id, this_tool_version, this_test_index)
                test_references.append(test_reference)

    if check_against:
        filtered_test_references = []
        for test_reference in test_references:
            if check_against.already_successful(test_reference):
                if log is not None:
                    log.debug(f"Found successful test for {test_reference}, skipping")
                continue
            filtered_test_references.append(test_reference)
        if log is not None:
            log.info(f"Skipping {len(test_references) - len(filtered_test_references)} out of {len(test_references)} tests.")
        test_references = filtered_test_references

    if page_size > 0:
        slice_start = page_size * page_number
        slice_end = page_size * (page_number + 1)
        test_references = test_references[slice_start:slice_end]

    return test_references


def main(argv=None):
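    """Parse arguments, build test references, run the tests, and re-raise the first recorded exception, if any."""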
    if argv is None:
        argv = sys.argv[1:]

    args = _arg_parser().parse_args(argv)
    log = setup_global_logger(__name__, verbose=args.verbose)
    client_test_config_path = args.client_test_config
    if client_test_config_path is not None:
        log.debug(f"Reading client config path {client_test_config_path}")
        with open(client_test_config_path) as f:
            client_test_config = yaml.full_load(f)
    else:
        client_test_config = {}

    def get_option(key):
        arg_val = getattr(args, key, None)
        if arg_val is None and key in client_test_config:
            val = client_test_config.get(key)
        else:
            val = arg_val
        return val

    output_json_path = get_option("output_json")
    galaxy_interactor_kwds = {
        "galaxy_url": get_option("galaxy_url"),
        "master_api_key": get_option("admin_key"),
        "api_key": get_option("key"),
        "keep_outputs_dir": args.output,
        "download_attempts": get_option("download_attempts"),
        "download_sleep": get_option("download_sleep"),
    }
    tool_id = args.tool_id
    tool_version = args.tool_version
    tools_client_test_config = DictClientTestConfig(client_test_config.get("tools"))
    verbose = args.verbose

    galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds)
    results = Results(args.suite_name, output_json_path, append=args.append)
    check_against = None if not args.skip_successful else results
    test_references = build_case_references(
        galaxy_interactor,
        tool_id=tool_id,
        tool_version=tool_version,
        test_index=args.test_index,
        page_size=args.page_size,
        page_number=args.page_number,
        check_against=check_against,
        log=log,
    )
    log.debug(f"Built {len(test_references)} test references to execute.")
    verify_kwds = dict(
        client_test_config=tools_client_test_config,
        force_path_paste=args.force_path_paste,
        skip_with_reference_data=not args.with_reference_data,
        quiet=not verbose,
    )
    test_tools(
        galaxy_interactor,
        test_references,
        results,
        log=log,
        parallel_tests=args.parallel_tests,
        history_per_test_case=args.history_per_test_case,
        no_history_cleanup=args.no_history_cleanup,
        retries=args.retries,
        verify_kwds=verify_kwds,
    )
    exceptions = results.test_exceptions
    if exceptions:
        exception = exceptions[0]
        if hasattr(exception, "exception"):
            exception = exception.exception
        raise exception


def setup_global_logger(name, log_file=None, verbose=False):
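    """Configure a logger with a console handler plus a file handler (a temp file when log_file is not given)."""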
    formatter = logging.Formatter('%(asctime)s %(levelname)-5s - %(message)s')
    console = logging.StreamHandler()
    console.setFormatter(formatter)

    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG if verbose else logging.INFO)
    logger.addHandler(console)

    if not log_file:
        # delete=False is chosen here because it is always nice to have a log file
        # ready if you need to debug, rather than having the "if only I had set a
        # log file" moment after the fact.
        temp = tempfile.NamedTemporaryFile(prefix="ephemeris_", delete=False)
        log_file = temp.name
    file_handler = logging.FileHandler(log_file)
    logger.addHandler(file_handler)
    logger.info(f"Storing log file in: {log_file}")
    return logger


def _arg_parser():
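    """Build the argparse.ArgumentParser describing the command line interface."""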
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('-u', '--galaxy-url', default="http://localhost:8080", help='Galaxy URL')
    parser.add_argument('-k', '--key', default=None, help='Galaxy User API Key')
    parser.add_argument('-a', '--admin-key', default=None, help='Galaxy Admin API Key')
    parser.add_argument('--force_path_paste', default=False, action="store_true", help='Requires the Galaxy-side config option "allow_path_paste" to be enabled. Allows fetching test data locally. Only for admins.')
    parser.add_argument('-t', '--tool-id', default=ALL_TOOLS, help='Tool ID')
    parser.add_argument('--tool-version', default=None, help='Tool Version (if tool id supplied). Defaults to just latest version, use * to test all versions')
    parser.add_argument('-i', '--test-index', default=ALL_TESTS, type=int, help='Tool Test Index (starting at 0) - by default all tests will run.')
    parser.add_argument('-o', '--output', default=None, help='directory to dump outputs to')
    parser.add_argument('--append', default=False, action="store_true", help="Extend a test record json (created with --output-json) with additional tests.")
    parser.add_argument('--skip-successful', default=False, action="store_true", help="When used with --append, skip previously run successful tests.")
    parser.add_argument('-j', '--output-json', default=None, help='output metadata json')
    parser.add_argument('--verbose', default=False, action="store_true", help="Verbose logging.")
    parser.add_argument('-c', '--client-test-config', default=None, help="Test config YAML to help with client testing")
    parser.add_argument('--suite-name', default=DEFAULT_SUITE_NAME, help="Suite name for tool test output")
    parser.add_argument('--with-reference-data', dest="with_reference_data", default=False, action="store_true")
    parser.add_argument('--skip-with-reference-data', dest="with_reference_data", action="store_false", help="Skip tests the Galaxy server believes use data tables or loc files.")
    parser.add_argument('--history-per-suite', dest="history_per_test_case", default=False, action="store_false", help="Create new history per test suite (all tests in same history).")
    parser.add_argument('--history-per-test-case', dest="history_per_test_case", action="store_true", help="Create new history per test case.")
    parser.add_argument('--no-history-cleanup', default=False, action="store_true", help="Preserve histories created for testing.")
    parser.add_argument('--parallel-tests', default=1, type=int, help="Parallel tests.")
    parser.add_argument('--retries', default=0, type=int, help="Retry failed tests.")
    parser.add_argument('--page-size', default=0, type=int, help="If positive, use pagination and just run one 'page' of tool tests.")
    parser.add_argument('--page-number', default=0, type=int, help="If page size is used, run this 'page' of tests - starts with 0.")
    parser.add_argument('--download-attempts', default=1, type=int, help="Galaxy may return a transient 500 status code for download if test results are written but not yet accessible.")
    parser.add_argument('--download-sleep', default=1, type=int, help="If download attempts is greater than 1, the amount to sleep between download attempts.")
    return parser


if __name__ == "__main__":
    main()