| """Run python unittests and dump a json report of the results to stdout. |
| |
| Usage: |
| python py_unittest_runner.py path/to/unittests_dir |
| |
| """ |
import argparse
import json
import logging
import os
from pathlib import Path
import sys
from typing import Optional

import gunittest.runner

LOGGER = logging.getLogger(__name__)


def run_tests(directory: str,
              test_file_filter_regex: str,
              timeout: int,
              fail_fast: bool,
              after_module: Optional[str] = None,
              multiprocessing: bool = True,
              module_path_regex: str = '.'):
  """Crawl |directory| and run all python tests.

  Args:
    directory: Path to search for python tests to run.
    test_file_filter_regex: File pattern for tests to be run.
    timeout: Test timeout (in seconds).
    fail_fast: Fail on the first encountered error.
    after_module: If set, only run tests from modules sorted alphabetically
      after this name.
    multiprocessing: Use multiprocessing if True.
    module_path_regex: Regex to determine which modules to include.

  Returns:
    The summarized test results.
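
  Example (illustrative; the directory and regex are placeholders):
    summary = run_tests('tests/', r'.*test\.py$', timeout=60, fail_fast=False)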
  """

  results = gunittest.runner.RunTests(
      directory,
      module_path_regex=module_path_regex,
      test_file_filter_regex=test_file_filter_regex,
      timeout=timeout,
      use_multiprocessing=multiprocessing,
      after_module=after_module,
      fail_fast=fail_fast,
  )
  summary = gunittest.runner.Summarize(results)
  return summary


def generate_review(summary):
  """Generate a Gerrit review from the test results |summary|.

  Args:
    summary: A dictionary with 'errors', 'failures', and 'results' keys, each
      containing a list of db.model.TestResult objects summarizing the
      results of a test run, plus an 'elapsed_secs' key giving the total run
      time in seconds.

  Returns:
    {'message': review_message, 'passing': passing}, where |review_message|
    is a string summarizing the test run and |passing| is a bool indicating
    whether all tests passed (no failures, no errors).
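
  Example (illustrative; an empty summary yields a passing review):
    review = generate_review(
        {'errors': [], 'failures': [], 'results': [], 'elapsed_secs': 0})
    # review['passing'] is True and review['message'] starts with
    # 'Unit Test Results: Passing'.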
  """
  errors = summary.get('errors', [])
  failures = summary.get('failures', [])
  all_results = summary.get('results', [])
  elapsed_secs = summary.get('elapsed_secs', 0)
  passing = len(errors) + len(failures) == 0
  passing_msg = 'Passing' if passing else 'Failing'
  message = ('Unit Test Results: {}\n'
             'Run: {}, Errors: {}, Failures: {}\n'
             'Total Run Time: {}s').format(passing_msg, len(all_results),
                                           len(errors), len(failures),
                                           elapsed_secs)
  failure_msgs = [
      '{}\n\n{}\n'.format(f.GetTestName(), f.GetStack()) for f in failures
  ]
  if failure_msgs:
    message += '\n\nFailures:\n{}'.format('\n'.join(failure_msgs))

  error_msgs = [
      '{}\n\n{}\n'.format(e.GetTestName(), e.GetStack()) for e in errors
  ]
  if error_msgs:
    message += '\n\nErrors:\n{}'.format('\n'.join(error_msgs))

  LOGGER.info(message)

  return {'message': message, 'passing': passing}


def parse_cmdline_args() -> argparse.Namespace:
  """Returns the parsed command line arguments."""
  parser = argparse.ArgumentParser(
      description='Run python tests in a given directory.')
  parser.add_argument('--after-module',
                      help=('Run tests that are from modules alphabetically '
                            'after the provided term.'))
  parser.add_argument(
      '--fail-fast',
      action='store_true',
      help='Fail on the first test error.')
  parser.add_argument(
      '--test_dir',
      help='The directory to search for tests to run.',
      required=True)
  parser.add_argument(
      '--output',
      type=Path,
      help='The file to output the JSON results to.',
      required=True)
  parser.add_argument(
      '--test_file_filter_regex',
      help='File pattern for tests to be run.',
      default=r'.*test\.py$')
  parser.add_argument(
      '--module_path_regex',
      help='Regex to determine which modules to include.',
      default='.')
  parser.add_argument(
      '--timeout',
      help='Timeout (in seconds) for running tests.',
      type=int,
      required=True)
  parser.add_argument(
      '--single-process',
      help='Avoid using multiple processes when running tests.',
      action='store_true',
  )
  parser.add_argument(
      '--verbose',
      action='store_true',
      help='When present, show debug logs for the test runner.',
  )
  args = parser.parse_args()

  if not os.path.isdir(args.test_dir):
    parser.error(
        'test_dir "{}" does not exist.'.format(os.path.abspath(args.test_dir)))

  return args


class LoggingSetupError(Exception):
  pass


def _setup_logging(verbose: bool) -> None:
  """Set up logging.

  We want both to emit logs from this runner and to capture logs from the
  tests we plan to run.

  Args:
    verbose: If True, show debug-level logs for the test runner.
  """
  root = logging.getLogger()
  if root.handlers:
    raise LoggingSetupError(
        'Someone is modifying the logging setup at import time. This is a bad '
        'practice. This test runner refuses to fight with badly-behaved tests '
        'and will now exit.')
  root.setLevel(logging.WARNING)
  level = logging.DEBUG if verbose else logging.INFO
  LOGGER.setLevel(level)
  stream = logging.StreamHandler()
  stream.setFormatter(
      logging.Formatter(
          '%(levelname)s %(process)d:%(thread)d %(name)s %(asctime)s: '
          '%(message)s'))
  LOGGER.addHandler(stream)
  LOGGER.info('Logging handler established')
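  # Attach the same handler and level to the test runner library's logger
  # ('gunittest.runner') so its logs are captured alongside this runner's.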
  test_runner = logging.getLogger('gunittest.runner')
  test_runner.addHandler(stream)
  test_runner.setLevel(level)


def main():
  args = parse_cmdline_args()
  try:
    _setup_logging(args.verbose)
  except LoggingSetupError as ex:
    print(ex, file=sys.stderr)
    sys.exit(1)
  test_results = run_tests(
      args.test_dir,
      test_file_filter_regex=args.test_file_filter_regex,
      timeout=args.timeout,
      multiprocessing=not args.single_process,
      module_path_regex=args.module_path_regex,
      after_module=args.after_module,
      fail_fast=args.fail_fast,
  )
  review = generate_review(test_results)

  # Path.parent is always truthy (it is '.' for a bare filename), so create
  # it unconditionally; parents=True handles nested output directories.
  args.output.parent.mkdir(parents=True, exist_ok=True)

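  # The serialized review has the shape {"message": "...", "passing": true}.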
  with open(args.output, 'w') as output:
    json.dump(review, output)


if __name__ == '__main__':
  main()