# SPDX-License-Identifier: GPL-2.0+
#
# Copyright (c) 2016 Google, Inc
#

from contextlib import contextmanager
import doctest
import glob
from io import StringIO
import multiprocessing
import os
import sys
import unittest

from u_boot_pylib import command

use_concurrent = True
try:
    from concurrencytest import ConcurrentTestSuite
    from concurrencytest import fork_for_tests
except ImportError:
    use_concurrent = False


def run_test_coverage(prog, filter_fname, exclude_list, build_dir, required=None,
                      extra_args=None):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (it will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included in the coverage report. If this is not None, it is
            used to filter the list so that only filenames that don't contain
            filter_fname are included
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: List of module names which must appear in the coverage report
        extra_args (str): Extra arguments to pass to the tool before the
            -t/test arg

    Raises:
        ValueError: if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    glob_list += ['*concurrencytest*']
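    # Everything in glob_list is handed to python3-coverage's --omit option
    # below, so matching files are left out of the coverage calculation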
    test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
    prefix = ''
    if build_dir:
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s %s -P1' % (prefix, ','.join(glob_list),
                                         prog, extra_args or '', test_cmd))
    os.system(cmd)
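    # Check the coverage report: any modules named in 'required' must appear
    # and the overall coverage must be 100%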
    stdout = command.output('python3-coverage', 'report')
    lines = stdout.splitlines()
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
                        for line in lines if '/etype/' in line])
        missing_list = set(required)
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    coverage = lines[-1].split(' ')[-1]
    print(coverage)
    if coverage != '100%':
        print(stdout)
        print("To get a report in 'htmlcov/index.html', type: python3-coverage html")
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')


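# A minimal, hypothetical example of calling run_test_coverage() from a tool's
# test entry point (the tool name, exclude pattern and build directory below
# are illustrative only):
#
#     run_test_coverage('tools/mytool/mytool', None,
#                       ['tools/mytool/generated/*'], 'build-sandbox_spl')
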
# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr):
#   ...do something...
@contextmanager
def capture_sys_output():
    capture_out, capture_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = capture_out, capture_err
        yield capture_out, capture_err
    finally:
        sys.stdout, sys.stderr = old_out, old_err


class FullTextTestResult(unittest.TextTestResult):
    """A test result class that can print extended text results to a stream

    This is meant to be used by a TestRunner as a result class. Like
    TextTestResult, this prints out the names of tests as they are run,
    errors as they occur, and a summary of the results at the end of the
    test run. Beyond those, this prints information about skipped tests,
    expected failures and unexpected successes.

    Args:
        stream: A file-like object to write results to
        descriptions (bool): True to print descriptions with test names
        verbosity (int): Detail of printed output per test as they run.
            Test stdout and stderr always get printed when buffering
            them is disabled by the test runner. In addition to that:
            0: Print nothing
            1: Print a dot per test
            2: Print test names
    """
    def __init__(self, stream, descriptions, verbosity):
        self.verbosity = verbosity
        super().__init__(stream, descriptions, verbosity)
    def printErrors(self):
        """Called by TestRunner after the test run to summarize the tests"""
        # The parent class doesn't keep unexpected successes in the same
        # format as the rest. Adapt it to what printErrorList expects.
        unexpected_successes = [
            (test, 'Test was expected to fail, but succeeded.\n')
            for test in self.unexpectedSuccesses
        ]

        super().printErrors()  # FAIL and ERROR
        self.printErrorList('SKIP', self.skipped)
        self.printErrorList('XFAIL', self.expectedFailures)
        self.printErrorList('XPASS', unexpected_successes)

    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        # Add an empty line to keep spacing consistent with other results
        if not reason.endswith('\n'):
            reason += '\n'
        super().addSkip(test, reason)


def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
                    test_name, toolpath, class_and_module_list):
    """Run a series of test suites and collect the results

    Args:
        toolname: Name of the tool that ran the tests
        debug: True to enable debugging, which shows a full stack trace on error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (with test_name) it also
            preserves the output directory for this test. Both directories are
            displayed on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        class_and_module_list: List of test classes (type class) and module
           names (type str) to run

    Returns:
        unittest.TestResult: combined result of running all the suites
    """
    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(
        stream=sys.stdout,
        verbosity=(1 if verbosity is None else verbosity),
        resultclass=FullTextTestResult,
    )

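    # If the optional concurrencytest module is available and parallel running
    # was not disabled with processes=1, fork worker processes (one per CPU by
    # default) to run the tests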
    if use_concurrent and processes != 1:
        suite = ConcurrentTestSuite(suite,
                fork_for_tests(processes or multiprocessing.cpu_count()))

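    # Strings in class_and_module_list are module names: run their doctests,
    # but only when they match the selected test name (if one was given)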
    for module in class_and_module_list:
        if isinstance(module, str) and (not test_name or test_name == module):
            suite.addTests(doctest.DocTestSuite(module))

    for module in class_and_module_list:
        if isinstance(module, str):
            continue
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            # Since Python v3.5, if an ImportError or AttributeError occurs
            # while traversing a name, a synthetic test that raises that error
            # when run is returned. Check that the requested test exists,
            # otherwise these errors are included in the results.
            if test_name in loader.getTestCaseNames(module):
                suite.addTests(loader.loadTestsFromName(test_name, module))
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))

    print(f" Running {toolname} tests ".center(70, "="))
    result = runner.run(suite)
    print()

    return result