# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test and at shutdown. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the ubman test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

import atexit
import configparser
import errno
import filelock
import io
import os
import os.path
from pathlib import Path
import pytest
import re
from _pytest.runner import runtestprotocol
import subprocess
import sys
from spawn import BootFail, Timeout, Unexpected, handle_exception
import time

# Globals: the HTML log file and the top-level fixture
log = None
ubman_fix = None

TEST_PY_DIR = os.path.dirname(os.path.abspath(__file__))

# Regex for test-function symbols
RE_UT_TEST_LIST = re.compile(r'[^a-zA-Z0-9_]_u_boot_list_2_ut_(.*)_2_(.*)\s*$')
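# For example, a hypothetical symbol-table line such as
#   0000000000abcdef D _u_boot_list_2_ut_dm_test_2_dm_test_autoprobe
# would match with suite 'dm_test' and test name 'dm_test_autoprobe' (the
# last '_2_' separates the two groups, since the first group is greedy).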

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by already-existing directories are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
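    # Note: a minimal equivalent on Python 3 would be
    # os.makedirs(path, exist_ok=True); the explicit EEXIST check is kept
    # here for clarity.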

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--build-dir-extra', default=None,
        help='U-Boot build directory for extra build (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-type-extra', '--bde', default='sandbox',
        help='U-Boot extra board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--buildman', default=False, action='store_true',
        help='Use buildman to build U-Boot (assuming --build is given)')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel '+
        'over which gdbserver should communicate, e.g. localhost:1234')
    parser.addoption('--role', help='U-Boot board role (for Labgrid-sjg)')
    parser.addoption('--use-running-system', default=False, action='store_true',
        help="Assume that U-Boot is ready and don't wait for a prompt")
    parser.addoption('--timing', default=False, action='store_true',
                     help='Show info on test timing')

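# A typical invocation combining these options might look like this (the
# paths are purely illustrative):
#
#   ./test/py/test.py --bd sandbox --build \
#       --build-dir /tmp/b/sandbox --result-dir /tmp/results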

def run_build(config, source_dir, build_dir, board_type, log):
    """run_build: Build U-Boot

    Args:
        config: The pytest configuration.
        source_dir (str): Directory containing source code
        build_dir (str): Directory to build in
        board_type (str): board_type parameter (e.g. 'sandbox')
        log (Logfile): Log file to use
    """
    if config.getoption('buildman'):
        if build_dir != source_dir:
            dest_args = ['-o', build_dir, '-w']
        else:
            dest_args = ['-i']
        cmds = (['buildman', '--board', board_type] + dest_args,)
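        # A sketch of the resulting command for an out-of-tree build (the
        # paths are illustrative):
        #   buildman --board sandbox -o /tmp/b/sandbox -w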
        name = 'buildman'
    else:
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j{}'.format(os.cpu_count())],
        )
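        # i.e. roughly (illustrative paths; o_opt is empty for in-tree builds):
        #   make O=/tmp/b/sandbox -s sandbox_defconfig
        #   make O=/tmp/b/sandbox -s -j<ncpus>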
        name = 'make'

    with log.section(name):
        runner = log.get_runner(name, sys.stdout)
        for cmd in cmds:
            runner.run(cmd, cwd=source_dir)
        runner.close()
        log.status_pass('OK')

def get_details(config):
    """Obtain salient details about the board and directories to use

    Args:
        config (pytest.Config): pytest configuration

    Returns:
        tuple:
            str: Board type (U-Boot build name)
            str: Extra board type (where two U-Boot builds are needed)
            str: Identity for the lab board
            str: Build directory
            str: Extra build directory (where two U-Boot builds are needed)
            str: Source directory
    """
    role = config.getoption('role')

    # Get a few provided parameters
    build_dir = config.getoption('build_dir')
    build_dir_extra = config.getoption('build_dir_extra')

    # The source tree must be the current directory
    source_dir = os.path.dirname(os.path.dirname(TEST_PY_DIR))
    if role:
        # When using a role, build_dir and build_dir_extra are normally not set,
        # since they are picked up from Labgrid-sjg via the u-boot-test-getrole
        # script
        board_identity = role
        cmd = ['u-boot-test-getrole', role, '--configure']
        env = os.environ.copy()
        if build_dir:
            env['U_BOOT_BUILD_DIR'] = build_dir
        if build_dir_extra:
            env['U_BOOT_BUILD_DIR_EXTRA'] = build_dir_extra

        # Make sure the script sees that it is being run from pytest
        env['U_BOOT_SOURCE_DIR'] = source_dir

        proc = subprocess.run(cmd, stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT, encoding='utf-8',
                              env=env)
        if proc.returncode:
            raise ValueError(f"Error {proc.returncode} running {cmd}: "
                             f"'{proc.stderr}' '{proc.stdout}'")
        # For debugging
        # print('conftest: lab:', proc.stdout)
        vals = {}
        for line in proc.stdout.splitlines():
            item, value = line.split(' ', maxsplit=1)
            k = item.split(':')[-1]
            vals[k] = value
        # For debugging
        # print('conftest: lab info:', vals)
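        # Each output line is expected to have the form '<prefix>:<key> <value>';
        # a hypothetical line 'lab:board sandbox' would be reduced by the loop
        # above to vals['board'] = 'sandbox'.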

        # Read the build directories here, in case none were provided in the
        # command-line arguments
        (board_type, board_type_extra, default_build_dir,
         default_build_dir_extra) = (vals['board'],
            vals['board_extra'], vals['build_dir'], vals['build_dir_extra'])
    else:
        board_type = config.getoption('board_type')
        board_type_extra = config.getoption('board_type_extra')
        board_identity = config.getoption('board_identity')

        default_build_dir = source_dir + '/build-' + board_type
        default_build_dir_extra = source_dir + '/build-' + board_type_extra

    # Use the provided command-line arguments if present, else fall back to
    # the defaults determined above
    if not build_dir:
        build_dir = default_build_dir
    if not build_dir_extra:
        build_dir_extra = default_build_dir_extra

    return (board_type, board_type_extra, board_identity, build_dir,
            build_dir_extra, source_dir)

def pytest_xdist_setupnodes(config, specs):
    """Clear out any 'done' file from a previous build"""
    global build_done_file

    build_dir = get_details(config)[3]

    build_done_file = Path(build_dir) / 'build.done'
    if build_done_file.exists():
        os.remove(build_done_file)

def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """
    def parse_config(conf_file):
        """Parse a config file, loading it into the ubconfig container

        Args:
            conf_file: Filename to load (within build_dir)

        Raises:
            Exception if the file does not exist
        """
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                            'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = io.StringIO(ini_str)
            parser = configparser.RawConfigParser()
            parser.read_file(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

    global log
    global ubman_fix
    global ubconfig

    (board_type, board_type_extra, board_identity, build_dir, build_dir_extra,
     source_dir) = get_details(config)

    board_type_filename = board_type.replace('-', '_')
    board_identity_filename = board_identity.replace('-', '_')
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and not board_type.startswith('sandbox'):
        raise Exception('--gdbserver only supported with sandbox targets')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        worker_id = os.environ.get("PYTEST_XDIST_WORKER")
        with filelock.FileLock(os.path.join(build_dir, 'build.lock')):
            build_done_file = Path(build_dir) / 'build.done'
            if (not worker_id or worker_id == 'master' or
                not build_done_file.exists()):
                run_build(config, source_dir, build_dir, board_type, log)
                build_done_file.touch()

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()
    not_found = []

    with log.section('Loading lab modules', 'load_modules'):
        modules = [
            (ubconfig.brd, 'u_boot_board_' + board_type_filename),
            (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
            (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
                board_identity_filename),
        ]
        for (dict_to_fill, module_name) in modules:
            try:
                module = __import__(module_name)
            except ImportError:
                not_found.append(module_name)
                continue
            dict_to_fill.update(module.__dict__)
            log.info(f"Loaded {module}")

        if not_found:
            log.warning(f"Failed to find modules: {' '.join(not_found)}")

    ubconfig.buildconfig = dict()

    # buildman -k puts autoconf.mk in the rootdir, so handle this as well
    # as the standard U-Boot build which leaves it in include/autoconf.mk
    parse_config('.config')
    if os.path.exists(build_dir + '/' + 'autoconf.mk'):
        parse_config('autoconf.mk')
    else:
        parse_config('include/autoconf.mk')
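    # At this point ubconfig.buildconfig holds the merged Kconfig/autoconf
    # settings. For example, a .config line 'CONFIG_CMD_MEMORY=y' appears as
    # key 'config_cmd_memory' with value 'y' (RawConfigParser lower-cases
    # option names), which is what the buildconfigspec markers check below.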

    ubconfig.test_py_dir = TEST_PY_DIR
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.build_dir_extra = build_dir_extra
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_type_extra = board_type_extra
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.use_running_system = config.getoption('use_running_system')
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'
    ubconfig.connection_ok = True
    ubconfig.timing = config.getoption('timing')
    ubconfig.role = config.getoption('role')

    env_vars = (
        'board_type',
        'board_type_extra',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'build_dir_extra',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type.startswith('sandbox'):
        import console_sandbox
        ubman_fix = console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import console_board
        ubman_fix = console_board.ConsoleExecAttach(log, ubconfig)


def generate_ut_subtest(metafunc, fixture_name, sym_path):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.
        sym_path: Relative path to the symbol file with preceding '/'
            (e.g. '/u-boot.sym')

    Returns:
        Nothing.
    """
    fn = ubman_fix.config.build_dir + sym_path
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except:
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = RE_UT_TEST_LIST.search(l)
        if not m:
            continue
        suite, name = m.groups()

        # Tests marked with _norun should only be run manually using 'ut -f'
        if name.endswith('_norun'):
            continue

        vals.append(f'{suite} {name}')

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

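    # For example (hypothetical board-environment entries): a test taking an
    # 'env__usb_dev_port' parameter runs once per entry of a list defined as
    # env__usb_dev_ports = [{...}, {...}], while a singular key such as
    # env__net_uses_pci = {'fixture_id': 'pci', ...} provides exactly one
    # parametrization, whose id is taken from 'fixture_id' if present.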
    subconfigs = {
        'brd': ubman_fix.config.brd,
        'env': ubman_fix.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except:
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """
    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn, '/u-boot.sym')
            continue
        m_subtest = re.match('ut_(.)pl_subtest', fn)
        if m_subtest:
            spl_name = m_subtest.group(1)
            generate_ut_subtest(
                metafunc, fn, f'/{spl_name}pl/u-boot-{spl_name}pl.sym')
            continue
        generate_config(metafunc, fn)

@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's log fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return ubman_fix.log

@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return ubman_fix.config

@pytest.fixture(scope='function')
def ubman(request):
    """Generate the value of a test's ubman fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """
    if not ubconfig.connection_ok:
        pytest.skip('Cannot get target connection')
        return None
    try:
        ubman_fix.ensure_spawned()
    except OSError as err:
        handle_exception(ubconfig, ubman_fix, log, err, 'Lab failure', True)
    except Timeout as err:
        handle_exception(ubconfig, ubman_fix, log, err, 'Lab timeout', True)
    except BootFail as err:
        handle_exception(ubconfig, ubman_fix, log, err, 'Boot fail', True,
                         ubman_fix.get_spawn_output())
    except Unexpected as err:
        handle_exception(ubconfig, ubman_fix, log, err, 'Unexpected test output',
                         False)
    return ubman_fix

anchors = {}
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []

# Duration of each test:
#    key (string): test name
#    value (float): duration in ms
test_durations = {}


def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.append(item.name)


def show_timings():
    """Write timings for each test, along with a histogram"""

    def get_time_delta(msecs):
        """Convert milliseconds into a user-friendly string"""
        if msecs >= 1000:
            return f'{msecs / 1000:.1f}s'
        else:
            return f'{msecs:.0f}ms'

    def show_bar(key, msecs, value):
        """Show a single bar (line) of the histogram

        Args:
            key (str): Key to write on the left
            msecs (float): Total time for this bucket, in milliseconds
            value (int): Value to display, i.e. the relative length of the bar
        """
        if value:
            bar_length = int((value / max_count) * max_bar_length)
            print(f"{key:>8} : {get_time_delta(msecs):>7}  |{'#' * bar_length} {value}", file=buf)

    # Create the buckets we will use; each has a count and a total time
    bucket = {}
    for power in range(5):
        for i in [1, 2, 3, 4, 5, 7.5]:
            bucket[i * 10 ** power] = {'count': 0, 'msecs': 0.0}
    max_dur = max(bucket.keys())
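    # This gives bucket boundaries of 1, 2, 3, 4, 5, 7.5, 10, 20, ... up to
    # 75000ms, i.e. a roughly logarithmic scale.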

    # Collect counts for each bucket; if outside the range, add to too_long
    # Also show a sorted list of test timings from longest to shortest
    too_long = 0
    too_long_msecs = 0.0
    max_count = 0
    with log.section('Timing Report', 'timing_report'):
        for name, dur in sorted(test_durations.items(), key=lambda kv: kv[1],
                                reverse=True):
            log.info(f'{get_time_delta(dur):>8}  {name}')
            greater = [k for k in bucket.keys() if dur <= k]
            if greater:
                buck = bucket[min(greater)]
                buck['count'] += 1
                max_count = max(max_count, buck['count'])
                buck['msecs'] += dur
            else:
                too_long += 1
                too_long_msecs += dur

    # Set the maximum length of a histogram bar, in characters
    max_bar_length = 40

    # Show a summary with histogram
    buf = io.StringIO()
    with log.section('Timing Summary', 'timing_summary'):
        print('Duration :   Total  | Number of tests', file=buf)
        print(f'{"=" * 8} : {"=" * 7}  |{"=" * max_bar_length}', file=buf)
        for dur, buck in bucket.items():
            if buck['count']:
                label = get_time_delta(dur)
                show_bar(f'<{label}', buck['msecs'], buck['count'])
        if too_long:
            show_bar(f'>{get_time_delta(max_dur)}', too_long_msecs, too_long)
        log.info(buf.getvalue())
    if ubconfig.timing:
        print(buf.getvalue(), end='')


def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if ubman_fix:
        ubman_fix.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            if tests_warning:
                log.status_warning('%d passed with warning' % len(tests_warning))
                for test in tests_warning:
                    anchor = anchors.get(test, None)
                    log.status_warning('... ' + test, anchor)
            if tests_skipped:
                log.status_skipped('%d skipped' % len(tests_skipped))
                for test in tests_skipped:
                    anchor = anchors.get(test, None)
                    log.status_skipped('... ' + test, anchor)
            if tests_xpassed:
                log.status_xpass('%d xpass' % len(tests_xpassed))
                for test in tests_xpassed:
                    anchor = anchors.get(test, None)
                    log.status_xpass('... ' + test, anchor)
            if tests_xfailed:
                log.status_xfail('%d xfail' % len(tests_xfailed))
                for test in tests_xfailed:
                    anchor = anchors.get(test, None)
                    log.status_xfail('... ' + test, anchor)
            if tests_failed:
                log.status_fail('%d failed' % len(tests_failed))
                for test in tests_failed:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
            if tests_not_run:
                log.status_fail('%d not run' % len(tests_not_run))
                for test in tests_not_run:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
        show_timings()
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

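    # Usage sketch (board names are illustrative): a test decorated with
    #   @pytest.mark.boardspec('sandbox')
    #   @pytest.mark.boardspec('!sandbox_flattree')
    # runs only when the board type is 'sandbox' and never on
    # 'sandbox_flattree'.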
    required_boards = []
    for boards in item.iter_markers('boardspec'):
        board = boards.args[0]
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board "%s" not supported' % ubconfig.board_type)
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

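    # Usage sketch: @pytest.mark.buildconfigspec('cmd_memory') skips the test
    # unless CONFIG_CMD_MEMORY is enabled in the build under test, while
    # @pytest.mark.notbuildconfigspec('sandbox') skips it when CONFIG_SANDBOX
    # is enabled.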
    for options in item.iter_markers('buildconfigspec'):
        nomatch = True
        for arg in options.args:
            if ubconfig.buildconfig.get('config_' + arg.lower(), None):
                nomatch = False
        if nomatch:
            argsString = ', '.join(options.args)
            pytest.skip(f'.config features "{argsString}" not enabled')
    for options in item.iter_markers('notbuildconfigspec'):
        option = options.args[0]
        if ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature "%s" enabled' % option.lower())

def tool_is_in_path(tool):
    """Return True if the given tool is found in $PATH and is executable."""
    for path in os.environ["PATH"].split(os.pathsep):
        fn = os.path.join(path, tool)
        if os.path.isfile(fn) and os.access(fn, os.X_OK):
            return True
    return False

def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

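    # Usage sketch (the tool name is illustrative):
    #   @pytest.mark.requiredtool('dtc')
    # skips the test unless 'dtc' is found on $PATH.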
    for tools in item.iter_markers('requiredtool'):
        tool = tools.args[0]
        if not tool_is_in_path(tool):
            pytest.skip('tool "%s" not in $PATH' % tool)

def setup_singlethread(item):
    """Process any 'singlethread' marker for a test.

    Skip this test if running in parallel.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
    for single in item.iter_markers('singlethread'):
        worker_id = os.environ.get("PYTEST_XDIST_WORKER")
        if worker_id and worker_id != 'master':
            pytest.skip('must run single-threaded')

def setup_role(item):
    """Process any 'role' marker for a test.

    Skip this test if the role does not match.

    Args:
        item (pytest.Item): The pytest test item
    """
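    # Usage sketch (role names are illustrative): @pytest.mark.role('rpi4')
    # restricts the test to that lab role, while @pytest.mark.role('!rpi4')
    # excludes it.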
    required_roles = []
    for roles in item.iter_markers('role'):
        role = roles.args[0]
        if role.startswith('!'):
            if ubconfig.role == role[1:]:
                pytest.skip(f'role "{ubconfig.role}" not supported')
                return
        else:
            required_roles.append(role)
    if required_roles and ubconfig.role not in required_roles:
        pytest.skip(f'role "{ubconfig.role}" not supported')

def start_test_section(item):
    anchors[item.name] = log.start_section(item.name)

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    start_test_section(item)
    setup_boardspec(item)
    setup_buildconfigspec(item)
    setup_requiredtool(item)
    setup_singlethread(item)
    setup_role(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    log.get_and_reset_warning()
    ihook = item.ihook
    ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
    start = time.monotonic()
    reports = runtestprotocol(item, nextitem=nextitem)
    duration = round((time.monotonic() - start) * 1000, 1)
    ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if item.name not in anchors:
        start_test_section(item)

    failure_cleanup = False
    record_duration = True
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped
            record_duration = False

    msg += f' {duration} ms'
    if record_duration:
        test_durations[item.name] = duration

    if failure_cleanup:
        ubman_fix.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. ubman_fix.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        ubman_fix.cleanup_spawn()

    return True