#!/usr/bin/env python3
# Copyright (c) 2020-2024 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

'''
This test file contains tests for the testplan.py module of twister
'''
import sys
import os
from unittest import mock
import pytest

from contextlib import nullcontext

# Make the in-tree twisterlib package importable; ZEPHYR_BASE must be set.
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))

from twisterlib.statuses import TwisterStatus
from twisterlib.testplan import TestPlan, TestConfiguration, change_skip_to_error_if_integration
from twisterlib.testinstance import TestInstance
from twisterlib.testsuite import TestSuite
from twisterlib.platform import Platform
from twisterlib.quarantine import Quarantine
from twisterlib.error import TwisterRuntimeError


def test_testplan_add_testsuites_short(class_testplan):
    """ Testing the add_testsuites function of the TestPlan class in twister """
    # Test 1: Check that the list of testsuites after calling add_testsuites() is as expected
    class_testplan.SAMPLE_FILENAME = 'test_sample_app.yaml'
    class_testplan.TESTSUITE_FILENAME = 'test_data.yaml'
    class_testplan.add_testsuites()

    tests_rel_dir = 'scripts/tests/twister/test_data/testsuites/tests/'
    expected_testsuites = ['test_b.check_1',
                           'test_b.check_2',
                           'test_c.check_1',
                           'test_c.check_2',
                           'test_a.check_1',
                           'test_a.check_2',
                           'test_d.check_1',
                           'test_e.check_1',
                           'sample_test.app',
                           'test_config.main']
    testsuite_list = []
    for key in sorted(class_testplan.testsuites.keys()):
        testsuite_list.append(os.path.basename(os.path.normpath(key)))
    assert sorted(testsuite_list) == sorted(expected_testsuites)

    # Test 2: Assert the testsuite name is as expected and all values in the
    # testsuites dict are TestSuite objects
    suite = class_testplan.testsuites.get(tests_rel_dir + 'test_a/test_a.check_1')
    assert suite.name == tests_rel_dir + 'test_a/test_a.check_1'
    assert all(isinstance(n, TestSuite) for n in class_testplan.testsuites.values())

@pytest.mark.parametrize("board_root_dir", ["board_config_file_not_exist", "board_config"])
def test_add_configurations_short(test_data, class_env, board_root_dir):
    """ Testing the add_configurations function of the TestPlan class in Twister
    Test: Asserting on the default platforms list
    """
    class_env.board_roots = [os.path.abspath(test_data + board_root_dir)]
    plan = TestPlan(class_env)
    plan.test_config = TestConfiguration(class_env.test_config)
    if board_root_dir == "board_config":
        plan.add_configurations()
        print(sorted(plan.default_platforms))
        assert sorted(plan.default_platforms) == sorted(['demo_board_1/unit_testing',
                                                         'demo_board_3/unit_testing'])
    elif board_root_dir == "board_config_file_not_exist":
        plan.add_configurations()
        assert sorted(plan.default_platforms) != sorted(['demo_board_1'])

    plan.levels = plan.test_config.get_levels(plan.scenarios)

def test_get_all_testsuites_short(class_testplan, all_testsuites_dict):
    """ Testing the get_all_tests function of the TestPlan class in Twister """
    plan = class_testplan
    plan.testsuites = all_testsuites_dict
    expected_tests = ['sample_test.app', 'test_a.check_1.1a',
                      'test_a.check_1.1c',
                      'test_a.check_1.2a', 'test_a.check_1.2b',
                      'test_a.check_1.Unit_1c', 'test_a.check_1.unit_1a',
                      'test_a.check_1.unit_1b', 'test_a.check_2.1a',
                      'test_a.check_2.1c', 'test_a.check_2.2a',
                      'test_a.check_2.2b', 'test_a.check_2.Unit_1c',
                      'test_a.check_2.unit_1a', 'test_a.check_2.unit_1b',
                      'test_b.check_1', 'test_b.check_2', 'test_c.check_1',
                      'test_c.check_2', 'test_d.check_1.unit_1a',
                      'test_d.check_1.unit_1b',
                      'test_e.check_1.feature5.1a',
                      'test_e.check_1.feature5.1b',
                      'test_config.main']

    assert sorted(plan.get_all_tests()) == sorted(expected_tests)

def test_get_platforms_short(class_testplan, platforms_list):
    """ Testing the get_platform function of the TestPlan class in Twister """
    plan = class_testplan
    plan.platforms = platforms_list
    platform = plan.get_platform("demo_board_1")
    assert isinstance(platform, Platform)
    assert platform.name == "demo_board_1/unit_testing"

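# Each TESTDATA_PART1 entry is (tc_attribute, tc_value, plat_attribute, plat_value,
# expected_discards): one testsuite attribute and one platform attribute to set up
# before filtering, plus the discard reason every filtered instance should report.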
TESTDATA_PART1 = [
    ("toolchain_allow", ['gcc'], None, None, "Not in testsuite toolchain allow list"),
    ("platform_allow", ['demo_board_1/unit_testing'], None, None, "Not in testsuite platform allow list"),
    ("toolchain_exclude", ['zephyr'], None, None, "In test case toolchain exclude"),
    ("platform_exclude", ['demo_board_2'], None, None, "In test case platform exclude"),
    ("arch_exclude", ['x86'], None, None, "In test case arch exclude"),
    ("arch_allow", ['arm'], None, None, "Not in test case arch allow list"),
    ("skip", True, None, None, "Skip filter"),
    ("tags", set(['sensor', 'bluetooth']), "ignore_tags", ['bluetooth'], "Excluded tags per platform (exclude_tags)"),
    ("min_flash", "2024", "flash", "1024", "Not enough FLASH"),
    ("min_ram", "500", "ram", "256", "Not enough RAM"),
    ("None", "None", "env", ['BSIM_OUT_PATH', 'demo_env'], "Environment (BSIM_OUT_PATH, demo_env) not satisfied"),
    ("build_on_all", True, None, None, "Platform is excluded on command line."),
    ("build_on_all", True, "level", "foobar", "Unknown test level 'foobar'"),
    (None, None, "supported_toolchains", ['gcc', 'xcc', 'xt-clang'], "Not supported by the toolchain"),
]


@pytest.mark.parametrize("tc_attribute, tc_value, plat_attribute, plat_value, expected_discards",
                         TESTDATA_PART1)
def test_apply_filters_part1(class_testplan, all_testsuites_dict, platforms_list,
                             tc_attribute, tc_value, plat_attribute, plat_value, expected_discards):
    """ Testing the apply_filters function of the TestPlan class in Twister
    Part 1: The response of the apply_filters function has
            appropriate values according to the filters
    """
    plan = class_testplan
    if tc_attribute is None and plat_attribute is None:
        plan.apply_filters()

    plan.platforms = platforms_list
    plan.platform_names = [p.name for p in platforms_list]
    plan.testsuites = all_testsuites_dict
    for plat in plan.platforms:
        if plat_attribute == "ignore_tags":
            plat.ignore_tags = plat_value
        if plat_attribute == "flash":
            plat.flash = plat_value
        if plat_attribute == "ram":
            plat.ram = plat_value
        if plat_attribute == "env":
            plat.env = plat_value
            plat.env_satisfied = False
        if plat_attribute == "supported_toolchains":
            plat.supported_toolchains = plat_value
    for _, testcase in plan.testsuites.items():
        if tc_attribute == "toolchain_allow":
            testcase.toolchain_allow = tc_value
        if tc_attribute == "platform_allow":
            testcase.platform_allow = tc_value
        if tc_attribute == "toolchain_exclude":
            testcase.toolchain_exclude = tc_value
        if tc_attribute == "platform_exclude":
            testcase.platform_exclude = tc_value
        if tc_attribute == "arch_exclude":
            testcase.arch_exclude = tc_value
        if tc_attribute == "arch_allow":
            testcase.arch_allow = tc_value
        if tc_attribute == "skip":
            testcase.skip = tc_value
        if tc_attribute == "tags":
            testcase.tags = tc_value
        if tc_attribute == "min_flash":
            testcase.min_flash = tc_value
        if tc_attribute == "min_ram":
            testcase.min_ram = tc_value

    if plat_attribute == "level":
        plan.options.level = plat_value

    if tc_attribute == "build_on_all":
        for _, testcase in plan.testsuites.items():
            testcase.build_on_all = tc_value
        plan.apply_filters(exclude_platform=['demo_board_1'])
    elif plat_attribute == "supported_toolchains":
        plan.apply_filters(force_toolchain=False,
                           exclude_platform=['demo_board_1'],
                           platform=['demo_board_2/unit_testing'])
    elif tc_attribute is None and plat_attribute is None:
        plan.apply_filters()
    else:
        plan.apply_filters(exclude_platform=['demo_board_1'],
                           platform=['demo_board_2/unit_testing'])

    filtered_instances = list(filter(lambda item: item.status == TwisterStatus.FILTER, plan.instances.values()))
    for d in filtered_instances:
        assert d.reason == expected_discards

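# Each TESTDATA_PART2 entry is (extra_filter, extra_filter_value, expected_discards):
# a single apply_filters() keyword argument and the discard reason it should produce.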
TESTDATA_PART2 = [
    ("runnable", "True", "Not runnable on device"),
    ("exclude_tag", ['test_a'], "Command line testsuite exclude filter"),
    ("run_individual_tests", ['scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1'], "TestSuite name filter"),
    ("arch", ['arm_test'], "Command line testsuite arch filter"),
    ("tag", ['test_d'], "Command line testsuite tag filter")
]


@pytest.mark.parametrize("extra_filter, extra_filter_value, expected_discards", TESTDATA_PART2)
def test_apply_filters_part2(class_testplan, all_testsuites_dict,
                             platforms_list, extra_filter, extra_filter_value, expected_discards):
    """ Testing the apply_filters function of the TestPlan class in Twister
    Part 2: The response of the apply_filters function (discard dictionary) has
            appropriate values according to the filters
    """

    class_testplan.platforms = platforms_list
    class_testplan.platform_names = [p.name for p in platforms_list]
    class_testplan.testsuites = all_testsuites_dict
    kwargs = {
        extra_filter: extra_filter_value,
        "exclude_platform": [
            'demo_board_1'
        ],
        "platform": [
            'demo_board_2'
        ]
    }
    class_testplan.apply_filters(**kwargs)
    filtered_instances = list(filter(lambda item: item.status == TwisterStatus.FILTER, class_testplan.instances.values()))
    for d in filtered_instances:
        assert d.reason == expected_discards


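# Each TESTDATA_PART3 entry is (tc_min_flash, plat_flash, tc_min_ram, plat_ram):
# edge-case flash/RAM values (zero, negative, or exactly equal) that must not
# cause any instance to be filtered out.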
TESTDATA_PART3 = [
    (20, 20, -1, 0),
    (-2, -1, 10, 20),
    (0, 0, 0, 0)
]

@pytest.mark.parametrize("tc_min_flash, plat_flash, tc_min_ram, plat_ram",
                         TESTDATA_PART3)
def test_apply_filters_part3(class_testplan, all_testsuites_dict, platforms_list,
                             tc_min_flash, plat_flash, tc_min_ram, plat_ram):
    """ Testing the apply_filters function of the TestPlan class in Twister
    Part 3: Testing edge cases for the RAM and flash values of platforms & testsuites
    """
    class_testplan.platforms = platforms_list
    class_testplan.platform_names = [p.name for p in platforms_list]
    class_testplan.testsuites = all_testsuites_dict

    for plat in class_testplan.platforms:
        plat.flash = plat_flash
        plat.ram = plat_ram
    for _, testcase in class_testplan.testsuites.items():
        testcase.min_ram = tc_min_ram
        testcase.min_flash = tc_min_flash
    class_testplan.apply_filters(exclude_platform=['demo_board_1'],
                                 platform=['demo_board_2'])

    filtered_instances = list(filter(lambda item: item.status == TwisterStatus.FILTER, class_testplan.instances.values()))
    assert not filtered_instances

def test_add_instances_short(tmp_path, class_env, all_testsuites_dict, platforms_list):
    """ Testing the add_instances() function of the TestPlan class in Twister
    Test 1: Keys of the instances dictionary have the expected values
            (platform name + toolchain + testsuite name)
    Test 2: Values of the 'instances' dictionary are instances of the
            'TestInstance' class
    Test 3: Values of the 'instances' dictionary have the expected values.
    """
    class_env.outdir = tmp_path
    plan = TestPlan(class_env)
    plan.platforms = platforms_list
    platform = plan.get_platform("demo_board_2")
    instance_list = []
    for _, testcase in all_testsuites_dict.items():
        instance = TestInstance(testcase, platform, 'zephyr', class_env.outdir)
        instance_list.append(instance)
    plan.add_instances(instance_list)
    assert list(plan.instances.keys()) == \
           [platform.name + '/zephyr/' + s for s in list(all_testsuites_dict.keys())]
    assert all(isinstance(n, TestInstance) for n in list(plan.instances.values()))
    assert list(plan.instances.values()) == instance_list


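# The QUARANTINE_* dicts map instance names (platform/toolchain/testsuite path)
# to the expected quarantine reason; they mirror the quarantine YAML files under
# test_data/quarantines referenced in the parametrization below.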
QUARANTINE_BASIC = {
    'demo_board_1/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1': 'a1 on board_1 and board_3',
    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1': 'a1 on board_1 and board_3'
}

QUARANTINE_WITH_REGEXP = {
    'demo_board_2/unit_testing/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_2': 'a2 and c2 on x86',
    'demo_board_1/unit_testing/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1': 'all test_d',
    'demo_board_3/unit_testing/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1': 'all test_d',
    'demo_board_2/unit_testing/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1': 'all test_d',
    'demo_board_2/unit_testing/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_2': 'a2 and c2 on x86'
}

QUARANTINE_PLATFORM = {
    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_1': 'all on board_3',
    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_a/test_a.check_2': 'all on board_3',
    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_d/test_d.check_1': 'all on board_3',
    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_b/test_b.check_1': 'all on board_3',
    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_b/test_b.check_2': 'all on board_3',
    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_1': 'all on board_3',
    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_c/test_c.check_2': 'all on board_3',
    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_e/test_e.check_1': 'all on board_3',
    'demo_board_3/zephyr/scripts/tests/twister/test_data/testsuites/tests/test_config/test_config.main': 'all on board_3'
}

QUARANTINE_MULTIFILES = {
    **QUARANTINE_BASIC,
    **QUARANTINE_WITH_REGEXP
}

@pytest.mark.parametrize(
    ("quarantine_files, quarantine_verify, expected_val"),
    [
        (['basic.yaml'], False, QUARANTINE_BASIC),
        (['with_regexp.yaml'], False, QUARANTINE_WITH_REGEXP),
        (['with_regexp.yaml'], True, QUARANTINE_WITH_REGEXP),
        (['platform.yaml'], False, QUARANTINE_PLATFORM),
        (['basic.yaml', 'with_regexp.yaml'], False, QUARANTINE_MULTIFILES),
        (['empty.yaml'], False, {})
    ],
    ids=[
        'basic',
        'with_regexp',
        'quarantine_verify',
        'platform',
        'multifiles',
        'empty'
    ])
def test_quarantine_short(class_testplan, platforms_list, test_data,
                          quarantine_files, quarantine_verify, expected_val):
    """ Testing the quarantine feature in Twister
    """
    class_testplan.options.all = True
    class_testplan.platforms = platforms_list
    class_testplan.platform_names = [p.name for p in platforms_list]
    class_testplan.TESTSUITE_FILENAME = 'test_data.yaml'
    class_testplan.add_testsuites()

    quarantine_list = [
        os.path.join(test_data, 'quarantines', quarantine_file) for quarantine_file in quarantine_files
    ]
    class_testplan.quarantine = Quarantine(quarantine_list)
    class_testplan.options.quarantine_verify = quarantine_verify
    class_testplan.apply_filters()
    for testname, instance in class_testplan.instances.items():
        if quarantine_verify:
            if testname in expected_val:
                assert instance.status == TwisterStatus.NONE
            else:
                assert instance.status == TwisterStatus.FILTER
                assert instance.reason == "Not under quarantine"
        else:
            if testname in expected_val:
                assert instance.status == TwisterStatus.SKIP
                assert instance.reason == "Quarantine: " + expected_val[testname]
            else:
                assert instance.status == TwisterStatus.NONE


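# Each TESTDATA_PART4 entry is (testpath, required_snippets, expected_filtered_len,
# expected_filtered_reason); a None expectation skips the corresponding assertion.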
TESTDATA_PART4 = [
    (os.path.join('test_d', 'test_d.check_1'), ['dummy'],
     None, 'Snippet not supported'),
    (os.path.join('test_c', 'test_c.check_1'), ['cdc-acm-console'],
     0, None),
    (os.path.join('test_d', 'test_d.check_1'), ['dummy', 'cdc-acm-console'],
     2, 'Snippet not supported'),
]

@pytest.mark.parametrize(
    'testpath, required_snippets, expected_filtered_len, expected_filtered_reason',
    TESTDATA_PART4,
    ids=['app', 'global', 'multiple']
)
def test_required_snippets_short(
    class_testplan,
    all_testsuites_dict,
    platforms_list,
    testpath,
    required_snippets,
    expected_filtered_len,
    expected_filtered_reason
):
    """ Testing filtering on required snippets in the TestPlan class in Twister """
    plan = class_testplan
    testpath = os.path.join('scripts', 'tests', 'twister', 'test_data',
                            'testsuites', 'tests', testpath)
    testsuite = class_testplan.testsuites.get(testpath)
    plan.platforms = platforms_list
    plan.platform_names = [p.name for p in platforms_list]
    plan.testsuites = {testpath: testsuite}

    for _, testcase in plan.testsuites.items():
        testcase.exclude_platform = []
        testcase.required_snippets = required_snippets
        testcase.build_on_all = True

    plan.apply_filters()

    filtered_instances = list(
        filter(lambda item: item.status == TwisterStatus.FILTER, plan.instances.values())
    )
    if expected_filtered_len is not None:
        assert len(filtered_instances) == expected_filtered_len
    if expected_filtered_reason is not None:
        for d in filtered_instances:
            assert d.reason == expected_filtered_reason


def test_testplan_get_level():
    testplan = TestPlan(env=mock.Mock())
    lvl1 = mock.Mock()
    lvl1.name = 'a lvl'
    lvl2 = mock.Mock()
    lvl2.name = 'a lvl'
    lvl3 = mock.Mock()
    lvl3.name = 'other lvl'
    testplan.levels.append(lvl1)
    testplan.levels.append(lvl2)
    testplan.levels.append(lvl3)

    name = 'a lvl'

    res = testplan.get_level(name)
    assert res == lvl1

    res = testplan.get_level(name)
    assert res == lvl1

    lvl_missed = mock.Mock()
    lvl_missed.name = 'missed lvl'
    res = testplan.get_level('missed_lvl')
    assert res is None

    testplan.levels.remove(lvl1)
    testplan.levels.remove(lvl2)

    res = testplan.get_level(name)
    assert res is None


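# Each TESTDATA_1 entry is (config_yaml, expected_scenarios): a test-configuration
# YAML body and the scenarios each level should contain after applying inheritance
# (lvl2 inherits everything lvl1 adds).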
TESTDATA_1 = [
    ('', {}),
    (
"""\
levels:
  - name: lvl1
    adds:
      - sc1
      - sc2
    inherits: []
  - name: lvl2
    adds:
      - sc1-1
      - sc1-2
    inherits: [lvl1]
""",
    {
        'lvl1': ['sc1', 'sc2'],
        'lvl2': ['sc1-1', 'sc1-2', 'sc1', 'sc2']
    }
    ),
]

@pytest.mark.parametrize(
    'config_yaml, expected_scenarios',
    TESTDATA_1,
    ids=['no config', 'valid config']
)
def test_testplan_parse_configuration(tmp_path, config_yaml, expected_scenarios):
    testplan = TestPlan(env=mock.Mock())
    testplan.scenarios = ['sc1', 'sc1-1', 'sc1-2', 'sc2']

    tmp_config_file = tmp_path / 'config_file.yaml'
    if config_yaml:
        tmp_config_file.write_text(config_yaml)

    with pytest.raises(TwisterRuntimeError) if not config_yaml else nullcontext():
        tc = TestConfiguration(tmp_config_file)
        testplan.levels = tc.get_levels(testplan.scenarios)
        if not testplan.levels:
            assert expected_scenarios == {}
        for level in testplan.levels:
            assert sorted(level.scenarios) == sorted(expected_scenarios[level.name])


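# Each TESTDATA_2 entry is (sub_tests, expected_outs, expect_error): the requested
# sub-test names, substrings expected on stdout, and whether find_subtests()
# should raise TwisterRuntimeError.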
TESTDATA_2 = [
    ([], [], False),
    (['ts1.tc3'], [], True),
    (['ts2.tc2'], ['- ts2'], False),
]

@pytest.mark.parametrize(
    'sub_tests, expected_outs, expect_error',
    TESTDATA_2,
    ids=['no subtests', 'subtests not found', 'valid subtests']
)
def test_testplan_find_subtests(
    capfd,
    sub_tests,
    expected_outs,
    expect_error
):
    testplan = TestPlan(env=mock.Mock())
    testplan.options = mock.Mock(sub_test=sub_tests)
    testplan.run_individual_testsuite = []
    testplan.testsuites = {
        'ts1': mock.Mock(
            testcases=[
                mock.Mock(),
                mock.Mock(),
            ]
        ),
        'ts2': mock.Mock(
            testcases=[
                mock.Mock(),
                mock.Mock(),
                mock.Mock(),
            ]
        )
    }
    testplan.testsuites['ts1'].name = 'ts1'
    testplan.testsuites['ts1'].testcases[0].name = 'ts1.tc1'
    testplan.testsuites['ts1'].testcases[1].name = 'ts1.tc2'
    testplan.testsuites['ts2'].name = 'ts2'
    testplan.testsuites['ts2'].testcases[0].name = 'ts2.tc1'
    testplan.testsuites['ts2'].testcases[1].name = 'ts2.tc2'
    testplan.testsuites['ts2'].testcases[2].name = 'ts2.tc3'

    with pytest.raises(TwisterRuntimeError) if expect_error else nullcontext():
        testplan.find_subtests()

    out, err = capfd.readouterr()
    sys.stdout.write(out)
    sys.stderr.write(err)

    assert all([printout in out for printout in expected_outs])


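# Each TESTDATA_3 entry is (added_testsuite_count, load_errors, ql, qv, ql_data,
# exception, expected_logs): discovery inputs, including quarantine file names and
# contents and the quarantine-verify flag, plus the exception and log lines that
# discover() should produce.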
TESTDATA_3 = [
    (0, 0, [], False, [], TwisterRuntimeError, []),
    (1, 1, [], False, [], TwisterRuntimeError, []),
    (1, 0, [], True, [], TwisterRuntimeError, ['No quarantine list given to be verified']),
    (1, 0, ['qfile.yaml'], False, ['- platforms:\n  - demo_board_3\n  comment: "board_3"'], None, []),
]

@pytest.mark.parametrize(
    'added_testsuite_count, load_errors, ql, qv, ql_data, exception, expected_logs',
    TESTDATA_3,
    ids=['no tests', 'load errors', 'quarantine verify without quarantine list',
#         'empty quarantine file',
         'valid quarantine file']
)
def test_testplan_discover(
    tmp_path,
    caplog,
    added_testsuite_count,
    load_errors,
    ql,
    qv,
    ql_data,
    exception,
    expected_logs
):
    # Just a dummy test configuration file
    tc = "options: {}\n"
    tmp_tc = tmp_path / 'test_config.yaml'
    tmp_tc.write_text(tc)

    for qf, data in zip(ql, ql_data):
        tmp_qf = tmp_path / qf
        tmp_qf.write_text(data)

    env = mock.Mock()
    env.test_config = tmp_tc
    testplan = TestPlan(env=env)
    testplan.options = mock.Mock(
        test_pattern=[],
        test='ts1',
        quarantine_list=[tmp_path / qf for qf in ql],
        quarantine_verify=qv
    )
    testplan.testsuites = {
        'ts1': mock.Mock(id=1),
        'ts2': mock.Mock(id=2),
    }
    testplan.run_individual_testsuite = 'ts0'
    testplan.load_errors = load_errors
    testplan.add_testsuites = mock.Mock(return_value=added_testsuite_count)
    testplan.find_subtests = mock.Mock()
    testplan.report_duplicates = mock.Mock()
    testplan.test_config = mock.Mock()
    testplan.add_configurations = mock.Mock()

    with pytest.raises(exception) if exception else nullcontext():
        testplan.discover()

    testplan.add_testsuites.assert_called_once_with(testsuite_filter='ts1', testsuite_pattern=[])
    assert all([log in caplog.text for log in expected_logs])


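# Each TESTDATA_4 entry is (report_suffix, only_failed, load_tests, test_only,
# subset, exception, expected_selected_platforms, expected_generate_subset_args);
# subset uses the 'n/m' command-line form, i.e. subset n out of m sets.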
TESTDATA_4 = [
    (None, None, None, None, '00',
     TwisterRuntimeError, [], []),
    (None, True, None, None, '6/4',
     TwisterRuntimeError, set(['t-p3', 't-p4', 't-p1', 't-p2']), []),
    (None, None, 'load_tests.json', None, '0/4',
     TwisterRuntimeError, set(['lt-p1', 'lt-p3', 'lt-p4', 'lt-p2']), []),
    ('suffix', None, None, True, '2/4',
     None, set(['ts-p4', 'ts-p2', 'ts-p1', 'ts-p3']), [2, 4]),
]

@pytest.mark.parametrize(
    'report_suffix, only_failed, load_tests, test_only, subset,' \
    ' exception, expected_selected_platforms, expected_generate_subset_args',
    TESTDATA_4,
    ids=['apply_filters only', 'only failed', 'load tests', 'test only']
)
def test_testplan_load(
    tmp_path,
    report_suffix,
    only_failed,
    load_tests,
    test_only,
    subset,
    exception,
    expected_selected_platforms,
    expected_generate_subset_args
):
    twister_json = """\
{
    "testsuites": [
        {
            "name": "ts1",
            "platform": "t-p1",
            "toolchain": "zephyr",
            "testcases": []
        },
        {
            "name": "ts1",
            "platform": "t-p2",
            "toolchain": "zephyr",
            "testcases": []
        },
        {
            "name": "ts2",
            "platform": "t-p3",
            "toolchain": "zephyr",
            "testcases": []
        },
        {
            "name": "ts2",
            "platform": "t-p4",
            "toolchain": "zephyr",
            "testcases": []
        }
    ]
}
"""
    twister_file = tmp_path / 'twister.json'
    twister_file.write_text(twister_json)

    twister_suffix_json = """\
{
    "testsuites": [
        {
            "name": "ts1",
            "platform": "ts-p1",
            "toolchain": "zephyr",
            "testcases": []
        },
        {
            "name": "ts1",
            "platform": "ts-p2",
            "toolchain": "zephyr",
            "testcases": []
        },
        {
            "name": "ts2",
            "platform": "ts-p3",
            "toolchain": "zephyr",
            "testcases": []
        },
        {
            "name": "ts2",
            "platform": "ts-p4",
            "toolchain": "zephyr",
            "testcases": []
        }
    ]
}
"""
    twister_suffix_file = tmp_path / 'twister_suffix.json'
    twister_suffix_file.write_text(twister_suffix_json)

    load_tests_json = """\
{
    "testsuites": [
        {
            "name": "ts1",
            "platform": "lt-p1",
            "toolchain": "zephyr",
            "testcases": []
        },
        {
            "name": "ts1",
            "platform": "lt-p2",
            "toolchain": "zephyr",
            "testcases": []
        },
        {
            "name": "ts2",
            "platform": "lt-p3",
            "toolchain": "zephyr",
            "testcases": []
        },
        {
            "name": "ts2",
            "platform": "lt-p4",
            "toolchain": "zephyr",
            "testcases": []
        }
    ]
}
"""
    load_tests_file = tmp_path / 'load_tests.json'
    load_tests_file.write_text(load_tests_json)

    testplan = TestPlan(env=mock.Mock(outdir=tmp_path))
    testplan.testsuites = {
        'ts1': mock.Mock(testcases=[], extra_configs=[]),
        'ts2': mock.Mock(testcases=[], extra_configs=[]),
    }
    testplan.testsuites['ts1'].name = 'ts1'
    testplan.testsuites['ts2'].name = 'ts2'
    testplan.options = mock.Mock(
        report_summary=None,
        outdir=tmp_path,
        report_suffix=report_suffix,
        only_failed=only_failed,
        load_tests=tmp_path / load_tests if load_tests else None,
        test_only=test_only,
        exclude_platform=['t-p0', 't-p1',
                          'ts-p0', 'ts-p1',
                          'lt-p0', 'lt-p1'],
        platform=['t-p1', 't-p2', 't-p3', 't-p4',
                  'ts-p1', 'ts-p2', 'ts-p3', 'ts-p4',
                  'lt-p1', 'lt-p2', 'lt-p3', 'lt-p4'],
        subset=subset
    )
    testplan.platforms = []
    for platform_name in ['t-p1', 't-p2', 't-p3', 't-p4',
                          'ts-p1', 'ts-p2', 'ts-p3', 'ts-p4',
                          'lt-p1', 'lt-p2', 'lt-p3', 'lt-p4']:
        platform = mock.Mock()
        platform.name = platform_name
        platform.aliases = [platform_name]
        platform.normalized_name = platform_name
        testplan.platforms.append(platform)
    testplan.generate_subset = mock.Mock()
    testplan.apply_filters = mock.Mock()

    with mock.patch('twisterlib.testinstance.TestInstance.create_overlay', mock.Mock()), \
         mock.patch('twisterlib.testinstance.TestInstance.check_runnable', return_value=True), \
         pytest.raises(exception) if exception else nullcontext():
        testplan.load()

    assert testplan.selected_platforms == expected_selected_platforms
    if expected_generate_subset_args:
        testplan.generate_subset.assert_called_once_with(*expected_generate_subset_args)
    else:
        testplan.generate_subset.assert_not_called()


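# Each TESTDATA_5 entry is (device_testing, shuffle, seed, subset, sets,
# expected_subset): the requested subset index and total set count, and the
# instance names expected in that subset. Note that the skipped/errored plat3
# instances only ever land in the first set.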
TESTDATA_5 = [
    (False, False, None, 1, 2,
     ['plat1/testA', 'plat1/testB', 'plat1/testC',
      'plat3/testA', 'plat3/testB', 'plat3/testC']),
    (False, False, None, 1, 5,
     ['plat1/testA',
      'plat3/testA', 'plat3/testB', 'plat3/testC']),
    (False, False, None, 2, 2,
     ['plat2/testA', 'plat2/testB']),
    (True, False, None, 1, 2,
     ['plat1/testA', 'plat2/testA', 'plat1/testB',
      'plat3/testA', 'plat3/testB', 'plat3/testC']),
    (True, False, None, 2, 2,
     ['plat2/testB', 'plat1/testC']),
    (True, True, 123, 1, 2,
     ['plat2/testA', 'plat2/testB', 'plat1/testC',
      'plat3/testB', 'plat3/testA', 'plat3/testC']),
    (True, True, 123, 2, 2,
     ['plat1/testB', 'plat1/testA']),
]

@pytest.mark.parametrize(
    'device_testing, shuffle, seed, subset, sets, expected_subset',
    TESTDATA_5,
    ids=['subset 1', 'subset 1 out of 5', 'subset 2',
         'device testing, subset 1', 'device testing, subset 2',
         'device testing, shuffle with seed, subset 1',
         'device testing, shuffle with seed, subset 2']
)
def test_testplan_generate_subset(
    device_testing,
    shuffle,
    seed,
    subset,
    sets,
    expected_subset
):
    testplan = TestPlan(env=mock.Mock())
    testplan.options = mock.Mock(
        device_testing=device_testing,
        shuffle_tests=shuffle,
        shuffle_tests_seed=seed
    )
    testplan.instances = {
        'plat1/testA': mock.Mock(status=TwisterStatus.NONE),
        'plat1/testB': mock.Mock(status=TwisterStatus.NONE),
        'plat1/testC': mock.Mock(status=TwisterStatus.NONE),
        'plat2/testA': mock.Mock(status=TwisterStatus.NONE),
        'plat2/testB': mock.Mock(status=TwisterStatus.NONE),
        'plat3/testA': mock.Mock(status=TwisterStatus.SKIP),
        'plat3/testB': mock.Mock(status=TwisterStatus.SKIP),
        'plat3/testC': mock.Mock(status=TwisterStatus.ERROR),
    }

    testplan.generate_subset(subset, sets)

    assert list(testplan.instances.keys()) == expected_subset


def test_testplan_handle_modules():
    testplan = TestPlan(env=mock.Mock())

    modules = [mock.Mock(meta={'name': 'name1'}),
               mock.Mock(meta={'name': 'name2'})]

    with mock.patch('twisterlib.testplan.parse_modules', return_value=modules):
        testplan.handle_modules()

    assert testplan.modules == ['name1', 'name2']


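# Each TESTDATA_6 entry is (test_tree, list_tests, list_tags, expected_res,
# expected_method): the report flags to set, the expected return code of
# report(), and the single reporting method that should be invoked.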
TESTDATA_6 = [
    (True, False, False, 0, 'report_test_tree'),
    (True, True, False, 0, 'report_test_tree'),
    (True, False, True, 0, 'report_test_tree'),
    (True, True, True, 0, 'report_test_tree'),
    (False, True, False, 0, 'report_test_list'),
    (False, True, True, 0, 'report_test_list'),
    (False, False, True, 0, 'report_tag_list'),
    (False, False, False, 1, None),
]

@pytest.mark.parametrize(
    'test_tree, list_tests, list_tags, expected_res, expected_method',
    TESTDATA_6,
    ids=['test tree', 'test tree + test list', 'test tree + tag list',
         'test tree + test list + tag list', 'test list',
         'test list + tag list', 'tag list', 'no report']
)
def test_testplan_report(
    test_tree,
    list_tests,
    list_tags,
    expected_res,
    expected_method
):
    testplan = TestPlan(env=mock.Mock())
    testplan.report_test_tree = mock.Mock()
    testplan.report_test_list = mock.Mock()
    testplan.report_tag_list = mock.Mock()

    testplan.options = mock.Mock(
        test_tree=test_tree,
        list_tests=list_tests,
        list_tags=list_tags,
    )

    res = testplan.report()

    assert res == expected_res

    methods = ['report_test_tree', 'report_test_list', 'report_tag_list']
    if expected_method:
        methods.remove(expected_method)
        getattr(testplan, expected_method).assert_called_once()
    for method in methods:
        getattr(testplan, method).assert_not_called()


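# Each TESTDATA_7 entry is (testsuites, expected_error, error_msg, expected_logs):
# mocked testsuites with overlapping or distinct scenario lists, plus the error or
# log output expected from report_duplicates().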
TESTDATA_7 = [
    (
        [
            mock.Mock(
                yamlfile='a.yaml',
                scenarios=['scenario1', 'scenario2']
            ),
            mock.Mock(
                yamlfile='b.yaml',
                scenarios=['scenario1']
            )
        ],
        TwisterRuntimeError,
        'Duplicated test scenarios found:\n' \
        '- scenario1 found in:\n' \
        '  - a.yaml\n' \
        '  - b.yaml\n',
        []
    ),
    (
        [
            mock.Mock(
                yamlfile='a.yaml',
                scenarios=['scenario.a.1', 'scenario.a.2']
            ),
            mock.Mock(
                yamlfile='b.yaml',
                scenarios=['scenario.b.1']
            )
        ],
        None,
        None,
        ['No duplicates found.']
    ),
]

@pytest.mark.parametrize(
    'testsuites, expected_error, error_msg, expected_logs',
    TESTDATA_7,
    ids=['a duplicate', 'no duplicates']
)
def test_testplan_report_duplicates(
    capfd,
    caplog,
    testsuites,
    expected_error,
    error_msg,
    expected_logs
):
    def mock_get(name):
        return list(filter(lambda x: name in x.scenarios, testsuites))

    testplan = TestPlan(env=mock.Mock())
    testplan.scenarios = [scenario for testsuite in testsuites \
                                   for scenario in testsuite.scenarios]
    testplan.get_testsuite = mock.Mock(side_effect=mock_get)

    with pytest.raises(expected_error) if expected_error is not None else \
         nullcontext() as err:
        testplan.report_duplicates()

    if expected_error:
        assert str(err._excinfo[1]) == error_msg

    assert all([log in caplog.text for log in expected_logs])


def test_testplan_report_tag_list(capfd):
    testplan = TestPlan(env=mock.Mock())
    testplan.testsuites = {
        'testsuite0': mock.Mock(tags=set(['tag1', 'tag2'])),
        'testsuite1': mock.Mock(tags=set(['tag1', 'tag2', 'tag3'])),
        'testsuite2': mock.Mock(tags=set(['tag1', 'tag3'])),
        'testsuite3': mock.Mock(tags=set(['tag']))
    }

    testplan.report_tag_list()

    out, err = capfd.readouterr()
    sys.stdout.write(out)
    sys.stderr.write(err)

    assert '- tag' in out
    assert '- tag1' in out
    assert '- tag2' in out
    assert '- tag3' in out


def test_testplan_report_test_tree(capfd):
    testplan = TestPlan(env=mock.Mock())
    testplan.get_tests_list = mock.Mock(
        return_value=['1.dummy.case.1', '1.dummy.case.2',
                      '2.dummy.case.1', '2.dummy.case.2',
                      '3.dummy.case.1', '3.dummy.case.2',
                      '4.dummy.case.1', '4.dummy.case.2',
                      '5.dummy.case.1', '5.dummy.case.2',
                      'sample.group1.case1', 'sample.group1.case2',
                      'sample.group2.case', 'sample.group3.case1',
                      'sample.group3.case2', 'sample.group3.case3']
    )

    testplan.report_test_tree()

    out, err = capfd.readouterr()
    sys.stdout.write(out)
    sys.stderr.write(err)

    expected = """
Testsuite
├── Samples
│   ├── group1
│   │   ├── sample.group1.case1
│   │   └── sample.group1.case2
│   ├── group2
│   │   └── sample.group2.case
│   └── group3
│       ├── sample.group3.case1
│       ├── sample.group3.case2
│       └── sample.group3.case3
└── Tests
    ├── 1
    │   └── dummy
    │       ├── 1.dummy.case.1
    │       └── 1.dummy.case.2
    ├── 2
    │   └── dummy
    │       ├── 2.dummy.case.1
    │       └── 2.dummy.case.2
    ├── 3
    │   └── dummy
    │       ├── 3.dummy.case.1
    │       └── 3.dummy.case.2
    ├── 4
    │   └── dummy
    │       ├── 4.dummy.case.1
    │       └── 4.dummy.case.2
    └── 5
        └── dummy
            ├── 5.dummy.case.1
            └── 5.dummy.case.2
"""
    expected = expected[1:]

    assert expected in out


def test_testplan_report_test_list(capfd):
    testplan = TestPlan(env=mock.Mock())
    testplan.get_tests_list = mock.Mock(
        return_value=['4.dummy.case.1', '4.dummy.case.2',
                      '3.dummy.case.2', '2.dummy.case.2',
                      '1.dummy.case.1', '1.dummy.case.2',
                      '3.dummy.case.1', '2.dummy.case.1',
                      '5.dummy.case.1', '5.dummy.case.2']
    )

    testplan.report_test_list()

    out, err = capfd.readouterr()
    sys.stdout.write(out)
    sys.stderr.write(err)

    assert ' - 1.dummy.case.1\n' \
           ' - 1.dummy.case.2\n' \
           ' - 2.dummy.case.1\n' \
           ' - 2.dummy.case.2\n' \
           ' - 3.dummy.case.1\n' \
           ' - 3.dummy.case.2\n' \
           ' - 4.dummy.case.1\n' \
           ' - 4.dummy.case.2\n' \
           ' - 5.dummy.case.1\n' \
           ' - 5.dummy.case.2\n' \
           '10 total.' in out


def test_testplan_info(capfd):
    TestPlan.info('dummy text')

    out, err = capfd.readouterr()
    sys.stdout.write(out)
    sys.stderr.write(err)

    assert 'dummy text\n' in out


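# Each TESTDATA_8 entry is (override_default_platforms, expected_platform_names,
# expected_defaults): whether the test configuration overrides the boards' own
# defaults, and the resulting platform and default-platform lists.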
TESTDATA_8 = [
    (False, ['p1e2/unit_testing', 'p2/unit_testing', 'p3/unit_testing'], ['p2/unit_testing', 'p3/unit_testing']),
    (True, ['p1e2/unit_testing', 'p2/unit_testing', 'p3/unit_testing'], ['p3/unit_testing']),
]

@pytest.mark.parametrize(
    'override_default_platforms, expected_platform_names, expected_defaults',
    TESTDATA_8,
    ids=['no override defaults', 'override defaults']
)
def test_testplan_add_configurations(
    tmp_path,
    override_default_platforms,
    expected_platform_names,
    expected_defaults
):
    env = mock.Mock(board_roots=[tmp_path / 'boards'], soc_roots=[tmp_path], arch_roots=[tmp_path])

    testplan = TestPlan(env=env)
    testplan.test_config = mock.Mock()
    testplan.test_config.override_default_platforms = override_default_platforms
    testplan.test_config.default_platforms = ['p3', 'p1e1']

    def mock_gen_plat(board_roots, soc_roots, arch_roots):
        assert [tmp_path] == board_roots
        assert [tmp_path] == soc_roots
        assert [tmp_path] == arch_roots

        platforms = [
            mock.Mock(aliases=['p1e1/unit_testing', 'p1e1'], twister=False, default=False),
            mock.Mock(aliases=['p1e2/unit_testing', 'p1e2'], twister=True, default=False),
            mock.Mock(aliases=['p2/unit_testing', 'p2'], twister=True, default=True),
            mock.Mock(aliases=['p3/unit_testing', 'p3'], twister=True, default=True),
        ]
        for platform in platforms:
            type(platform).name = mock.PropertyMock(return_value=platform.aliases[0])
            yield platform

    with mock.patch('twisterlib.testplan.generate_platforms', mock_gen_plat):
        testplan.add_configurations()

    if expected_defaults is not None:
        print(expected_defaults)
        print(testplan.default_platforms)
        assert sorted(expected_defaults) == sorted(testplan.default_platforms)
    if expected_platform_names is not None:
        print(expected_platform_names)
        print(testplan.platform_names)
        platform_names = [p.name for p in testplan.platforms]
        assert sorted(expected_platform_names) == sorted(platform_names)


def test_testplan_get_all_tests():
    testplan = TestPlan(env=mock.Mock())
    tc1 = mock.Mock()
    tc1.name = 'tc1'
    tc2 = mock.Mock()
    tc2.name = 'tc2'
    tc3 = mock.Mock()
    tc3.name = 'tc3'
    tc4 = mock.Mock()
    tc4.name = 'tc4'
    tc5 = mock.Mock()
    tc5.name = 'tc5'
    ts1 = mock.Mock(testcases=[tc1, tc2])
    ts2 = mock.Mock(testcases=[tc3, tc4, tc5])
    testplan.testsuites = {
        'ts1': ts1,
        'ts2': ts2
    }

    res = testplan.get_all_tests()

    assert sorted(res) == ['tc1', 'tc2', 'tc3', 'tc4', 'tc5']


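# Each TESTDATA_9 entry is (testsuite_filter, use_alt_root, detailed_id,
# expected_suite_count, expected_errors): the filter and root/ID options passed to
# add_testsuites(), with the expected number of loaded suites and load errors.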
TESTDATA_9 = [
    ([], False, True, 11, 1),
    ([], False, False, 7, 2),
    ([], True, False, 9, 1),
    ([], True, True, 9, 1),
    ([], True, False, 9, 1),
    (['good_test/dummy.common.1', 'good_test/dummy.common.2', 'good_test/dummy.common.3'], False, True, 3, 1),
    (['good_test/dummy.common.1', 'good_test/dummy.common.2',
      'duplicate_test/dummy.common.1', 'duplicate_test/dummy.common.2'], False, True, 4, 1),
    (['dummy.common.1', 'dummy.common.2'], False, False, 2, 2),
    (['good_test/dummy.common.1', 'good_test/dummy.common.2', 'good_test/dummy.common.3'], True, True, 0, 1),
]

@pytest.mark.parametrize(
    'testsuite_filter, use_alt_root, detailed_id, expected_suite_count, expected_errors',
    TESTDATA_9,
    ids=[
        'no testsuite filter, detailed id',
        'no testsuite filter, short id',
        'no testsuite filter, alt root, detailed id',
        'no filter, alt root, detailed id',
        'no filter, alt root, short id',
        'testsuite filter',
        'testsuite filter and valid duplicate',
        'testsuite filter, short id and duplicate',
        'testsuite filter, alt root',
    ]
)
def test_testplan_add_testsuites(tmp_path, testsuite_filter, use_alt_root, detailed_id,
                                 expected_errors, expected_suite_count):
    # tmp_path
    # ├ tests  <- test root
    # │ ├ good_test
    # │ │ └ testcase.yaml
    # │ ├ wrong_test
    # │ │ └ testcase.yaml
    # │ ├ good_sample
    # │ │ └ sample.yaml
    # │ ├ duplicate_test
    # │ │ └ testcase.yaml
    # │ └ others
    # │   └ other.txt
    # └ other_tests  <- alternate test root
    #   └ good_test
    #     └ testcase.yaml
    tmp_test_root_dir = tmp_path / 'tests'
    tmp_test_root_dir.mkdir()

    tmp_good_test_dir = tmp_test_root_dir / 'good_test'
    tmp_good_test_dir.mkdir()
    testcase_yaml_1 = """\
tests:
  dummy.common.1:
    build_on_all: true
  dummy.common.2:
    build_on_all: true
  dummy.common.3:
    build_on_all: true
  dummy.special:
    build_on_all: false
"""
    testfile_1 = tmp_good_test_dir / 'testcase.yaml'
    testfile_1.write_text(testcase_yaml_1)

    tmp_bad_test_dir = tmp_test_root_dir / 'wrong_test'
    tmp_bad_test_dir.mkdir()
    testcase_yaml_2 = """\
tests:
 wrong:
  yaml: {]}
"""
    testfile_2 = tmp_bad_test_dir / 'testcase.yaml'
    testfile_2.write_text(testcase_yaml_2)

    tmp_good_sample_dir = tmp_test_root_dir / 'good_sample'
    tmp_good_sample_dir.mkdir()
    samplecase_yaml_1 = """\
tests:
  sample.dummy.common.1:
    tags:
    - samples
  sample.dummy.common.2:
    tags:
    - samples
  sample.dummy.special.1:
    tags:
    - samples
"""
    samplefile_1 = tmp_good_sample_dir / 'sample.yaml'
    samplefile_1.write_text(samplecase_yaml_1)

    tmp_duplicate_test_dir = tmp_test_root_dir / 'duplicate_test'
    tmp_duplicate_test_dir.mkdir()
    # The duplicate needs to have the same number of tests, as these configurations
    # can be read with either duplicate_test first or good_test first, so the number
    # of selected tests needs to be the same in both situations.
    testcase_yaml_4 = """\
tests:
  dummy.common.1:
    build_on_all: true
  dummy.common.2:
    build_on_all: true
  dummy.common.3:
    build_on_all: true
  dummy.special:
    build_on_all: false
"""
    testfile_4 = tmp_duplicate_test_dir / 'testcase.yaml'
    testfile_4.write_text(testcase_yaml_4)

    tmp_other_dir = tmp_test_root_dir / 'others'
    tmp_other_dir.mkdir()
    _ = tmp_other_dir / 'other.txt'

    tmp_alt_test_root_dir = tmp_path / 'other_tests'
    tmp_alt_test_root_dir.mkdir()

    tmp_alt_good_test_dir = tmp_alt_test_root_dir / 'good_test'
    tmp_alt_good_test_dir.mkdir()
    testcase_yaml_3 = """\
tests:
  dummy.alt.1:
    build_on_all: true
  dummy.alt.2:
    build_on_all: true
"""
    testfile_3 = tmp_alt_good_test_dir / 'testcase.yaml'
    testfile_3.write_text(testcase_yaml_3)

    env = mock.Mock(
        test_roots=[tmp_test_root_dir],
        options=mock.Mock(detailed_test_id=detailed_id),
        alt_config_root=[tmp_alt_test_root_dir] if use_alt_root else []
    )

    testplan = TestPlan(env=env)

    res = testplan.add_testsuites(testsuite_filter, testsuite_pattern=[])

    assert res == expected_suite_count
    assert testplan.load_errors == expected_errors


def test_testplan_str():
    testplan = TestPlan(env=mock.Mock())
    testplan.name = 'my name'

    res = testplan.__str__()

    assert res == 'my name'


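# Each TESTDATA_10 entry is (name, expect_found): the platform name to look up and
# whether get_platform() should find it among the registered aliases.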
TESTDATA_10 = [
    ('a platform', True),
    ('other platform', False),
]

@pytest.mark.parametrize(
    'name, expect_found',
    TESTDATA_10,
    ids=['platform exists', 'no platform']
)
def test_testplan_get_platform(name, expect_found):
    testplan = TestPlan(env=mock.Mock())
    p1 = mock.Mock()
    p1.name = 'some platform'
    p1.aliases = [p1.name]
    p2 = mock.Mock()
    p2.name = 'a platform'
    p2.aliases = [p2.name]
    testplan.platforms = [p1, p2]

    res = testplan.get_platform(name)

    if expect_found:
        assert res.name == name
    else:
        assert res is None


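# Each TESTDATA_11 entry is (device_testing, expected_tfilter): device testing is
# expected to select the 'runnable' instance filter, and 'buildable' otherwise.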
1361TESTDATA_11 = [
1362    (True, 'runnable'),
1363    (False, 'buildable'),
1364]
1365
1366@pytest.mark.parametrize(
1367    'device_testing, expected_tfilter',
1368    TESTDATA_11,
1369    ids=['device testing', 'no device testing']
1370)
1371def test_testplan_load_from_file(caplog, device_testing, expected_tfilter):
1372    def get_platform(name):
1373        p = mock.Mock()
1374        p.name = name
1375        p.normalized_name = name
1376        return p
1377
1378    ts1tc1 = mock.Mock()
1379    ts1tc1.name = 'TS1.tc1'
1380    ts1 = mock.Mock(testcases=[ts1tc1])
1381    ts1.name = 'TestSuite 1'
1382    ts1.toolchain = 'zephyr'
1383    ts2 = mock.Mock(testcases=[])
1384    ts2.name = 'TestSuite 2'
1385    ts2.toolchain = 'zephyr'
1386    ts3tc1 = mock.Mock()
1387    ts3tc1.name = 'TS3.tc1'
1388    ts3tc2 = mock.Mock()
1389    ts3tc2.name = 'TS3.tc2'
1390    ts3 = mock.Mock(testcases=[ts3tc1, ts3tc2])
1391    ts3.name = 'TestSuite 3'
1392    ts3.toolchain = 'zephyr'
1393    ts4tc1 = mock.Mock()
1394    ts4tc1.name = 'TS4.tc1'
1395    ts4 = mock.Mock(testcases=[ts4tc1])
1396    ts4.name = 'TestSuite 4'
1397    ts4.toolchain = 'zephyr'
1398    ts5 = mock.Mock(testcases=[])
1399    ts5.name = 'TestSuite 5'
1400    ts5.toolchain = 'zephyr'
1401
1402    testplan = TestPlan(env=mock.Mock(outdir=os.path.join('out', 'dir')))
1403    testplan.options = mock.Mock(device_testing=device_testing, test_only=True, report_summary=None)
1404    testplan.testsuites = {
1405        'TestSuite 1': ts1,
1406        'TestSuite 2': ts2,
1407        'TestSuite 3': ts3,
1408        'TestSuite 4': ts4,
1409        'TestSuite 5': ts5
1410    }
1411
1412    testplan.get_platform = mock.Mock(side_effect=get_platform)
1413
1414    testplan_data = """\
1415{
1416    "testsuites": [
1417        {
1418            "name": "TestSuite 1",
1419            "platform": "Platform 1",
1420            "run_id": 1,
1421            "execution_time": 60.00,
1422            "used_ram": 4096,
1423            "available_ram": 12278,
1424            "used_rom": 1024,
1425            "available_rom": 1047552,
1426            "status": "passed",
1427            "toolchain": "zephyr",
1428            "reason": "OK",
1429            "testcases": [
                {
                    "identifier": "TS1.tc1",
                    "status": "passed",
                    "reason": "passed",
                    "execution_time": 60.00,
                    "log": ""
                }
            ]
        },
        {
            "name": "TestSuite 2",
            "platform": "Platform 1",
            "toolchain": "zephyr"
        },
        {
            "name": "TestSuite 3",
            "platform": "Platform 1",
            "run_id": 1,
            "execution_time": 360.00,
            "used_ram": 4096,
            "available_ram": 12278,
            "used_rom": 1024,
            "available_rom": 1047552,
            "status": "error",
            "toolchain": "zephyr",
            "reason": "File Not Found Error",
            "testcases": [
                {
                    "identifier": "TS3.tc1",
                    "status": "error",
                    "reason": "File Not Found Error.",
                    "execution_time": 360.00,
                    "log": "[ERROR]: File 'dummy.yaml' not found!\\nClosing..."
                },
                {
                    "identifier": "TS3.tc2"
                }
            ]
        },
        {
            "name": "TestSuite 4",
            "platform": "Platform 1",
            "execution_time": 360.00,
            "used_ram": 4096,
            "available_ram": 12278,
            "used_rom": 1024,
            "available_rom": 1047552,
            "status": "skipped",
            "toolchain": "zephyr",
            "reason": "Not in requested test list.",
            "testcases": [
                {
                    "identifier": "TS4.tc1",
                    "status": "skipped",
                    "reason": "Not in requested test list.",
                    "execution_time": 360.00,
                    "log": "[INFO] Parsing..."
                },
                {
                    "identifier": "TS3.tc2"
                }
            ]
        },
        {
            "name": "TestSuite 5",
            "platform": "Platform 2",
            "toolchain": "zephyr"
        }
    ]
}
"""

    filter_platform = ['Platform 1']

    check_runnable_mock = mock.Mock(return_value=True)

    with mock.patch('builtins.open', mock.mock_open(read_data=testplan_data)), \
         mock.patch('twisterlib.testinstance.TestInstance.check_runnable', check_runnable_mock), \
         mock.patch('twisterlib.testinstance.TestInstance.create_overlay', mock.Mock()):
        testplan.load_from_file('dummy.yaml', filter_platform)

    expected_instances = {
        'Platform 1/zephyr/TestSuite 1': {
            'metrics': {
                'handler_time': 60.0,
                'used_ram': 4096,
                'used_rom': 1024,
                'available_ram': 12278,
                'available_rom': 1047552
            },
            'retries': 0,
            'toolchain': 'zephyr',
            'testcases': {
                'TS1.tc1': {
                    'status': TwisterStatus.PASS,
                    'reason': 'passed',
                    'duration': 60.0,
                    'output': ''
                }
            }
        },
        'Platform 1/zephyr/TestSuite 2': {
            'metrics': {
                'handler_time': 0,
                'used_ram': 0,
                'used_rom': 0,
                'available_ram': 0,
                'available_rom': 0
            },
            'retries': 0,
            'toolchain': 'zephyr',
            'testcases': []
        },
        'Platform 1/zephyr/TestSuite 3': {
            'metrics': {
                'handler_time': 360.0,
                'used_ram': 4096,
                'used_rom': 1024,
                'available_ram': 12278,
                'available_rom': 1047552
            },
            'retries': 1,
            'toolchain': 'zephyr',
            'testcases': {
                'TS3.tc1': {
                    'status': TwisterStatus.ERROR,
                    'reason': None,
                    'duration': 360.0,
                    'output': '[ERROR]: File \'dummy.yaml\' not found!\nClosing...'
                },
                'TS3.tc2': {
                    'status': TwisterStatus.NONE,
                    'reason': None,
                    'duration': 0,
                    'output': ''
                }
            }
        },
        'Platform 1/zephyr/TestSuite 4': {
            'metrics': {
                'handler_time': 360.0,
                'used_ram': 4096,
                'used_rom': 1024,
                'available_ram': 12278,
                'available_rom': 1047552
            },
            'retries': 0,
            'toolchain': 'zephyr',
            'testcases': {
                'TS4.tc1': {
                    'status': TwisterStatus.SKIP,
                    'reason': 'Not in requested test list.',
                    'duration': 360.0,
                    'output': '[INFO] Parsing...'
                }
            }
        },
    }

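    # Check the loaded instances against the expectations: metrics, retries,
    # toolchain and per-testcase results.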
    for n, i in testplan.instances.items():
        assert expected_instances[n]['metrics'] == i.metrics
        assert expected_instances[n]['retries'] == i.retries
        assert expected_instances[n]['toolchain'] == i.toolchain
        for t in i.testcases:
            assert expected_instances[n]['testcases'][str(t)]['status'] == t.status
            assert expected_instances[n]['testcases'][str(t)]['reason'] == t.reason
            assert expected_instances[n]['testcases'][str(t)]['duration'] == t.duration
            assert expected_instances[n]['testcases'][str(t)]['output'] == t.output

    check_runnable_mock.assert_called_with(mock.ANY, mock.ANY)

    expected_logs = [
        'loading TestSuite 1...',
        'loading TestSuite 2...',
        'loading TestSuite 3...',
        'loading TestSuite 4...',
    ]
    assert all(log in caplog.text for log in expected_logs)


def test_testplan_add_instances():
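    """ Testing add_instances function of TestPlan class in Twister """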
    testplan = TestPlan(env=mock.Mock())
    instance1 = mock.Mock()
    instance1.name = 'instance 1'
    instance2 = mock.Mock()
    instance2.name = 'instance 2'
    instance_list = [instance1, instance2]

    testplan.add_instances(instance_list)

    assert testplan.instances == {
        'instance 1': instance1,
        'instance 2': instance2,
    }


def test_testplan_get_testcase():
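    """ Testing get_testcase function of TestPlan class in Twister
    Test : Asserting that every testsuite containing a testcase with the given id is returned
    """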
    testplan = TestPlan(env=mock.Mock())
    testplan.testsuites = {
        'test1.suite0': mock.Mock(testcases=[mock.Mock(), mock.Mock()]),
        'test1.suite1': mock.Mock(testcases=[mock.Mock(), mock.Mock()]),
        'test1.suite2': mock.Mock(testcases=[mock.Mock(), mock.Mock()]),
        'test1.suite3': mock.Mock(testcases=[])
    }

    testplan.testsuites['test1.suite0'].testcases[0].name = 'test1.suite0.case0'
    testplan.testsuites['test1.suite0'].testcases[1].name = 'test1.suite0.case1'
    #
    testplan.testsuites['test1.suite1'].testcases[0].name = 'test1.suite1.case0'
    testplan.testsuites['test1.suite1'].testcases[1].name = 'test1.suite1.case0'  # in-suite duplicate
    #
    testplan.testsuites['test1.suite2'].testcases[0].name = 'test1.suite2.case0'
    testplan.testsuites['test1.suite2'].testcases[1].name = 'test1.suite1.case0'  # out-of-suite duplicate

    testcase_id = 'test1.suite1.case0'

    res = testplan.get_testcase(testcase_id)

    # Three matches: suite1 matches twice (duplicated case name) and suite2 once.
    assert len(res) == 3
    assert testplan.testsuites['test1.suite1'] in res
    assert testplan.testsuites['test1.suite2'] in res


def test_testplan_verify_platforms_existence(caplog):
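    """ Testing verify_platforms_existence function of TestPlan class in Twister
    Test : Asserting twister exits when an unrecognized platform is requested
    """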
    testplan = TestPlan(env=mock.Mock())
    testplan.platform_names = ['a platform', 'other platform']

    platform_names = ['other platform', 'some platform']
    log_info = 'PLATFORM ERROR'

    with pytest.raises(SystemExit) as se:
        testplan.verify_platforms_existence(platform_names, log_info)

    assert str(se.value) == '2'
    assert 'PLATFORM ERROR - unrecognized platform - some platform' in caplog.text


TESTDATA_12 = [
    (True),
    (False)
]

@pytest.mark.parametrize(
    'exists',
    TESTDATA_12,
    ids=['links dir exists', 'links dir does not exist']
)
def test_testplan_create_build_dir_links(exists):
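    """ Testing create_build_dir_links function of TestPlan class in Twister """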
    outdir = os.path.join('out', 'dir')
    instances_linked = []

    def mock_link(links_dir_path, instance):
        assert links_dir_path == os.path.join(outdir, 'twister_links')
        instances_linked.append(instance)

    instances = {
        'inst0': mock.Mock(status=TwisterStatus.PASS),
        'inst1': mock.Mock(status=TwisterStatus.SKIP),
        'inst2': mock.Mock(status=TwisterStatus.ERROR),
    }
    # The skipped instance should not get a build dir link.
    expected_instances = [instances['inst0'], instances['inst2']]

    testplan = TestPlan(env=mock.Mock(outdir=outdir))
    testplan._create_build_dir_link = mock.Mock(side_effect=mock_link)
    testplan.instances = instances

    with mock.patch('os.path.exists', return_value=exists), \
         mock.patch('os.mkdir', mock.Mock()) as mkdir_mock:
        testplan.create_build_dir_links()

    if not exists:
        mkdir_mock.assert_called_once()

    assert expected_instances == instances_linked


TESTDATA_13 = [
    ('nt'),
    ('Linux')
]

@pytest.mark.parametrize(
    'os_name',
    TESTDATA_13,
)
def test_testplan_create_build_dir_link(os_name):
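    """ Testing _create_build_dir_link function of TestPlan class in Twister """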
    def mock_makedirs(path, exist_ok=False):
        assert exist_ok
        assert path == instance_build_dir

    def mock_symlink(source, target):
        assert source == instance_build_dir
        assert target == os.path.join('links', 'path', 'test_0')

    def mock_call(cmd, shell=False):
        assert shell
        assert cmd == ['mklink', '/J', os.path.join('links', 'path', 'test_0'),
                       instance_build_dir]

    def mock_join(*paths):
        slash = "\\" if os.name == 'nt' else "/"
        return slash.join(paths)

    with mock.patch('os.name', os_name), \
         mock.patch('os.symlink', side_effect=mock_symlink), \
         mock.patch('os.makedirs', side_effect=mock_makedirs), \
         mock.patch('subprocess.call', side_effect=mock_call), \
         mock.patch('os.path.join', side_effect=mock_join):

        testplan = TestPlan(env=mock.Mock())
        links_dir_path = os.path.join('links', 'path')
        instance_build_dir = os.path.join('some', 'far', 'off', 'build', 'dir')
        instance = mock.Mock(build_dir=instance_build_dir)
        testplan._create_build_dir_link(links_dir_path, instance)

        assert instance.build_dir == os.path.join('links', 'path', 'test_0')
        assert testplan.link_dir_counter == 1


TESTDATA_14 = [
    ('bad platform', 'dummy reason', [],
     'dummy status', 'dummy reason'),
    ('good platform', 'quarantined', [],
     'dummy status', 'quarantined'),
    ('good platform', 'dummy reason', [{'type': 'command line filter'}],
     'dummy status', 'dummy reason'),
    ('good platform', 'dummy reason', [{'type': 'Skip filter'}],
     'dummy status', 'dummy reason'),
    ('good platform', 'dummy reason', [{'type': 'platform key filter'}],
     'dummy status', 'dummy reason'),
    ('good platform', 'dummy reason', [{'type': 'Toolchain filter'}],
     'dummy status', 'dummy reason'),
    ('good platform', 'dummy reason', [{'type': 'Module filter'}],
     'dummy status', 'dummy reason'),
    ('good platform', 'dummy reason', [{'type': 'testsuite filter'}],
     TwisterStatus.ERROR, 'dummy reason but is one of the integration platforms'),
]

@pytest.mark.parametrize(
    'platform_name, reason, filters,'
    ' expected_status, expected_reason',
    TESTDATA_14,
    ids=['wrong platform', 'quarantined', 'command line filtered',
         'skip filtered', 'platform key filtered', 'toolchain filtered',
         'module filtered', 'skip to error change']
)
def test_change_skip_to_error_if_integration(
    platform_name,
    reason,
    filters,
    expected_status,
    expected_reason
):
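    """ Testing change_skip_to_error_if_integration function in Twister
    Test : Asserting a testsuite-filtered skip on an integration platform is changed to an error
    """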
    options = mock.Mock()
    platform = mock.Mock()
    platform.name = platform_name
    testsuite = mock.Mock(integration_platforms=['good platform', 'a platform'])
    instance = mock.Mock(
        testsuite=testsuite,
        platform=platform,
        filters=filters,
        status='dummy status',
        reason=reason
    )

    change_skip_to_error_if_integration(options, instance)

    assert instance.status == expected_status
    assert instance.reason == expected_reason