#!/usr/bin/env python3
# Copyright (c) 2023-2024 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Blackbox tests for twister's command line functions
"""

import importlib
import json
import os
import re
import shutil
import sys
import xml.etree.ElementTree as etree
from unittest import mock

import pytest

# pylint: disable=no-name-in-module
from conftest import TEST_DATA, ZEPHYR_BASE, testsuite_filename_mock, clear_log_in_test
from twisterlib.statuses import TwisterStatus
from twisterlib.testplan import TestPlan


@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestReport:
    TESTDATA_1 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom', 'mps2/an385'],
            [
                'qemu_x86_atom.xml', 'mps2_an385.xml',
                'testplan.json', 'twister.json',
                'twister.log', 'twister_report.xml',
                'twister_suite_report.xml', 'twister.xml'
            ]
        ),
    ]
    TESTDATA_2 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom', 'mps2/an385'],
            [
                'mps2_an385_TEST.xml', 'qemu_x86_atom_TEST.xml',
                'twister_TEST.json', 'twister_TEST_report.xml',
                'twister_TEST_suite_report.xml', 'twister_TEST.xml'
            ]
        ),
    ]
    TESTDATA_3 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom', 'mps2/an385'],
            ['--report-name', 'abcd'],
            [
                'abcd.json', 'abcd_report.xml',
                'abcd_suite_report.xml', 'abcd.xml'
            ]
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom', 'mps2/an385'],
            ['--report-name', '1234', '--platform-reports'],
            [
                'mps2_an385.xml', 'qemu_x86_atom.xml',
                '1234.json', '1234_report.xml',
                '1234_suite_report.xml', '1234.xml'
            ]
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom', 'mps2/an385'],
            ['--report-name', 'Final', '--platform-reports', '--report-suffix=Test'],
            [
                'mps2_an385_Test.xml', 'qemu_x86_atom_Test.xml',
                'Final_Test.json', 'Final_Test_report.xml',
                'Final_Test_suite_report.xml', 'Final_Test.xml'
            ]
        ),
    ]
    TESTDATA_4 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom'],
            [
                'twister.json', 'twister_report.xml',
                'twister_suite_report.xml', 'twister.xml'
            ],
            "TEST_DIR"
        ),
    ]
    TESTDATA_5 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom'],
            [
                'testplan.json', 'twister.log',
                'twister.json', 'twister_report.xml',
                'twister_suite_report.xml', 'twister.xml'
            ],
            "OUT_DIR"
        ),
    ]
    TESTDATA_6 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86/atom'],
            "TEST_LOG_FILE.log"
        ),
    ]
    TESTDATA_7 = [
        (
            os.path.join(TEST_DATA, 'tests', 'one_fail_two_error_one_pass'),
            ['qemu_x86/atom'],
            [
                r'one_fail_two_error_one_pass.agnostic.group1.subgroup2 on qemu_x86/atom FAILED \(.*\)',
                r'one_fail_two_error_one_pass.agnostic.group1.subgroup3 on qemu_x86/atom ERROR \(Build failure.*\)',
                r'one_fail_two_error_one_pass.agnostic.group1.subgroup4 on qemu_x86/atom ERROR \(Build failure.*\)'
            ],
        ),
    ]

    @classmethod
    def setup_class(cls):
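        # Load the twister entry-point script as a module so each test can
        # execute it in-process with a mocked sys.argv instead of spawning
        # a subprocess.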
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name',
        TESTDATA_1,
        ids=[
            'platform_reports'
        ]
    )
    def test_platform_reports(self, capfd, out_path, test_path, test_platforms, file_name):
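        # Interleave '-p' with each platform so the CLI receives, e.g.,
        # ['-p', 'qemu_x86/atom', '-p', 'mps2/an385'].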
        args = ['-i', '--outdir', out_path, '-T', test_path, '--platform-reports'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

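        # Every report listed in file_name must exist and be non-empty.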
        for f_name in file_name:
            path = os.path.join(out_path, f_name)
            assert os.path.exists(path), f'file not found: {f_name}'

            if path.endswith(".json"):
                with open(path, "r") as json_file:
                    data = json.load(json_file)
                    assert data, f"JSON file '{path}' is empty"

            elif path.endswith(".xml"):
                tree = etree.parse(path)
                xml_text = etree.tostring(tree.getroot(), encoding="unicode")
                assert xml_text.strip(), f"XML file '{path}' is empty"

            elif path.endswith(".log"):
                with open(path, "r") as log_file:
                    text_content = log_file.read()
                    assert text_content.strip(), f"LOG file '{path}' is empty"

            else:
                pytest.fail(f"Unsupported file type: '{path}'")

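        # --platform-reports also writes a per-platform JSON report named
        # after the platform, with '/' replaced by '_'.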
        for f_platform in test_platforms:
            platform_path = os.path.join(out_path, f_platform.replace("/", "_") + ".json")
            assert os.path.exists(platform_path), f'file not found: {f_platform}'

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name',
        TESTDATA_2,
        ids=[
            'report_suffix',
        ]
    )
    def test_report_suffix(self, capfd, out_path, test_path, test_platforms, file_name):
        args = ['-i', '--outdir', out_path, '-T', test_path,
                '--platform-reports', '--report-suffix=TEST'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        for f_name in file_name:
            path = os.path.join(out_path, f_name)
            assert os.path.exists(path), f'file not found: {f_name}'

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, test_platforms, report_arg, file_name',
        TESTDATA_3,
        ids=[
            'only_report_name',
            'report_name + platform_reports',
            'report_name + platform_reports + report_suffix'
        ]
    )
    def test_report_name(self, capfd, out_path, test_path, test_platforms, report_arg, file_name):
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair] + \
               report_arg

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        for f_name in file_name:
            path = os.path.join(out_path, f_name)
            assert os.path.exists(path), f'file not found: {f_name}'

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name, dir_name',
        TESTDATA_4,
        ids=[
            'report_dir',
        ]
    )
    def test_report_dir(self, capfd, out_path, test_path, test_platforms, file_name, dir_name):
        args = ['-i', '--outdir', out_path, '-T', test_path, "--report-dir", dir_name] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

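        # --report-dir output lands under ZEPHYR_BASE/<dir_name>; start from
        # a clean directory and remove it again once the test finishes.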
        twister_path = os.path.join(ZEPHYR_BASE, dir_name)
        if os.path.exists(twister_path):
            shutil.rmtree(twister_path)

        try:
            with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                    pytest.raises(SystemExit) as sys_exit:
                self.loader.exec_module(self.twister_module)

            out, err = capfd.readouterr()
            sys.stdout.write(out)
            sys.stderr.write(err)

            for f_name in file_name:
                path = os.path.join(twister_path, f_name)
                assert os.path.exists(path), f'file not found: {f_name}'

            assert str(sys_exit.value) == '0'
        finally:
            if os.path.exists(twister_path):
                shutil.rmtree(twister_path)

    @pytest.mark.noclearout
    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name, dir_name',
        TESTDATA_5,
        ids=[
            'outdir',
        ]
    )
    def test_outdir(self, capfd, test_path, test_platforms, file_name, dir_name):
        args = ['-i', '-T', test_path, "--outdir", dir_name] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

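        # A relative --outdir is created under ZEPHYR_BASE; start clean and
        # remove the directory again when the test is done.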
        twister_path = os.path.join(ZEPHYR_BASE, dir_name)
        if os.path.exists(twister_path):
            shutil.rmtree(twister_path)

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        try:
            for f_name in file_name:
                path = os.path.join(twister_path, f_name)
                assert os.path.exists(path), f'file not found: {f_name}'

            for f_platform in test_platforms:
                platform_path = os.path.join(twister_path, f_platform.replace("/", "_"))
                assert os.path.exists(platform_path), f'platform directory not found: {f_platform}'

            assert str(sys_exit.value) == '0'
        finally:
            if os.path.exists(twister_path):
                shutil.rmtree(twister_path)

    @pytest.mark.parametrize(
        'test_path, test_platforms, file_name',
        TESTDATA_6,
        ids=[
            'log_file',
        ]
    )
    def test_log_file(self, capfd, test_path, test_platforms, out_path, file_name):
        args = ['-i', '--outdir', out_path, '-T', test_path, "--log-file", file_name] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

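        # The log file is written under ZEPHYR_BASE; remove any leftover from
        # a previous run so the existence check below is meaningful.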
        file_path = os.path.join(ZEPHYR_BASE, file_name)
        if os.path.exists(file_path):
            os.remove(file_path)

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert os.path.exists(file_path), f'file not found: {file_name}'

        assert str(sys_exit.value) == '0'

    @pytest.mark.parametrize(
        'test_path, flags, expected_testcase_counts',
        [
            (
                os.path.join(TEST_DATA, 'tests', 'dummy'),
                ['--detailed-skipped-report'],
                {'qemu_x86/atom': 6, 'intel_adl_crb/alder_lake': 1}
            ),
            (
                os.path.join(TEST_DATA, 'tests', 'dummy'),
                ['--detailed-skipped-report', '--report-filtered'],
                {'qemu_x86/atom': 13, 'intel_adl_crb/alder_lake': 13}
            ),
        ],
        ids=['dummy tests', 'dummy tests with filtered']
    )
    def test_detailed_skipped_report(self, out_path, test_path, flags, expected_testcase_counts):
        test_platforms = ['qemu_x86/atom', 'intel_adl_crb/alder_lake']
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               flags + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

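        # twister_report.xml holds one <testsuite> element per platform;
        # compare each suite's <testcase> count against the expected totals.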
        testsuite_counter = 0
        xml_data = etree.parse(os.path.join(out_path, 'twister_report.xml')).getroot()
        for ts in xml_data.iter('testsuite'):
            testsuite_counter += 1
            # Without the tested flag, filtered testcases would be missing from the report
            testcase_count = len(list(ts.iter('testcase')))
            expected_tc_count = expected_testcase_counts[ts.get('name')]
            assert testcase_count == expected_tc_count, \
                   'Not all expected testcases appear in the report.' \
                   f' (In {ts.get("name")}, expected {expected_tc_count}, got {testcase_count}.)'

        assert testsuite_counter == len(test_platforms), \
               'Some platforms are missing from the XML report.'

    @pytest.mark.parametrize(
        'test_path, report_filtered, expected_filtered_count',
        [
            (os.path.join(TEST_DATA, 'tests', 'dummy'), False, 0),
            (os.path.join(TEST_DATA, 'tests', 'dummy'), True, 10),
        ],
        ids=['no filtered', 'with filtered']
    )
    def test_report_filtered(self, out_path, test_path, report_filtered, expected_filtered_count):
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               (['--report-filtered'] if report_filtered else []) + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

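        # twister.json records a status for every selected testsuite; count
        # how many of them were reported as filtered.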
        with open(os.path.join(out_path, 'twister.json')) as f:
            j = json.load(f)

        testsuites = j.get('testsuites')
        assert testsuites, 'No testsuites found.'
        statuses = [TwisterStatus(testsuite.get('status')) for testsuite in testsuites]
        filtered_status_count = statuses.count(TwisterStatus('filtered'))
        assert filtered_status_count == expected_filtered_count, \
            f'Expected {expected_filtered_count} filtered statuses, got {filtered_status_count}.'

    def test_enable_size_report(self, out_path):
        test_platforms = ['qemu_x86', 'intel_adl_crb']
        path = os.path.join(TEST_DATA, 'tests', 'dummy', 'device', 'group')
        args = ['-i', '--outdir', out_path, '-T', path] + \
               ['--enable-size-report'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '0'

        with open(os.path.join(out_path, 'twister.json')) as f:
            j = json.load(f)

        expected_rel_path = os.path.relpath(os.path.join(path, 'dummy.device.group'), ZEPHYR_BASE)

        # twister.json will contain [used/available]_[ram/rom] keys if the flag works,
        # except for keys whose values would be 0.
        # In this testcase the 'available' values are 0, so those keys are missing.
        assert all(
            'used_ram' in ts for ts in j['testsuites']
            if ts['name'] == expected_rel_path and 'reason' not in ts
        )
        assert all(
            'used_rom' in ts for ts in j['testsuites']
            if ts['name'] == expected_rel_path and 'reason' not in ts
        )

    @pytest.mark.parametrize(
        'test_path, test_platforms, expected_content',
        TESTDATA_7,
        ids=[
            'Report summary test'
        ]
    )
    def test_report_summary(self, out_path, capfd, test_path, test_platforms, expected_content):
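        # First run without --report-summary: the suite contains failures,
        # so twister exits with 1.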
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        assert str(sys_exit.value) == '1'

        capfd.readouterr()

        clear_log_in_test()

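        # Second run with --report-summary: every failed and errored case
        # should be listed in the summary.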
        args += ['--report-summary']

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        for line in expected_content:
            result = re.search(line, err)
            assert result, f'missing information in log: {line}'

        capfd.readouterr()

        clear_log_in_test()

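        # Third run with '--report-summary 2': the summary is capped at two
        # entries, so exactly two of the expected lines should appear.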
        args = ['-i', '--outdir', out_path, '-T', test_path] + \
               ['--report-summary', '2'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        matched_lines = 0
        for line in expected_content:
            if re.search(line, err):
                matched_lines += 1
        assert matched_lines == 2, \
            f'expected exactly 2 summary lines, got {matched_lines}'