#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import json
import logging
import os
import string
import xml.etree.ElementTree as ET
from datetime import datetime
from enum import Enum
from pathlib import Path

from colorama import Fore
from twisterlib.statuses import TwisterStatus

logger = logging.getLogger('twister')


class ReportStatus(str, Enum):
    """XML element names used for non-pass results in JUnit/xunit reports."""

    def __str__(self):
        return str(self.value)

    ERROR = 'error'
    FAIL = 'failure'
    SKIP = 'skipped'


class ReportingJSONEncoder(json.JSONEncoder):
    """JSON encoder that serializes ``pathlib.Path`` objects as strings."""

    def default(self, obj):
        if isinstance(obj, Path):
            return str(obj)
        return super().default(obj)


class Reporting:
    """Generates twister result reports (JSON, xunit XML, console summaries)."""

    # Per-output-file filters applied by json_report(): keys to drop
    # ('deny_suite'), statuses to include/exclude ('allow_status'/'deny_status').
    json_filters = {
        'twister.json': {
            'deny_suite': ['footprint']
        },
        'footprint.json': {
            'deny_status': ['FILTER'],
            'deny_suite': ['testcases', 'execution_time', 'recording', 'retries', 'runnable']
        }
    }

    def __init__(self, plan, env) -> None:
        self.plan = plan  # FIXME
        self.instances = plan.instances
        self.platforms = plan.platforms
        self.selected_platforms = plan.selected_platforms
        self.env = env
        self.timestamp = datetime.now().isoformat()
        self.outdir = os.path.abspath(env.options.outdir)
        self.instance_fail_count = plan.instance_fail_count
        self.footprint = None
        self.coverage_status = None

    @staticmethod
    def process_log(log_file):
        """Read *log_file* and return its content restricted to printable ASCII.

        Returns an empty string when the file does not exist.
        """
        filtered_string = ""
        if os.path.exists(log_file):
            with open(log_file, "rb") as f:
                log = f.read().decode("utf-8")
                # Strip control/binary garbage so the log embeds cleanly in XML/JSON.
                filtered_string = ''.join(filter(lambda x: x in string.printable, log))

        return filtered_string

    @staticmethod
    def xunit_testcase(
        eleTestsuite,
        name,
        classname,
        status: TwisterStatus,
        ts_status: TwisterStatus,
        reason,
        duration,
        runnable,
        stats,
        log,
        build_only_as_skip
    ):
        """Append one <testcase> element to *eleTestsuite* and update counters.

        *stats* is the running (fails, passes, errors, skips) tuple; the
        updated tuple is returned. *ts_status* is the parent suite's status,
        used to classify skipped cases of build-only suites.
        """
        fails, passes, errors, skips = stats

        # Skipped/filtered cases did not run, so report zero duration.
        if status in [TwisterStatus.SKIP, TwisterStatus.FILTER]:
            duration = 0

        eleTestcase = ET.SubElement(
            eleTestsuite, "testcase",
            classname=classname,
            name=f"{name}",
            time=f"{duration}")

        if status in [TwisterStatus.SKIP, TwisterStatus.FILTER]:
            skips += 1
            # temporarily add build_only_as_skip to restore existing CI report behaviour
            if ts_status == TwisterStatus.PASS and not runnable:
                tc_type = "build"
            else:
                tc_type = status
            ET.SubElement(eleTestcase, ReportStatus.SKIP, type=f"{tc_type}", message=f"{reason}")
        elif status in [TwisterStatus.FAIL, TwisterStatus.BLOCK]:
            fails += 1
            el = ET.SubElement(eleTestcase, ReportStatus.FAIL, type="failure", message=f"{reason}")
            if log:
                el.text = log
        elif status == TwisterStatus.ERROR:
            errors += 1
            el = ET.SubElement(eleTestcase, ReportStatus.ERROR, type="failure", message=f"{reason}")
            if log:
                el.text = log
        elif status == TwisterStatus.PASS:
            passes += 1
        elif status == TwisterStatus.NOTRUN:
            if build_only_as_skip:
                ET.SubElement(eleTestcase, ReportStatus.SKIP, type="build", message="built only")
                skips += 1
            else:
                passes += 1
        else:
            if status == TwisterStatus.NONE:
                logger.debug(f"{name}: No status")
                ET.SubElement(
                    eleTestcase,
                    ReportStatus.SKIP,
                    type="untested",
                    message="No results captured, testsuite misconfiguration?"
                )
            else:
                logger.error(f"{name}: Unknown status '{status}'")

        return (fails, passes, errors, skips)

    # Generate a report with all testsuites instead of doing this per platform
    def xunit_report_suites(self, json_file, filename):
        """Write an xunit XML report with one <testsuite> per test suite.

        Reads results from *json_file* (produced by json_report) and writes
        the XML tree to *filename*.
        """
        json_data = {}
        with open(json_file) as json_results:
            json_data = json.load(json_results)

        env = json_data.get('environment', {})
        version = env.get('zephyr_version', None)

        eleTestsuites = ET.Element('testsuites')
        all_suites = json_data.get("testsuites", [])

        suites_to_report = all_suites
        # do not create entry if everything is filtered out
        if not self.env.options.detailed_skipped_report:
            suites_to_report = list(
                filter(lambda d: TwisterStatus(d.get('status')) != TwisterStatus.FILTER, all_suites)
            )

        for suite in suites_to_report:
            duration = 0
            # Counters start at "0" and are patched in after all testcases
            # have been accumulated below.
            eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                         name=suite.get("name"), time="0",
                                         timestamp=self.timestamp,
                                         tests="0",
                                         failures="0",
                                         errors="0", skipped="0")
            eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
            # Multiple 'property' can be added to 'properties'
            # differing by name and value
            ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
            ET.SubElement(eleTSPropetries, 'property', name="platform", value=suite.get("platform"))
            ET.SubElement(eleTSPropetries, 'property', name="architecture", value=suite.get("arch"))

            total = 0
            fails = passes = errors = skips = 0
            handler_time = suite.get('execution_time', 0)
            runnable = suite.get('runnable', 0)
            duration += float(handler_time)
            ts_status = TwisterStatus(suite.get('status'))
            classname = Path(suite.get("name", "")).name
            for tc in suite.get("testcases", []):
                status = TwisterStatus(tc.get('status'))
                reason = tc.get('reason', suite.get('reason', 'Unknown'))
                log = tc.get("log", suite.get("log"))

                tc_duration = tc.get('execution_time', handler_time)
                name = tc.get("identifier")
                fails, passes, errors, skips = self.xunit_testcase(
                    eleTestsuite,
                    name, classname, status, ts_status, reason, tc_duration, runnable,
                    (fails, passes, errors, skips), log, True)

            total = errors + passes + fails + skips

            eleTestsuite.attrib['time'] = f"{duration}"
            eleTestsuite.attrib['failures'] = f"{fails}"
            eleTestsuite.attrib['errors'] = f"{errors}"
            eleTestsuite.attrib['skipped'] = f"{skips}"
            eleTestsuite.attrib['tests'] = f"{total}"

        ET.indent(eleTestsuites, space="\t", level=0)
        result = ET.tostring(eleTestsuites)
        with open(filename, 'wb') as report:
            report.write(result)

    def xunit_report(self, json_file, filename, selected_platform=None, full_report=False):
        """Write an xunit XML report with one <testsuite> per platform.

        When *selected_platform* is given, only that platform is reported.
        With *full_report* each testcase is listed individually; otherwise
        one entry per suite is emitted.
        """
        if selected_platform:
            selected = [selected_platform]
            logger.info(f"Writing target report for {selected_platform}...")
        else:
            # NOTE: restored the filename placeholder that was missing here.
            logger.info(f"Writing xunit report {filename}...")
            selected = self.selected_platforms

        json_data = {}
        with open(json_file) as json_results:
            json_data = json.load(json_results)

        env = json_data.get('environment', {})
        version = env.get('zephyr_version', None)

        eleTestsuites = ET.Element('testsuites')
        all_suites = json_data.get("testsuites", [])

        for platform in selected:
            suites = list(filter(lambda d: d['platform'] == platform, all_suites))
            # do not create entry if everything is filtered out
            if not self.env.options.detailed_skipped_report:
                non_filtered = list(
                    filter(lambda d: TwisterStatus(d.get('status')) != TwisterStatus.FILTER, suites)
                )
                if not non_filtered:
                    continue

            duration = 0
            eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                         name=platform,
                                         timestamp=self.timestamp,
                                         time="0",
                                         tests="0",
                                         failures="0",
                                         errors="0", skipped="0")
            eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
            # Multiple 'property' can be added to 'properties'
            # differing by name and value
            ET.SubElement(eleTSPropetries, 'property', name="version", value=version)

            total = 0
            fails = passes = errors = skips = 0
            for ts in suites:
                handler_time = ts.get('execution_time', 0)
                runnable = ts.get('runnable', 0)
                duration += float(handler_time)

                ts_status = TwisterStatus(ts.get('status'))
                # Do not report filtered testcases
                if (
                    ts_status == TwisterStatus.FILTER
                    and not self.env.options.detailed_skipped_report
                ):
                    continue
                if full_report:
                    classname = Path(ts.get("name", "")).name
                    for tc in ts.get("testcases", []):
                        status = TwisterStatus(tc.get('status'))
                        reason = tc.get('reason', ts.get('reason', 'Unknown'))
                        log = tc.get("log", ts.get("log"))

                        tc_duration = tc.get('execution_time', handler_time)
                        name = tc.get("identifier")
                        fails, passes, errors, skips = self.xunit_testcase(
                            eleTestsuite,
                            name, classname, status, ts_status, reason, tc_duration, runnable,
                            (fails, passes, errors, skips), log, True)
                else:
                    reason = ts.get('reason', 'Unknown')
                    name = ts.get("name")
                    classname = f"{platform}:{name}"
                    log = ts.get("log")
                    fails, passes, errors, skips = self.xunit_testcase(
                        eleTestsuite,
                        name, classname, ts_status, ts_status, reason, handler_time, runnable,
                        (fails, passes, errors, skips), log, False)

            total = errors + passes + fails + skips

            eleTestsuite.attrib['time'] = f"{duration}"
            eleTestsuite.attrib['failures'] = f"{fails}"
            eleTestsuite.attrib['errors'] = f"{errors}"
            eleTestsuite.attrib['skipped'] = f"{skips}"
            eleTestsuite.attrib['tests'] = f"{total}"

        ET.indent(eleTestsuites, space="\t", level=0)
        result = ET.tostring(eleTestsuites)
        with open(filename, 'wb') as report:
            report.write(result)

    def json_report(self, filename, version="NA", platform=None, filters=None):
        """Write the JSON results report to *filename*.

        *platform* restricts the report to one platform; *filters* is one of
        the ``json_filters`` entries controlling which statuses and suite
        keys are emitted.
        """
        # NOTE: restored the filename placeholder that was missing here.
        logger.info(f"Writing JSON report {filename}")

        if self.env.options.report_all_options:
            report_options = vars(self.env.options)
        else:
            report_options = self.env.non_default_options()

        report = {}
        report["environment"] = {"os": os.name,
                                 "zephyr_version": version,
                                 "toolchain": self.env.toolchain,
                                 "commit_date": self.env.commit_date,
                                 "run_date": self.env.run_date,
                                 "options": report_options
                                 }
        suites = []

        for instance in self.instances.values():
            if platform and platform != instance.platform.name:
                continue
            if instance.status == TwisterStatus.FILTER and not self.env.options.report_filtered:
                continue
            if (filters and 'allow_status' in filters and
                    instance.status not in [TwisterStatus[s] for s in filters['allow_status']]):
                logger.debug(
                    f"Skip test suite '{instance.testsuite.name}'"
                    f" status '{instance.status}' not allowed for {filename}"
                )
                continue
            if (filters and 'deny_status' in filters and
                    instance.status in [TwisterStatus[s] for s in filters['deny_status']]):
                logger.debug(
                    f"Skip test suite '{instance.testsuite.name}'"
                    f" status '{instance.status}' denied for {filename}"
                )
                continue
            suite = {}
            handler_log = os.path.join(instance.build_dir, "handler.log")
            pytest_log = os.path.join(instance.build_dir, "twister_harness.log")
            build_log = os.path.join(instance.build_dir, "build.log")
            device_log = os.path.join(instance.build_dir, "device.log")

            handler_time = instance.metrics.get('handler_time', 0)
            used_ram = instance.metrics.get("used_ram", 0)
            used_rom = instance.metrics.get("used_rom", 0)
            available_ram = instance.metrics.get("available_ram", 0)
            available_rom = instance.metrics.get("available_rom", 0)
            suite = {
                "name": instance.testsuite.name,
                "arch": instance.platform.arch,
                "platform": instance.platform.name,
                "path": instance.testsuite.source_dir_rel
            }
            if instance.run_id:
                suite['run_id'] = instance.run_id

            suite["runnable"] = False
            if instance.status != TwisterStatus.FILTER:
                suite["runnable"] = instance.run

            if used_ram:
                suite["used_ram"] = used_ram
            if used_rom:
                suite["used_rom"] = used_rom

            suite['retries'] = instance.retries
            if instance.toolchain:
                suite['toolchain'] = instance.toolchain

            if instance.dut:
                suite["dut"] = instance.dut
            if available_ram:
                suite["available_ram"] = available_ram
            if available_rom:
                suite["available_rom"] = available_rom
            if instance.status in [TwisterStatus.ERROR, TwisterStatus.FAIL]:
                suite['status'] = instance.status
                # FIXME
                # Prefer the most specific log available for failure analysis.
                if os.path.exists(pytest_log):
                    suite["log"] = self.process_log(pytest_log)
                elif os.path.exists(handler_log):
                    suite["log"] = self.process_log(handler_log)
                elif os.path.exists(device_log):
                    suite["log"] = self.process_log(device_log)
                else:
                    suite["log"] = self.process_log(build_log)

                suite["reason"] = self.get_detailed_reason(instance.reason, suite["log"])
                # update the reason to get more details also in other reports (e.g. junit)
                # where build log is not available
                instance.reason = suite["reason"]
            elif instance.status == TwisterStatus.FILTER:
                suite["status"] = TwisterStatus.FILTER
                suite["reason"] = instance.reason
            elif instance.status == TwisterStatus.PASS:
                suite["status"] = TwisterStatus.PASS
            elif instance.status == TwisterStatus.SKIP:
                suite["status"] = TwisterStatus.SKIP
                suite["reason"] = instance.reason
            elif instance.status == TwisterStatus.NOTRUN:
                suite["status"] = TwisterStatus.NOTRUN
                suite["reason"] = instance.reason
            else:
                suite["status"] = TwisterStatus.NONE
                suite["reason"] = 'Unknown Instance status'

            if instance.status != TwisterStatus.NONE:
                suite["execution_time"] = f"{float(handler_time):.2f}"
                suite["build_time"] = f"{float(instance.build_time):.2f}"

            testcases = []

            # With a single testcase the whole handler time is attributed to it.
            if len(instance.testcases) == 1:
                single_case_duration = f"{float(handler_time):.2f}"
            else:
                single_case_duration = 0

            for case in instance.testcases:
                # freeform was set when no sub testcases were parsed, however,
                # if we discover those at runtime, the fallback testcase wont be
                # needed anymore and can be removed from the output, it does
                # not have a status and would otherwise be reported as skipped.
                if (
                    case.freeform
                    and case.status == TwisterStatus.NONE
                    and len(instance.testcases) > 1
                ):
                    continue
                testcase = {}
                testcase['identifier'] = case.name
                if instance.status != TwisterStatus.NONE:
                    if single_case_duration:
                        testcase['execution_time'] = single_case_duration
                    else:
                        testcase['execution_time'] = f"{float(case.duration):.2f}"

                if case.output != "":
                    testcase['log'] = case.output

                if case.status == TwisterStatus.SKIP:
                    if instance.status == TwisterStatus.FILTER:
                        testcase["status"] = TwisterStatus.FILTER
                    else:
                        testcase["status"] = TwisterStatus.SKIP
                        testcase["reason"] = case.reason or instance.reason
                else:
                    testcase["status"] = case.status
                    if case.reason:
                        testcase["reason"] = case.reason

                testcases.append(testcase)

            suite['testcases'] = testcases

            if instance.recording is not None:
                suite['recording'] = instance.recording

            if (
                instance.status not in [
                    TwisterStatus.NONE,
                    TwisterStatus.ERROR,
                    TwisterStatus.FILTER
                ]
                and self.env.options.create_rom_ram_report
                and self.env.options.footprint_report is not None
            ):
                # Init as empty data preparing for filtering properties.
                suite['footprint'] = {}

            # Pass suite properties through the context filters.
            if filters and 'allow_suite' in filters:
                suite = {k: v for k, v in suite.items() if k in filters['allow_suite']}

            if filters and 'deny_suite' in filters:
                suite = {k: v for k, v in suite.items() if k not in filters['deny_suite']}

            # Compose external data only to these properties which pass filtering.
            if 'footprint' in suite:
                do_all = 'all' in self.env.options.footprint_report
                footprint_files = {'ROM': 'rom.json', 'RAM': 'ram.json'}
                for k, v in footprint_files.items():
                    if do_all or k in self.env.options.footprint_report:
                        footprint_fname = os.path.join(instance.build_dir, v)
                        try:
                            with open(footprint_fname) as footprint_json:
                                logger.debug(f"Collect footprint.{k} for '{instance.name}'")
                                suite['footprint'][k] = json.load(footprint_json)
                        except FileNotFoundError:
                            logger.error(f"Missing footprint.{k} for '{instance.name}'")

            suites.append(suite)

        report["testsuites"] = suites
        with open(filename, 'w') as json_file:
            json.dump(report, json_file, indent=4, separators=(',', ':'),
                      cls=ReportingJSONEncoder)

    def compare_metrics(self, filename):
        """Compare current instance metrics against a baseline JSON report.

        Returns a list of (instance, metric, value, delta, lower_better)
        tuples for every metric that changed; empty list if *filename* is
        missing.
        """
        # name, datatype, lower results better
        interesting_metrics = [("used_ram", int, True),
                               ("used_rom", int, True)]

        if not os.path.exists(filename):
            # NOTE: restored the filename placeholder that was missing here.
            logger.error(f"Cannot compare metrics, {filename} not found")
            return []

        results = []
        saved_metrics = {}
        with open(filename) as fp:
            jt = json.load(fp)
            for ts in jt.get("testsuites", []):
                d = {}
                for m, _, _ in interesting_metrics:
                    d[m] = ts.get(m, 0)
                ts_name = ts.get('name')
                ts_platform = ts.get('platform')
                saved_metrics[(ts_name, ts_platform)] = d

        for instance in self.instances.values():
            mkey = (instance.testsuite.name, instance.platform.name)
            if mkey not in saved_metrics:
                continue
            sm = saved_metrics[mkey]
            for metric, mtype, lower_better in interesting_metrics:
                if metric not in instance.metrics:
                    continue
                if sm[metric] == "":
                    continue
                delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
                if delta == 0:
                    continue
                results.append((instance, metric, instance.metrics.get(metric, 0), delta,
                                lower_better))
        return results

    def footprint_reports(self, report, show_footprint, all_deltas,
                          footprint_threshold, last_metrics):
        """Log footprint (RAM/ROM) deltas against the baseline *report*.

        *all_deltas* reports improvements too; otherwise only regressions
        above *footprint_threshold* percent are shown.
        """
        if not report:
            return

        logger.debug("running footprint_reports")
        deltas = self.compare_metrics(report)
        warnings = 0
        if deltas:
            for i, metric, value, delta, lower_better in deltas:
                # Skip improvements unless all deltas were requested.
                if not all_deltas and ((delta < 0 and lower_better) or
                                       (delta > 0 and not lower_better)):
                    continue

                percentage = 0
                if value > delta:
                    percentage = (float(delta) / float(value - delta))

                if not all_deltas and (percentage < (footprint_threshold / 100.0)):
                    continue

                if show_footprint:
                    logger.log(
                        logging.INFO if all_deltas else logging.WARNING,
                        f"{i.platform.name:<25} {i.testsuite.name:<60} {metric} {delta:<+4},"
                        f" is now {value:6} {percentage:+.2%}"
                    )

                warnings += 1

        if warnings:
            logger.warning("Found {} footprint deltas to {} as a baseline.".format(
                warnings,
                (report if not last_metrics else "the last twister run.")))

    def synopsis(self):
        """Log a console synopsis of failing instances and rerun hints."""
        if self.env.options.report_summary == 0:
            count = self.instance_fail_count
            log_txt = f"The following issues were found (showing the all {count} items):"
        elif self.env.options.report_summary:
            count = self.env.options.report_summary
            log_txt = "The following issues were found "
            if count > self.instance_fail_count:
                log_txt += (
                    f"(presenting {self.instance_fail_count} out of the {count} items requested):"
                )
            else:
                log_txt += f"(showing the {count} of {self.instance_fail_count} items):"
        else:
            count = 10
            log_txt = f"The following issues were found (showing the top {count} items):"
        cnt = 0
        example_instance = None
        detailed_test_id = self.env.options.detailed_test_id
        for instance in self.instances.values():
            if instance.status not in [
                TwisterStatus.PASS,
                TwisterStatus.FILTER,
                TwisterStatus.SKIP,
                TwisterStatus.NOTRUN
            ]:
                cnt += 1
                if cnt == 1:
                    logger.info("-+" * 40)
                    logger.info(log_txt)

                status = instance.status
                if self.env.options.report_summary is not None and \
                   status in [TwisterStatus.ERROR, TwisterStatus.FAIL]:
                    status = Fore.RED + status.upper() + Fore.RESET
                logger.info(
                    f"{cnt}) {instance.testsuite.name} on {instance.platform.name}"
                    f" {status} ({instance.reason})"
                )
                example_instance = instance
            if cnt == count:
                break
        if cnt == 0 and self.env.options.report_summary is not None:
            logger.info("-+" * 40)
            logger.info("No errors/fails found")

        if cnt and example_instance:
            cwd_rel_path = os.path.relpath(example_instance.testsuite.source_dir,
                                           start=os.getcwd())

            logger.info("")
            logger.info("To rerun the tests, call twister using the following commandline:")
            extra_parameters = '' if not detailed_test_id else ' --detailed-test-id'
            logger.info(f"west twister -p <PLATFORM> -s <TEST ID>{extra_parameters}, for example:")
            logger.info("")
            logger.info(
                f"west twister -p {example_instance.platform.name}"
                f" -s {example_instance.testsuite.name}"
                f"{extra_parameters}"
            )
            logger.info("or with west:")
            logger.info(
                f"west build -p -b {example_instance.platform.name} {cwd_rel_path}"
                f" -T {example_instance.testsuite.id}"
            )
            logger.info("-+" * 40)

    def summary(self, results, duration):
        """Log the end-of-run summary (pass rates, platforms, case counts)."""
        failed = 0
        run = 0
        for instance in self.instances.values():
            if instance.status == TwisterStatus.FAIL:
                failed += 1

            # FIXME: need a better way to identify executed tests
            handler_time = instance.metrics.get('handler_time', 0)
            if float(handler_time) > 0:
                run += 1

        if results.total and results.total != results.filtered_configs:
            pass_rate = (float(results.passed) /
                         float(results.total - results.filtered_configs))
        else:
            pass_rate = 0

        passed_color = (
            TwisterStatus.get_color(TwisterStatus.FAIL)
            if failed
            else TwisterStatus.get_color(TwisterStatus.PASS)
        )
        unfiltered_configs = results.total - results.filtered_configs
        notrun_number_section = (
            f'{TwisterStatus.get_color(TwisterStatus.NOTRUN)}{results.notrun}{Fore.RESET}'
            if results.notrun
            else f'{results.notrun}'
        )
        failed_number_section = (
            f'{TwisterStatus.get_color(TwisterStatus.FAIL)}{results.failed}{Fore.RESET}'
            if results.failed
            else f'{results.failed}'
        )
        error_number_section = (
            f'{TwisterStatus.get_color(TwisterStatus.ERROR)}{results.error}{Fore.RESET}'
            if results.error
            else f'{results.error}'
        )
        warnings_number_section = (
            f'{Fore.YELLOW}{self.plan.warnings + results.warnings}{Fore.RESET}'
            if (self.plan.warnings + results.warnings)
            else 'no'
        )
        logger.info(
            f"{passed_color}{results.passed} of {unfiltered_configs}{Fore.RESET}"
            f" executed test configurations passed ({pass_rate:.2%}),"
            f" {notrun_number_section} built (not run),"
            f" {failed_number_section} failed,"
            f" {error_number_section} errored,"
            f" with {warnings_number_section} warnings"
            f" in {duration:.2f} seconds."
        )

        total_platforms = len(self.platforms)
        filtered_platforms = set(
            instance.platform.name for instance in self.instances.values()
            if instance.status not in [
                TwisterStatus.FILTER,
                TwisterStatus.NOTRUN,
                TwisterStatus.SKIP
            ]
        )
        # if we are only building, do not report about tests being executed.
        if self.platforms and not self.env.options.build_only:
            executed_cases = (
                results.cases
                - results.filtered_cases
                - results.skipped_cases
                - results.notrun_cases
            )
            pass_rate = 100 * (float(results.passed_cases) / float(executed_cases)) \
                if executed_cases != 0 else 0
            platform_rate = (100 * len(filtered_platforms) / len(self.platforms))
            blocked_after_comma = ", " + str(results.blocked_cases) + " blocked"
            failed_after_comma = ", " + str(results.failed_cases) + " failed"
            error_after_comma = ", " + str(results.error_cases) + " errored"
            none_after_comma = ", " + str(results.none_cases) + " without a status"
            logger.info(
                f'{results.passed_cases} of {executed_cases} executed test cases passed'
                f' ({pass_rate:02.2f}%)'
                f'{blocked_after_comma if results.blocked_cases else ""}'
                f'{failed_after_comma if results.failed_cases else ""}'
                f'{error_after_comma if results.error_cases else ""}'
                f'{none_after_comma if results.none_cases else ""}'
                f' on {len(filtered_platforms)} out of total {total_platforms} platforms'
                f' ({platform_rate:02.2f}%).'
            )
            if results.skipped_cases or results.notrun_cases:
                not_executed = results.skipped_cases + results.notrun_cases
                skipped_after_colon = " " + str(results.skipped_cases) + " skipped"
                notrun_after_comma = (
                    (", " if results.skipped_cases else " ")
                    + str(results.notrun_cases)
                    + " not run (built only)"
                )
                logger.info(
                    f'{not_executed} selected test cases not executed:'
                    f'{skipped_after_colon if results.skipped_cases else ""}'
                    f'{notrun_after_comma if results.notrun_cases else ""}'
                    f'.'
                )

        built_only = results.total - run - results.filtered_configs - results.skipped
        logger.info(
            f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms,"
            f" {TwisterStatus.get_color(TwisterStatus.NOTRUN)}{built_only}{Fore.RESET}"
            " test configurations were only built."
        )

    def save_reports(self, name, suffix, report_dir, no_update, platform_reports):
        """Write all report files (JSON, xunit variants, per-platform reports).

        *name* overrides the default "twister" basename; *suffix* is appended
        to the basename; *report_dir* overrides the output directory.
        """
        if not self.instances:
            return

        logger.info("Saving reports...")
        if name:
            report_name = name
        else:
            report_name = "twister"

        if report_dir:
            os.makedirs(report_dir, exist_ok=True)
            filename = os.path.join(report_dir, report_name)
            outdir = report_dir
        else:
            outdir = self.outdir
            filename = os.path.join(outdir, report_name)

        if suffix:
            # BUGFIX: keep the computed directory/basename; previously the
            # path was replaced by a literal placeholder, writing reports
            # with a bogus name into the current working directory.
            filename = f"{filename}_{suffix}"

        if not no_update:
            json_file = filename + ".json"
            self.json_report(json_file, version=self.env.version,
                             filters=self.json_filters['twister.json'])
            if self.env.options.footprint_report is not None:
                self.json_report(filename + "_footprint.json", version=self.env.version,
                                 filters=self.json_filters['footprint.json'])
            self.xunit_report(json_file, filename + ".xml", full_report=False)
            self.xunit_report(json_file, filename + "_report.xml", full_report=True)
            self.xunit_report_suites(json_file, filename + "_suite_report.xml")

            if platform_reports:
                self.target_report(json_file, outdir, suffix)

    def target_report(self, json_file, outdir, suffix):
        """Write per-platform xunit and JSON reports into *outdir*."""
        platforms = {repr(inst.platform): inst.platform for _, inst in self.instances.items()}
        for platform in platforms.values():
            if suffix:
                filename = os.path.join(outdir, f"{platform.normalized_name}_{suffix}.xml")
                json_platform_file = os.path.join(outdir, f"{platform.normalized_name}_{suffix}")
            else:
                filename = os.path.join(outdir, f"{platform.normalized_name}.xml")
                json_platform_file = os.path.join(outdir, platform.normalized_name)
            self.xunit_report(json_file, filename, platform.name, full_report=True)
            self.json_report(json_platform_file + ".json",
                             version=self.env.version, platform=platform.name,
                             filters=self.json_filters['twister.json'])
            if self.env.options.footprint_report is not None:
                self.json_report(json_platform_file + "_footprint.json",
                                 version=self.env.version, platform=platform.name,
                                 filters=self.json_filters['footprint.json'])

    def get_detailed_reason(self, reason: str, log: str) -> str:
        """Augment a generic failure *reason* with an error extracted from *log*."""
        if reason == 'CMake build failure':
            if error_key := self._parse_cmake_build_failure(log):
                return f"{reason} - {error_key}"
        elif reason == 'Build failure':  # noqa SIM102
            if error_key := self._parse_build_failure(log):
                return f"{reason} - {error_key}"
        return reason

    @staticmethod
    def _parse_cmake_build_failure(log: str) -> str | None:
        """Extract a concise error key from a CMake build log, or None."""
        last_warning = 'no warning found'
        lines = log.splitlines()
        for i, line in enumerate(lines):
            if "warning: " in line:
                last_warning = line
            elif "devicetree error: " in line:
                return "devicetree error"
            elif "fatal error: " in line:
                return line[line.index('fatal error: ') :].strip()
            elif "error: " in line:  # error: Aborting due to Kconfig warnings
                if "undefined symbol" in last_warning:
                    return last_warning[last_warning.index('undefined symbol') :].strip()
                return last_warning
            elif "CMake Error at" in line:
                # Append the first non-empty follow-up line for context.
                for next_line in lines[i + 1 :]:
                    if next_line.strip():
                        return line + ' ' + next_line
                return line
        return None

    @staticmethod
    def _parse_build_failure(log: str) -> str | None:
        """Extract a concise error key from a compiler/linker log, or None."""
        last_warning = ''
        lines = log.splitlines()
        for i, line in enumerate(lines):
            if "undefined reference" in line:
                return line[line.index('undefined reference') :].strip()
            elif "error: ld returned" in line:
                if last_warning:
                    return last_warning
                elif "overflowed by" in lines[i - 1]:
                    return "ld.bfd: region overflowed"
                elif "ld.bfd: warning: " in lines[i - 1]:
                    return "ld.bfd:" + lines[i - 1].split("ld.bfd:", 1)[-1]
                return line
            elif "error: " in line:
                return line[line.index('error: ') :].strip()
            elif ": in function " in line:
                last_warning = line[line.index('in function') :].strip()
            elif "CMake Error at" in line:
                # Append the first non-empty follow-up line for context.
                for next_line in lines[i + 1 :]:
                    if next_line.strip():
                        return line + ' ' + next_line
                return line
        return None