#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
# Copyright (c) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
 
- """Zephyr Test Runner (twister)
 
- Also check the "User and Developer Guides" at https://docs.zephyrproject.org/
 
- This script scans for the set of unit test applications in the git
 
- repository and attempts to execute them. By default, it tries to
 
- build each test case on one platform per architecture, using a precedence
 
- list defined in an architecture configuration file, and if possible
 
- run the tests in any available emulators or simulators on the system.
 
- Test cases are detected by the presence of a 'testcase.yaml' or a sample.yaml
 
- files in the application's project directory. This file may contain one or more
 
- blocks, each identifying a test scenario. The title of the block is a name for
 
- the test case, which only needs to be unique for the test cases specified in
 
- that testcase meta-data. The full canonical name for each test case is <path to
 
- test case>/<block>.
 
- Each test block in the testcase meta data can define the following key/value
 
- pairs:
 
  tags: <list of tags> (required)
    A set of string tags for the testcase. Usually pertains to
    functional domains but can be anything. Command line invocations
    of this script can filter the set of tests to run based on tag.

  skip: <True|False> (default False)
    skip testcase unconditionally. This can be used for broken tests.

  slow: <True|False> (default False)
    Don't build or run this test case unless --enable-slow was passed
    in on the command line. Intended for time-consuming test cases
    that are only run under certain circumstances, like daily
    builds.

  extra_args: <list of extra arguments>
    Extra cache entries to pass to CMake when building or running the
    test case.

  extra_configs: <list of extra configurations>
    Extra configuration options to be merged with a master prj.conf
    when building or running the test case.

  build_only: <True|False> (default False)
    If true, don't try to run the test even if the selected platform
    supports it.

  build_on_all: <True|False> (default False)
    If true, attempt to build the test on all available platforms.

  depends_on: <list of features>
    A board or platform can announce what features it supports. This option
    will enable the test only on those platforms that provide this feature.

  min_ram: <integer>
    minimum amount of RAM needed for this test to build and run. This is
    compared with information provided by the board metadata.

  min_flash: <integer>
    minimum amount of ROM needed for this test to build and run. This is
    compared with information provided by the board metadata.

  timeout: <number of seconds>
    Length of time to run test in emulator before automatically killing it.
    Defaults to 60 seconds.

  arch_allow: <list of arches, such as x86, arm, arc>
    Set of architectures that this test case should only be run for.

  arch_exclude: <list of arches, such as x86, arm, arc>
    Set of architectures that this test case should not run on.

  platform_allow: <list of platforms>
    Set of platforms that this test case should only be run for.

  platform_exclude: <list of platforms>
    Set of platforms that this test case should not run on.

  extra_sections: <list of extra binary sections>
    When computing sizes, twister will report errors if it finds
    extra, unexpected sections in the Zephyr binary unless they are named
    here. They will not be included in the size calculation.
 
  filter: <expression>
    Filter whether the testcase should be run by evaluating an expression
    against an environment containing the following values:

    { ARCH : <architecture>,
      PLATFORM : <platform>,
      <all CONFIG_* key/value pairs in the test's generated defconfig>,
      <all DT_* key/value pairs in the test's generated device tree file>,
      <all CMake key/value pairs in the test's generated CMakeCache.txt file>,
      *<env>: any environment variable available
    }

    The grammar for the expression language is as follows:

    expression ::= expression "and" expression
                 | expression "or" expression
                 | "not" expression
                 | "(" expression ")"
                 | symbol "==" constant
                 | symbol "!=" constant
                 | symbol "<" number
                 | symbol ">" number
                 | symbol ">=" number
                 | symbol "<=" number
                 | symbol "in" list
                 | symbol ":" string
                 | symbol

    list ::= "[" list_contents "]"

    list_contents ::= constant
                    | list_contents "," constant

    constant ::= number
               | string

    For the case where expression ::= symbol, it evaluates to true
    if the symbol is defined to a non-empty string.

    Operator precedence, starting from lowest to highest:
        or (left associative)
        and (left associative)
        not (right associative)
        all comparison operators (non-associative)
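
    For example, following this precedence, the hypothetical expression

        CONFIG_FOO == "y" or CONFIG_BAR and not CONFIG_BAZ

    parses as

        CONFIG_FOO == "y" or (CONFIG_BAR and (not CONFIG_BAZ))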
 
    arch_allow, arch_exclude, platform_allow, platform_exclude
    are all syntactic sugar for these expressions. For instance

        arch_exclude = x86 arc

    is the same as:

        filter = not ARCH in ["x86", "arc"]

    The ':' operator compiles the string argument as a regular expression,
    and then returns a true value only if the symbol's value in the environment
    matches. For example, if CONFIG_SOC="stm32f107xc" then

        filter = CONFIG_SOC : "stm.*"

    would match it.
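
As an illustration, a hypothetical testcase.yaml combining several of the
keys described above might look like this (the scenario name and all values
are examples only):

    tests:
      kernel.common.timing:
        tags: kernel
        min_ram: 16
        timeout: 120
        arch_exclude: arc
        filter: CONFIG_PRINTK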
 
The set of test cases that actually run depends on directives in the testcase
files and options passed in on the command line. If there is any confusion,
running with -v or examining the discard report (twister_discard.csv)
can help show why particular test cases were skipped.

Metrics (such as pass/fail state and binary size) for the last code
release are stored in scripts/release/twister_last_release.csv.
To update this, pass the --all --release options.

To load arguments from a file, write '+' before the file name, e.g.,
+file_name. File content must be one or more valid arguments separated by
line breaks instead of whitespace.
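
For example, a file named 'my_args.txt' (hypothetical name) containing:

    -v
    --testcase-root
    tests/kernel

could be passed as './scripts/twister +my_args.txt', which is equivalent to
running './scripts/twister -v --testcase-root tests/kernel'.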
 
Most everyday users will run with no arguments.
"""
 
import os
import argparse
import sys
import logging
import time
import itertools
import shutil
from collections import OrderedDict
import multiprocessing
from itertools import islice
import csv
from colorama import Fore
from pathlib import Path
from multiprocessing.managers import BaseManager
import queue
 
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
    # This file has been zephyr/scripts/twister for years,
    # and that is not going to change anytime soon. Let the user
    # run this script as ./scripts/twister without making them
    # set ZEPHYR_BASE.
    ZEPHYR_BASE = str(Path(__file__).resolve().parents[1])

    # Propagate this decision to child processes.
    os.environ['ZEPHYR_BASE'] = ZEPHYR_BASE

    print(f'ZEPHYR_BASE unset, using "{ZEPHYR_BASE}"')
 
try:
    from anytree import RenderTree, Node, find
except ImportError:
    print("Install the anytree module to use the --test-tree option")

try:
    from tabulate import tabulate
except ImportError:
    print("Install the tabulate python module with pip to use the --device-testing option.")
 
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))

from twisterlib import HardwareMap, TestSuite, SizeCalculator, CoverageTool, ExecutionCounter

logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
 
def size_report(sc):
    logger.info(sc.filename)
    logger.info("SECTION NAME             VMA        LMA     SIZE  HEX SZ TYPE")
    for v in sc.sections:
        logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
                    (v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
                     v["type"]))

    logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
                (sc.rom_size, sc.ram_size))
    logger.info("")
 
def export_tests(filename, tests):
    with open(filename, "wt") as csvfile:
        fieldnames = ['section', 'subsection', 'title', 'reference']
        cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
        for test in tests:
            data = test.split(".")
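            # data holds the dot-separated identifier components,
            # e.g. "kernel.fifo.poll" -> ["kernel", "fifo", "poll"];
            # the first component becomes the CSV 'section' and the
            # second the 'subsection'.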
 
            if len(data) > 1:
                subsec = " ".join(data[1].split("_")).title()
                rowdict = {
                    "section": data[0].capitalize(),
                    "subsection": subsec,
                    "title": test,
                    "reference": test
                }
                cw.writerow(rowdict)
            else:
                logger.error("{} can't be exported".format(test))
 
def parse_arguments():
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.fromfile_prefix_chars = "+"

    case_select = parser.add_argument_group("Test case selection",
                                            """
Artificially long but functional example:
    $ ./scripts/twister -v     \\
      --testcase-root tests/ztest/base    \\
      --testcase-root tests/kernel   \\
      --test      tests/ztest/base/testing.ztest.verbose_0  \\
      --test      tests/kernel/fifo/fifo_api/kernel.fifo.poll

   "kernel.fifo.poll" is one of the test section names in
                                 __/fifo_api/testcase.yaml
    """)
 
-     parser.add_argument("--force-toolchain", action="store_true",
 
-                         help="Do not filter based on toolchain, use the set "
 
-                              " toolchain unconditionally")
 
    parser.add_argument(
        "-p", "--platform", action="append",
        help="Platform filter for testing. This option may be used multiple "
             "times. Testcases will only be built/run on the platforms "
             "specified. If this option is not used, then platforms marked "
             "as default in the platform metadata file will be chosen "
             "to build and test. ")
    parser.add_argument("-P", "--exclude-platform", action="append", default=[],
        help="""Exclude platforms and do not build or run any tests
        on those platforms. This option can be called multiple times.
        """
        )
    parser.add_argument(
        "-a", "--arch", action="append",
        help="Arch filter for testing. Takes precedence over --platform. "
             "If unspecified, test all arches. Multiple invocations "
             "are treated as a logical 'or' relationship")
    parser.add_argument(
        "-t", "--tag", action="append",
        help="Specify tags to restrict which tests to run by tag value. "
             "Default is to not do any tag filtering. Multiple invocations "
             "are treated as a logical 'or' relationship")
    parser.add_argument("-e", "--exclude-tag", action="append",
                        help="Specify tags of tests that should not run. "
                             "Default is to run all tests with all tags.")
    case_select.add_argument(
        "-f",
        "--only-failed",
        action="store_true",
        help="Run only those tests that failed the previous twister run "
             "invocation.")
    parser.add_argument(
        "--retry-failed", type=int, default=0,
        help="Retry failing tests again, up to the number of times specified.")
    parser.add_argument(
        "--retry-interval", type=int, default=60,
        help="Retry failing tests after the specified period of time.")
    test_xor_subtest = case_select.add_mutually_exclusive_group()
    test_xor_subtest.add_argument(
        "-s", "--test", action="append",
        help="Run only the specified test cases. These are named by "
             "<path/relative/to/Zephyr/base/section.name.in.testcase.yaml>")
    test_xor_subtest.add_argument(
        "--sub-test", action="append",
        help="""Recursively find sub-test functions and run the entire
        test section where they were found, including all sibling test
        functions. Sub-tests are named by:
        section.name.in.testcase.yaml.function_name_without_test_prefix
        Example: kernel.fifo.poll.fifo_loop
        """)
    parser.add_argument(
        "-l", "--all", action="store_true",
        help="Build/test on all platforms. Any --platform arguments "
             "ignored.")
    parser.add_argument(
        "-o", "--report-dir",
        help="""Output reports containing results of the test run into the
        specified directory.
        The output will be both in CSV and JUNIT format
        (twister.csv and twister.xml).
        """)
    parser.add_argument(
        "--json-report", action="store_true",
        help="""Generate a JSON file with all test results. [Experimental]
        """)
    parser.add_argument(
        "--platform-reports", action="store_true",
        help="""Create individual reports for each platform.
        """)
    parser.add_argument(
        "--report-name",
        help="""Create a report with a custom name.
        """)
    parser.add_argument(
        "--report-suffix",
        help="""Add a suffix to all generated file names, for example to add a
        version or a commit ID.
        """)
    parser.add_argument("--report-excluded",
                        action="store_true",
                        help="""List all tests that are never run based on current scope and
            coverage. If you are looking for accurate results, run this with
            --all, but this will take a while...""")
    parser.add_argument("--compare-report",
                        help="Use this report file for size comparison")
    parser.add_argument(
        "-B", "--subset",
        help="Only run a subset of the tests, 1/4 for running the first 25%%, "
             "3/5 means run the 3rd fifth of the total. "
             "This option is useful when running a large number of tests on "
             "different hosts to speed up execution time.")
    parser.add_argument(
        "-N", "--ninja", action="store_true",
        help="Use the Ninja generator with CMake")
    parser.add_argument(
        "-y", "--dry-run", action="store_true",
        help="""Create the filtered list of test cases, but don't actually
        run them. Useful if you're just interested in the discard report
        generated for every run and saved in the specified output
        directory (twister_discard.csv).
        """)
    parser.add_argument("--list-tags", action="store_true",
                        help="list all tags in selected tests")
    case_select.add_argument("--list-tests", action="store_true",
                             help="""List of all sub-test functions recursively found in
        all --testcase-root arguments. Note different sub-tests can share
        the same section name and come from different directories.
        The output is flattened and reports --sub-test names only,
        not their directories. For instance net.socket.getaddrinfo_ok
        and net.socket.fd_set belong to different directories.
        """)
    case_select.add_argument("--test-tree", action="store_true",
                             help="""Output the testsuite in a tree form""")
    case_select.add_argument("--list-test-duplicates", action="store_true",
                             help="""List tests with duplicate identifiers.
        """)
 
-     parser.add_argument("--export-tests", action="store",
 
-                         metavar="FILENAME",
 
-                         help="Export tests case meta-data to a file in CSV format."
 
-                              "Test instances can be exported per target by supplying "
 
-                              "the platform name using --platform option. (tests for only "
 
-                              " one platform can be exported at a time)")
 
-     parser.add_argument("--timestamps",
 
-                         action="store_true",
 
-                         help="Print all messages with time stamps")
 
-     parser.add_argument(
 
-         "-r", "--release", action="store_true",
 
-         help="Update the benchmark database with the results of this test "
 
-              "run. Intended to be run by CI when tagging an official "
 
-              "release. This database is used as a basis for comparison "
 
-              "when looking for deltas in metrics such as footprint")
 
-     parser.add_argument("-W", "--disable-warnings-as-errors", action="store_true",
 
-                         help="Treat warning conditions as errors")
 
-     parser.add_argument("--overflow-as-errors", action="store_true",
 
-                         help="Treat RAM/SRAM overflows as errors")
 
-     parser.add_argument(
 
-         "-v",
 
-         "--verbose",
 
-         action="count",
 
-         default=0,
 
-         help="Emit debugging information, call multiple times to increase "
 
-              "verbosity")
 
-     parser.add_argument(
 
-         "-i", "--inline-logs", action="store_true",
 
-         help="Upon test failure, print relevant log data to stdout "
 
-              "instead of just a path to it")
 
-     parser.add_argument("--log-file", metavar="FILENAME", action="store",
 
-                         help="log also to file")
 
-     parser.add_argument(
 
-         "-m", "--last-metrics", action="store_true",
 
-         help="Instead of comparing metrics from the last --release, "
 
-              "compare with the results of the previous twister "
 
-              "invocation")
 
    parser.add_argument(
        "-u",
        "--no-update",
        action="store_true",
        help="Do not update the results of the last twister run")
 
    parser.add_argument(
        "-G",
        "--integration",
        action="store_true",
        help="Run integration tests")
    case_select.add_argument(
        "-F",
        "--load-tests",
        metavar="FILENAME",
        action="store",
        help="Load list of tests and platforms to be run from file.")
 
    parser.add_argument(
        "--quarantine-list",
        metavar="FILENAME",
        help="Load list of test scenarios under quarantine. The entries in "
             "the file need to correspond to the test scenario names as in "
             "the corresponding tests' .yaml files. These scenarios "
             "will be skipped with quarantine as the reason.")
    parser.add_argument(
        "--quarantine-verify",
        action="store_true",
        help="Use the list of test scenarios under quarantine and run them "
             "to verify their current status.")
 
    case_select.add_argument(
        "-E",
        "--save-tests",
        metavar="FILENAME",
        action="store",
        help="Append list of tests and platforms to be run to file.")
    test_or_build = parser.add_mutually_exclusive_group()
    test_or_build.add_argument(
        "-b", "--build-only", action="store_true",
        help="Only build the code, do not execute any of it in QEMU")
    test_or_build.add_argument(
        "--test-only", action="store_true",
        help="""Only run device tests with current artifacts, do not build
             the code""")
    parser.add_argument(
        "--cmake-only", action="store_true",
        help="Only run cmake, do not build or run.")
    parser.add_argument(
        "--filter", choices=['buildable', 'runnable'],
        default='buildable',
        help="""Filter tests to be built and executed. By default everything is
        built and if a test is runnable (emulation or a connected device), it
        is run. This option allows for example to only build tests that can
        actually be run. Runnable is a subset of buildable.""")
    parser.add_argument(
        "-M", "--runtime-artifact-cleanup", action="store_true",
        help="Delete artifacts of passing tests.")
 
    parser.add_argument(
        "-j", "--jobs", type=int,
        help="Number of jobs for building, defaults to number of CPU threads, "
             "overcommitted by factor 2 when --build-only")
 
    parser.add_argument(
        "--show-footprint", action="store_true",
        help="Show footprint statistics and deltas since last release."
    )
 
    parser.add_argument(
        "-H", "--footprint-threshold", type=float, default=5,
        help="When checking test case footprint sizes, warn the user if "
             "the new app size is greater than the specified percentage "
             "from the last release. Default is 5. 0 to warn on any "
             "increase in app size")
 
    parser.add_argument(
        "-D", "--all-deltas", action="store_true",
        help="Show all footprint deltas, positive or negative. Implies "
             "--footprint-threshold=0")
    parser.add_argument(
        "-O", "--outdir",
        default=os.path.join(os.getcwd(), "twister-out"),
        help="Output directory for logs and binaries. "
             "Default is 'twister-out' in the current directory. "
             "This directory will be cleaned unless '--no-clean' is set. "
             "The '--clobber-output' option controls what cleaning does.")
    parser.add_argument(
        "-c", "--clobber-output", action="store_true",
        help="Cleaning the output directory will simply delete it instead "
             "of the default policy of renaming.")
    parser.add_argument(
        "-n", "--no-clean", action="store_true",
        help="Re-use the outdir before building. Will result in "
             "faster compilation since builds will be incremental.")
    case_select.add_argument(
        "-T", "--testcase-root", action="append", default=[],
        help="Base directory to recursively search for test cases. All "
             "testcase.yaml files under here will be processed. May be "
             "called multiple times. Defaults to the 'samples/' and "
             "'tests/' directories at the base of the Zephyr tree.")
 
    board_root_list = ["%s/boards" % ZEPHYR_BASE,
                       "%s/scripts/pylib/twister/boards" % ZEPHYR_BASE]
    parser.add_argument(
        "-A", "--board-root", action="append", default=board_root_list,
        help="""Directory to search for board configuration files. All .yaml
files in the directory will be processed. The directory should have the same
structure as in the main Zephyr tree: boards/<arch>/<board_name>/""")
 
    parser.add_argument(
        "-z", "--size", action="append",
        help="Don't run twister. Instead, produce a report to "
             "stdout detailing RAM/ROM sizes on the specified filenames. "
             "All other command line arguments ignored.")
    parser.add_argument(
        "-S", "--enable-slow", action="store_true",
        help="Execute time-consuming test cases that have been marked "
             "as 'slow' in testcase.yaml. Normally these are only built.")
    parser.add_argument(
        "-K", "--force-platform", action="store_true",
        help="""Force testing on selected platforms,
        even if they are excluded in the test configuration (testcase.yaml)"""
    )
    parser.add_argument(
        "--disable-unrecognized-section-test", action="store_true",
        default=False,
        help="Skip the 'unrecognized section' test.")
    parser.add_argument("-R", "--enable-asserts", action="store_true",
                        default=True,
                        help="deprecated, left for compatibility")
    parser.add_argument("--disable-asserts", action="store_false",
                        dest="enable_asserts",
                        help="deprecated, left for compatibility")
    parser.add_argument("-Q", "--error-on-deprecations", action="store_false",
                        help="Error on deprecation warnings.")
    parser.add_argument("--enable-size-report", action="store_true",
                        help="Enable expensive computation of RAM/ROM segment sizes.")
    parser.add_argument(
        "-x", "--extra-args", action="append", default=[],
        help="""Extra CMake cache entries to define when building test cases.
        May be called multiple times. The key-value entries will be
        prefixed with -D before being passed to CMake.
        E.g
        "twister -x=USE_CCACHE=0"
        will translate to
        "cmake -DUSE_CCACHE=0"
        which will ultimately disable ccache.
        """
    )
    parser.add_argument(
        "--emulation-only", action="store_true",
        help="Only build and run emulation platforms")
    parser.add_argument(
        "--device-testing", action="store_true",
        help="Test on device directly. Specify the serial device to "
             "use with the --device-serial option.")
    parser.add_argument(
        "-X", "--fixture", action="append", default=[],
        help="Specify a fixture that a board might support")
    serial = parser.add_mutually_exclusive_group()
    serial.add_argument("--device-serial",
                        help="""Serial device for accessing the board
                        (e.g., /dev/ttyACM0)
                        """)
    serial.add_argument("--device-serial-pty",
                        help="""Script for controlling pseudoterminal.
                        Twister believes that it interacts with a terminal
                        when it actually interacts with the script.
                        E.g "twister --device-testing
                        --device-serial-pty <script>"
                        """)
    parser.add_argument("--generate-hardware-map",
                        help="""Probe serial devices connected to this platform
                        and create a hardware map file to be used with
                        --device-testing
                        """)
    parser.add_argument("--persistent-hardware-map", action='store_true',
                        help="""With --generate-hardware-map, tries to use
                        persistent names for serial devices on platforms
                        that support this feature (currently only Linux).
                        """)
    parser.add_argument("--hardware-map",
                        help="""Load hardware map from a file. This will be used
                        for testing on hardware that is listed in the file.
                        """)
 
-     parser.add_argument("--pre-script",
 
-                         help="""specify a pre script. This will be executed
 
-                         before device handler open serial port and invoke runner.
 
-                         """)
 
    parser.add_argument(
        "--west-flash", nargs='?', const=[],
        help="""Uses west instead of ninja or make to flash when running with
             --device-testing. Supports comma-separated argument list.
        E.g "twister --device-testing --device-serial /dev/ttyACM0
                         --west-flash="--board-id=foobar,--erase"
        will translate to "west flash -- --board-id=foobar --erase"
        NOTE: device-testing must be enabled to use this option.
        """
    )
    parser.add_argument(
        "--west-runner",
        help="""Uses the specified west runner instead of default when running
             with --west-flash.
        E.g "twister --device-testing --device-serial /dev/ttyACM0
                         --west-flash --west-runner=pyocd"
        will translate to "west flash --runner pyocd"
        NOTE: west-flash must be enabled to use this option.
        """
    )
 
    valgrind_asan_group = parser.add_mutually_exclusive_group()
    valgrind_asan_group.add_argument(
        "--enable-valgrind", action="store_true",
        help="""Run binary through valgrind and check for several memory access
        errors. Valgrind needs to be installed on the host. This option only
        works with host binaries such as those generated for the native_posix
        configuration and is mutually exclusive with --enable-asan.
        """)
    valgrind_asan_group.add_argument(
        "--enable-asan", action="store_true",
        help="""Enable address sanitizer to check for several memory access
        errors. Libasan needs to be installed on the host. This option only
        works with host binaries such as those generated for the native_posix
        configuration and is mutually exclusive with --enable-valgrind.
        """)
 
    parser.add_argument(
        "--enable-lsan", action="store_true",
        help="""Enable leak sanitizer to check for heap memory leaks.
        Libasan needs to be installed on the host. This option only
        works with host binaries such as those generated for the native_posix
        configuration and when --enable-asan is given.
        """)
    parser.add_argument(
        "--enable-ubsan", action="store_true",
        help="""Enable undefined behavior sanitizer to check for undefined
        behaviour during program execution. It uses an optional runtime library
        to provide better error diagnostics. This option only works with host
        binaries such as those generated for the native_posix configuration.
        """)
    parser.add_argument("--enable-coverage", action="store_true",
                        help="Enable code coverage using gcov.")
    parser.add_argument("-C", "--coverage", action="store_true",
                        help="Generate coverage reports. Implies "
                             "--enable-coverage.")
 
-     parser.add_argument("--coverage-platform", action="append", default=[],
 
-                         help="Plarforms to run coverage reports on. "
 
-                              "This option may be used multiple times. "
 
-                              "Default to what was selected with --platform.")
 
-     parser.add_argument("--gcov-tool", default=None,
 
-                         help="Path to the gcov tool to use for code coverage "
 
-                              "reports")
 
-     parser.add_argument("--coverage-tool", choices=['lcov', 'gcovr'], default='lcov',
 
-                         help="Tool to use to generate coverage report.")
 
-     parser.add_argument("--coverage-basedir", default=ZEPHYR_BASE,
 
-                         help="Base source directory for coverage report.")
 
-     return parser.parse_args()
 
def main():
    start_time = time.time()
    options = parse_arguments()
    previous_results = None

    # Cleanup
    if options.no_clean or options.only_failed or options.test_only:
        if os.path.exists(options.outdir):
            print("Keeping artifacts untouched")
    elif options.last_metrics:
        ls = os.path.join(options.outdir, "twister.csv")
        if os.path.exists(ls):
            with open(ls, "r") as fp:
                previous_results = fp.read()
        else:
            sys.exit(f"Can't compare metrics with nonexistent file {ls}")
    elif os.path.exists(options.outdir):
        if options.clobber_output:
            print("Deleting output directory {}".format(options.outdir))
            shutil.rmtree(options.outdir)
        else:
            for i in range(1, 100):
                new_out = options.outdir + ".{}".format(i)
                if not os.path.exists(new_out):
                    print("Renaming output directory to {}".format(new_out))
                    shutil.move(options.outdir, new_out)
                    break
 
    previous_results_file = None
    os.makedirs(options.outdir, exist_ok=True)
    if options.last_metrics and previous_results:
        previous_results_file = os.path.join(options.outdir, "baseline.csv")
        with open(previous_results_file, "w") as fp:
            fp.write(previous_results)

    # create file handler which logs even debug messages
    if options.log_file:
        fh = logging.FileHandler(options.log_file)
    else:
        fh = logging.FileHandler(os.path.join(options.outdir, "twister.log"))
    fh.setLevel(logging.DEBUG)

    # create console handler with a higher log level
    ch = logging.StreamHandler()
    VERBOSE = options.verbose
    if VERBOSE > 1:
        ch.setLevel(logging.DEBUG)
    else:
        ch.setLevel(logging.INFO)

    # create formatter and add it to the handlers
    if options.timestamps:
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    else:
        formatter = logging.Formatter('%(levelname)-7s - %(message)s')
    formatter_file = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter_file)

    # add the handlers to logger
    logger.addHandler(ch)
    logger.addHandler(fh)

    hwm = HardwareMap()
    if options.generate_hardware_map:
        hwm.scan(persistent=options.persistent_hardware_map)
        hwm.save(options.generate_hardware_map)
        return

    if not options.device_testing and options.hardware_map:
        hwm.load(options.hardware_map)
        logger.info("Available devices:")
        table = []
        hwm.dump(connected_only=True)
        return

    if options.west_runner and options.west_flash is None:
        logger.error("west-runner requires west-flash to be enabled")
        sys.exit(1)

    if options.west_flash and not options.device_testing:
        logger.error("west-flash requires device-testing to be enabled")
        sys.exit(1)

    if options.coverage:
        options.enable_coverage = True

    if not options.coverage_platform:
        options.coverage_platform = options.platform

    if options.size:
        for fn in options.size:
            size_report(SizeCalculator(fn, []))
        sys.exit(0)
 
    if options.subset:
        subset, sets = options.subset.split("/")
        if int(subset) > 0 and int(sets) >= int(subset):
            logger.info("Running only a subset: %s/%s" % (subset, sets))
        else:
            logger.error("You have provided an invalid subset value: %s." % options.subset)
            return
 
    if not options.testcase_root:
        options.testcase_root = [os.path.join(ZEPHYR_BASE, "tests"),
                                 os.path.join(ZEPHYR_BASE, "samples")]

    if options.show_footprint or options.compare_report or options.release:
        options.enable_size_report = True

    suite = TestSuite(options.board_root, options.testcase_root, options.outdir)

    # Check version of zephyr repo
    suite.check_zephyr_version()

    # Set testsuite options from command line.
    suite.build_only = options.build_only
    suite.cmake_only = options.cmake_only
    suite.cleanup = options.runtime_artifact_cleanup
    suite.test_only = options.test_only
    suite.enable_slow = options.enable_slow
    suite.device_testing = options.device_testing
    suite.fixtures = options.fixture
    suite.enable_asan = options.enable_asan
    suite.enable_lsan = options.enable_lsan
    suite.enable_ubsan = options.enable_ubsan
    suite.enable_coverage = options.enable_coverage
    suite.enable_valgrind = options.enable_valgrind
    suite.coverage_platform = options.coverage_platform
    suite.inline_logs = options.inline_logs
    suite.enable_size_report = options.enable_size_report
    suite.extra_args = options.extra_args
    suite.west_flash = options.west_flash
    suite.west_runner = options.west_runner
    suite.verbose = VERBOSE
    suite.warnings_as_errors = not options.disable_warnings_as_errors
    suite.integration = options.integration
    suite.overflow_as_errors = options.overflow_as_errors

    if options.ninja:
        suite.generator_cmd = "ninja"
        suite.generator = "Ninja"
    else:
        suite.generator_cmd = "make"
        suite.generator = "Unix Makefiles"

    # Set number of jobs
    if options.jobs:
        suite.jobs = options.jobs
    elif options.build_only:
        suite.jobs = multiprocessing.cpu_count() * 2
    else:
        suite.jobs = multiprocessing.cpu_count()
    logger.info("JOBS: %d" % suite.jobs)

    run_individual_tests = []
    if options.test:
        run_individual_tests = options.test

    num = suite.add_testcases(testcase_filter=run_individual_tests)
    if num == 0:
        logger.error("No test cases found at the specified location...")
        sys.exit(1)

    suite.add_configurations()

    if options.device_testing:
        if options.hardware_map:
            hwm.load(options.hardware_map)
            suite.duts = hwm.duts
            if not options.platform:
                options.platform = []
                for d in hwm.duts:
                    if d.connected:
                        options.platform.append(d.platform)
        elif options.device_serial or options.device_serial_pty:
            if options.platform and len(options.platform) == 1:
                if options.device_serial:
                    hwm.add_device(options.device_serial,
                                   options.platform[0],
                                   options.pre_script,
                                   False)
                else:
                    hwm.add_device(options.device_serial_pty,
                                   options.platform[0],
                                   options.pre_script,
                                   True)
                suite.duts = hwm.duts
            else:
                logger.error("""When --device-testing is used with
                             --device-serial or --device-serial-pty,
                             only one platform is allowed""")

    if suite.load_errors:
        sys.exit(1)

    if options.list_tags:
        tags = set()
        for _, tc in suite.testcases.items():
            tags = tags.union(tc.tags)
        for t in tags:
            print("- {}".format(t))
        return

    if not options.platform and (options.list_tests or options.test_tree or options.list_test_duplicates \
        or options.sub_test or options.export_tests):
        cnt = 0
        all_tests = suite.get_all_tests()

        if options.export_tests:
            export_tests(options.export_tests, all_tests)
            return

        if options.list_test_duplicates:
            import collections
            dupes = [item for item, count in collections.Counter(all_tests).items() if count > 1]
            if dupes:
                print("Tests with duplicate identifiers:")
                for dupe in dupes:
                    print("- {}".format(dupe))
                    for dc in suite.get_testcase(dupe):
                        print("  - {}".format(dc))
            else:
                print("No duplicates found.")
            return

        if options.sub_test:
            for st in options.sub_test:
                subtests = suite.get_testcase(st)
                for sti in subtests:
                    run_individual_tests.append(sti.name)
            if run_individual_tests:
                logger.info("Running the following tests:")
                for test in run_individual_tests:
                    print(" - {}".format(test))
            else:
                logger.info("Tests not found")
                return
        elif options.list_tests or options.test_tree:
            if options.test_tree:
                testsuite = Node("Testsuite")
                samples = Node("Samples", parent=testsuite)
                tests = Node("Tests", parent=testsuite)
            for test in sorted(all_tests):
                cnt = cnt + 1
                if options.list_tests:
                    print(" - {}".format(test))
                if options.test_tree:
                    if test.startswith("sample."):
                        sec = test.split(".")
                        area = find(samples, lambda node: node.name == sec[1] and node.parent == samples)
                        if not area:
                            area = Node(sec[1], parent=samples)
                        t = Node(test, parent=area)
                    else:
                        sec = test.split(".")
                        area = find(tests, lambda node: node.name == sec[0] and node.parent == tests)
                        if not area:
                            area = Node(sec[0], parent=tests)
                        if area and len(sec) > 2:
                            subarea = find(area, lambda node: node.name == sec[1] and node.parent == area)
                            if not subarea:
                                subarea = Node(sec[1], parent=area)
                            t = Node(test, parent=subarea)
            if options.list_tests:
                print("{} total.".format(cnt))
            if options.test_tree:
                for pre, _, node in RenderTree(testsuite):
                    print("%s%s" % (pre, node.name))
            return

    discards = []

    if options.report_suffix:
        last_run = os.path.join(options.outdir, "twister_{}.csv".format(options.report_suffix))
    else:
        last_run = os.path.join(options.outdir, "twister.csv")

    if options.quarantine_list:
        suite.load_quarantine(options.quarantine_list)

    if options.quarantine_verify:
        if not options.quarantine_list:
            logger.error("No quarantine list given to be verified")
            sys.exit(1)
        suite.quarantine_verify = options.quarantine_verify
 
    if options.only_failed:
        suite.load_from_file(last_run, filter_status=['skipped', 'passed'])
        suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
    elif options.load_tests:
        suite.load_from_file(options.load_tests, filter_status=['skipped', 'error'])
        suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
    elif options.test_only:
        # Get the list of connected hardware and filter tests to only be run
        # on connected hardware in cases where no platform was specified when
        # running the tests.
        # If the platform does not exist in the hardware map, just skip it.
        connected_list = []
        if not options.platform:
            for connected in hwm.connected_hardware:
                if connected['connected']:
                    connected_list.append(connected['platform'])
        suite.load_from_file(last_run, filter_status=['skipped', 'error'],
                             filter_platform=connected_list)
        suite.selected_platforms = set(p.platform.name for p in suite.instances.values())
    else:
        discards = suite.apply_filters(
            enable_slow=options.enable_slow,
            platform=options.platform,
            exclude_platform=options.exclude_platform,
            arch=options.arch,
            tag=options.tag,
            exclude_tag=options.exclude_tag,
            force_toolchain=options.force_toolchain,
            all=options.all,
            emulation_only=options.emulation_only,
            run_individual_tests=run_individual_tests,
            runnable=(options.device_testing or options.filter == 'runnable'),
            force_platform=options.force_platform
        )
 
    if (options.export_tests or options.list_tests) and options.platform:
        if len(options.platform) > 1:
            logger.error("When exporting tests, only one platform "
                         "should be specified.")
            return
        for p in options.platform:
            inst = suite.get_platform_instances(p)
            if options.export_tests:
                tests = [x.testcase.cases for x in inst.values()]
                merged = list(itertools.chain(*tests))
                export_tests(options.export_tests, merged)
                return

            count = 0
            for i in inst.values():
                for c in i.testcase.cases:
                    print(f"- {c}")
                    count += 1
            print(f"Tests found: {count}")
        return
 
    if VERBOSE > 1 and discards:
        # If we are using a command line platform filter, there is no need to
        # list every other platform as excluded; we know that already.
        # Show only the discards that apply to the platforms selected on the
        # command line.
        for i, reason in discards.items():
            if options.platform and i.platform.name not in options.platform:
                continue
            logger.debug(
                "{:<25} {:<50} {}SKIPPED{}: {}".format(
                    i.platform.name,
                    i.testcase.name,
                    Fore.YELLOW,
                    Fore.RESET,
                    reason))
 
    if options.report_excluded:
        all_tests = suite.get_all_tests()
        to_be_run = set()
        for i, p in suite.instances.items():
            to_be_run.update(p.testcase.cases)

        if all_tests - to_be_run:
            print("Tests that never build or run:")
            for not_run in all_tests - to_be_run:
                print("- {}".format(not_run))
        return
 
    if options.subset:
        # Test instances are sorted depending on the context. For CI runs
        # the execution order is: "plat1-testA, plat1-testB, ...,
        # plat1-testZ, plat2-testA, ...". For hardware tests
        # (device_testing), where multiple physical platforms can run the
        # tests in parallel, it is more efficient to run in the order:
        # "plat1-testA, plat2-testA, ..., plat1-testB, plat2-testB, ..."
        if options.device_testing:
            suite.instances = OrderedDict(sorted(suite.instances.items(),
                                key=lambda x: x[0][x[0].find("/") + 1:]))
        else:
            suite.instances = OrderedDict(sorted(suite.instances.items()))

        # Do the calculation based on what is actually going to be run and
        # evaluated at runtime; ignore the cases we already know are going to
        # be skipped. This fixes an issue where some sets would get a majority
        # of skips and basically run nothing besides filtering.
        to_run = {k: v for k, v in suite.instances.items() if v.status is None}
        subset, sets = options.subset.split("/")
        subset = int(subset)
        sets = int(sets)
        total = len(to_run)
        per_set = int(total / sets)
        num_extra_sets = total - (per_set * sets)

        # Try and be more fair for rounding error with integer division
        # so the last subset doesn't get overloaded; we add 1 extra to
        # subsets 1..num_extra_sets.
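        # Worked example (illustrative numbers): with total = 45 and
        # sets = 4, per_set = 11 and num_extra_sets = 1, so subset 1
        # runs 12 instances and subsets 2-4 run 11 each.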
 
        if subset <= num_extra_sets:
            start = (subset - 1) * (per_set + 1)
            end = start + per_set + 1
        else:
            base = num_extra_sets * (per_set + 1)
            start = ((subset - num_extra_sets - 1) * per_set) + base
            end = start + per_set

        sliced_instances = islice(to_run.items(), start, end)
        skipped = {k: v for k, v in suite.instances.items() if v.status == 'skipped'}
        suite.instances = OrderedDict(sliced_instances)
        if subset == 1:
            # add all pre-filtered tests that are skipped to the first set to
            # allow for better distribution among all sets.
            suite.instances.update(skipped)

    if options.save_tests:
        suite.csv_report(options.save_tests)
        return
 
-     logger.info("%d test scenarios (%d configurations) selected, %d configurations discarded due to filters." %
 
-                 (len(suite.testcases), len(suite.instances), len(discards)))
 
-     if options.device_testing and not options.build_only:
 
-         print("\nDevice testing on:")
 
-         hwm.dump(filtered=suite.selected_platforms)
 
-         print("")
 
-     if options.dry_run:
 
-         duration = time.time() - start_time
 
-         logger.info("Completed in %d seconds" % (duration))
 
-         return
 
-     retries = options.retry_failed + 1
 
-     completed = 0
 
-     BaseManager.register('LifoQueue', queue.LifoQueue)
 
-     manager = BaseManager()
 
-     manager.start()
 
-     results = ExecutionCounter(total=len(suite.instances))
 
-     pipeline = manager.LifoQueue()
 
-     done_queue = manager.LifoQueue()
 
-     suite.update_counting(results, initial=True)
 
-     suite.start_time = start_time
 
-     while True:
 
-         completed += 1
 
-         if completed > 1:
 
-             logger.info("%d Iteration:" % (completed))
 
-             time.sleep(options.retry_interval)  # waiting for the system to settle down
 
-             results.done = results.total - results.failed
 
-             results.failed = results.error
 
-         results = suite.execute(pipeline, done_queue, results)
 
-         while True:
 
-             try:
 
-                 inst = done_queue.get_nowait()
 
-             except queue.Empty:
 
-                 break
 
-             else:
 
-                 inst.metrics.update(suite.instances[inst.name].metrics)
 
-                 inst.metrics["handler_time"] = inst.handler.duration if inst.handler else 0
 
-                 inst.metrics["unrecognized"] = []
 
-                 suite.instances[inst.name] = inst
 
-         print("")
 
-         retries = retries - 1
 
        # There are cases where failed == error (only build failures);
        # we do not retry build failures.
        if retries == 0 or results.failed == results.error:
            break
 
    # figure out which report to use for size comparison
    if options.compare_report:
        report_to_use = options.compare_report
    elif options.last_metrics:
        report_to_use = previous_results_file
    else:
        report_to_use = suite.RELEASE_DATA

    suite.footprint_reports(report_to_use,
                            options.show_footprint,
                            options.all_deltas,
                            options.footprint_threshold,
                            options.last_metrics)

    suite.duration = time.time() - start_time
    suite.update_counting(results)
    suite.summary(results, options.disable_unrecognized_section_test)

    if options.coverage:
        if not options.gcov_tool:
            use_system_gcov = False
            for plat in options.coverage_platform:
                ts_plat = suite.get_platform(plat)
                if ts_plat and (ts_plat.type in {"native", "unit"}):
                    use_system_gcov = True
            if use_system_gcov or "ZEPHYR_SDK_INSTALL_DIR" not in os.environ:
                options.gcov_tool = "gcov"
            else:
                options.gcov_tool = os.path.join(os.environ["ZEPHYR_SDK_INSTALL_DIR"],
                                                 "x86_64-zephyr-elf/bin/x86_64-zephyr-elf-gcov")

        logger.info("Generating coverage files...")
        coverage_tool = CoverageTool.factory(options.coverage_tool)
        coverage_tool.gcov_tool = options.gcov_tool
        coverage_tool.base_dir = os.path.abspath(options.coverage_basedir)
        coverage_tool.add_ignore_file('generated')
        coverage_tool.add_ignore_directory('tests')
        coverage_tool.add_ignore_directory('samples')
        coverage_tool.generate(options.outdir)

    if options.device_testing and not options.build_only:
        print("\nHardware distribution summary:\n")
        table = []
        header = ['Board', 'ID', 'Counter']
        for d in hwm.duts:
            if d.connected and d.platform in suite.selected_platforms:
                row = [d.platform, d.id, d.counter]
                table.append(row)
        print(tabulate(table, headers=header, tablefmt="github"))

    suite.save_reports(options.report_name,
                       options.report_suffix,
                       options.report_dir,
                       options.no_update,
                       options.release,
                       options.only_failed,
                       options.platform_reports,
                       options.json_report
                       )
 
    # FIXME: remove later
    #logger.info(f"failed: {results.failed}, cases: {results.cases}, skipped configurations: {results.skipped_configs}, skipped_cases: {results.skipped_cases}, skipped(runtime): {results.skipped_runtime}, passed: {results.passed}, total: {results.total}, done: {results.done}")

    logger.info("Run completed")
    if results.failed or (suite.warnings and suite.warnings_as_errors):
        sys.exit(1)
 
if __name__ == "__main__":
    try:
        main()
    finally:
        if os.isatty(1):  # stdout is interactive
            os.system("stty sane")