  1. #!/usr/bin/env python3
  2. # Copyright (c) 2020 Intel Corporation
  3. #
  4. # SPDX-License-Identifier: Apache-2.0
  5. # pylint: disable=line-too-long
  6. """
  7. Tests for testinstance class
  8. """
  9. import os
  10. import sys
  11. import pytest
  12. ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
  13. sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
  14. from twisterlib import TestInstance, BuildError, TestCase, TwisterException
  15. TESTDATA_1 = [
  16. (False, False, "console", "na", "qemu", False, [], (False, True)),
  17. (False, False, "console", "native", "qemu", False, [], (False, True)),
  18. (True, False, "console", "native", "nsim", False, [], (True, False)),
  19. (True, True, "console", "native", "renode", False, [], (True, False)),
  20. (False, False, "sensor", "native", "", False, [], (True, False)),
  21. (False, False, "sensor", "na", "", False, [], (True, False)),
  22. (False, True, "sensor", "native", "", True, [], (True, False)),
  23. ]
  24. @pytest.mark.parametrize("build_only, slow, harness, platform_type, platform_sim, device_testing,fixture, expected", TESTDATA_1)
  25. def test_check_build_or_run(class_testsuite, monkeypatch, all_testcases_dict, platforms_list, build_only, slow, harness, platform_type, platform_sim, device_testing, fixture, expected):
  26. """" Test to check the conditions for build_only and run scenarios
  27. Scenario 1: Test when different parameters are passed, build_only and run are set correctly
  28. Sceanrio 2: Test if build_only is enabled when the OS is Windows"""
  29. class_testsuite.testcases = all_testcases_dict
  30. testcase = class_testsuite.testcases.get('scripts/tests/twister/test_data/testcases/tests/test_a/test_a.check_1')
  31. class_testsuite.platforms = platforms_list
  32. platform = class_testsuite.get_platform("demo_board_2")
  33. platform.type = platform_type
  34. platform.simulation = platform_sim
  35. testcase.harness = harness
  36. testcase.build_only = build_only
  37. testcase.slow = slow
  38. testinstance = TestInstance(testcase, platform, class_testsuite.outdir)
  39. run = testinstance.check_runnable(slow, device_testing, fixture)
  40. _, r = expected
  41. assert run == r
  42. monkeypatch.setattr("os.name", "nt")
  43. run = testinstance.check_runnable()
  44. assert not run
  45. TESTDATA_2 = [
  46. (True, True, True, ["demo_board_2"], "native", '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y\nCONFIG_ASAN=y\nCONFIG_UBSAN=y'),
  47. (True, False, True, ["demo_board_2"], "native", '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y\nCONFIG_ASAN=y'),
  48. (False, False, True, ["demo_board_2"], 'native', '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y'),
  49. (True, False, True, ["demo_board_2"], 'mcu', '\nCONFIG_COVERAGE=y\nCONFIG_COVERAGE_DUMP=y'),
  50. (False, False, False, ["demo_board_2"], 'native', ''),
  51. (False, False, True, ['demo_board_1'], 'native', ''),
  52. (True, False, False, ["demo_board_2"], 'native', '\nCONFIG_ASAN=y'),
  53. (False, True, False, ["demo_board_2"], 'native', '\nCONFIG_UBSAN=y'),
  54. ]
  55. @pytest.mark.parametrize("enable_asan, enable_ubsan, enable_coverage, coverage_platform, platform_type, expected_content", TESTDATA_2)
  56. def test_create_overlay(class_testsuite, all_testcases_dict, platforms_list, enable_asan, enable_ubsan, enable_coverage, coverage_platform, platform_type, expected_content):
  57. """Test correct content is written to testcase_extra.conf based on if conditions
  58. TO DO: Add extra_configs to the input list"""
  59. class_testsuite.testcases = all_testcases_dict
  60. testcase = class_testsuite.testcases.get('scripts/tests/twister/test_data/testcases/samples/test_app/sample_test.app')
  61. class_testsuite.platforms = platforms_list
  62. platform = class_testsuite.get_platform("demo_board_2")
  63. testinstance = TestInstance(testcase, platform, class_testsuite.outdir)
  64. platform.type = platform_type
  65. assert testinstance.create_overlay(platform, enable_asan, enable_ubsan, enable_coverage, coverage_platform) == expected_content
  66. def test_calculate_sizes(class_testsuite, all_testcases_dict, platforms_list):
  67. """ Test Calculate sizes method for zephyr elf"""
  68. class_testsuite.testcases = all_testcases_dict
  69. testcase = class_testsuite.testcases.get('scripts/tests/twister/test_data/testcases/samples/test_app/sample_test.app')
  70. class_testsuite.platforms = platforms_list
  71. platform = class_testsuite.get_platform("demo_board_2")
  72. testinstance = TestInstance(testcase, platform, class_testsuite.outdir)
  73. with pytest.raises(BuildError):
  74. assert testinstance.calculate_sizes() == "Missing/multiple output ELF binary"
  75. TESTDATA_3 = [
  76. (ZEPHYR_BASE + '/scripts/tests/twister/test_data/testcases', ZEPHYR_BASE, '/scripts/tests/twister/test_data/testcases/tests/test_a/test_a.check_1', '/scripts/tests/twister/test_data/testcases/tests/test_a/test_a.check_1'),
  77. (ZEPHYR_BASE, '.', 'test_a.check_1', 'test_a.check_1'),
  78. (ZEPHYR_BASE, '/scripts/tests/twister/test_data/testcases/test_b', 'test_b.check_1', '/scripts/tests/twister/test_data/testcases/test_b/test_b.check_1'),
  79. (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', 'test_b.check_1', 'test_b.check_1'),
  80. (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', '.', '.'),
  81. (ZEPHYR_BASE, '.', 'test_a.check_1.check_2', 'test_a.check_1.check_2'),
  82. ]
  83. @pytest.mark.parametrize("testcase_root, workdir, name, expected", TESTDATA_3)
  84. def test_get_unique(testcase_root, workdir, name, expected):
  85. '''Test to check if the unique name is given for each testcase root and workdir'''
  86. unique = TestCase(testcase_root, workdir, name)
  87. assert unique.name == expected
  88. TESTDATA_4 = [
  89. (ZEPHYR_BASE, '.', 'test_c', 'Tests should reference the category and subsystem with a dot as a separator.'),
  90. (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', '', 'Tests should reference the category and subsystem with a dot as a separator.'),
  91. ]
  92. @pytest.mark.parametrize("testcase_root, workdir, name, exception", TESTDATA_4)
  93. def test_get_unique_exception(testcase_root, workdir, name, exception):
  94. '''Test to check if tests reference the category and subsystem with a dot as a separator'''
  95. with pytest.raises(TwisterException):
  96. unique = TestCase(testcase_root, workdir, name)
  97. assert unique == exception
  98. TESTDATA_5 = [
  99. ("testcases/tests/test_ztest.c", None, ['a', 'c', 'unit_a', 'newline', 'test_test_aa', 'user', 'last']),
  100. ("testcases/tests/test_a/test_ztest_error.c", "Found a test that does not start with test_", ['1a', '1c', '2a', '2b']),
  101. ("testcases/tests/test_a/test_ztest_error_1.c", "found invalid #ifdef, #endif in ztest_test_suite()", ['unit_1a', 'unit_1b', 'Unit_1c']),
  102. ]
  103. @pytest.mark.parametrize("test_file, expected_warnings, expected_subcases", TESTDATA_5)
  104. def test_scan_file(test_data, test_file, expected_warnings, expected_subcases):
  105. '''Testing scan_file method with different ztest files for warnings and results'''
  106. testcase = TestCase("/scripts/tests/twister/test_data/testcases/tests", ".", "test_a.check_1")
  107. results, warnings = testcase.scan_file(os.path.join(test_data, test_file))
  108. assert sorted(results) == sorted(expected_subcases)
  109. assert warnings == expected_warnings
  110. TESTDATA_6 = [
  111. ("testcases/tests", ['a', 'c', 'unit_a', 'newline', 'test_test_aa', 'user', 'last']),
  112. ("testcases/tests/test_a", ['unit_1a', 'unit_1b', 'Unit_1c', '1a', '1c', '2a', '2b']),
  113. ]
  114. @pytest.mark.parametrize("test_path, expected_subcases", TESTDATA_6)
  115. def test_subcases(test_data, test_path, expected_subcases):
  116. '''Testing scan path and parse subcases methods for expected subcases'''
  117. testcase = TestCase("/scripts/tests/twister/test_data/testcases/tests", ".", "test_a.check_1")
  118. subcases = testcase.scan_path(os.path.join(test_data, test_path))
  119. assert sorted(subcases) == sorted(expected_subcases)
  120. testcase.id = "test_id"
  121. testcase.parse_subcases(test_data + test_path)
  122. assert sorted(testcase.cases) == [testcase.id + '.' + x for x in sorted(expected_subcases)]