Skip to content

Commit 5f04481

Browse files
committed
twister: remove duplicated code. Simplify logic for test "runnability".
There is no point in executing check_runnable() twice: when loading test data from yaml and when filters are applied during test plan creation. The check is left only in the latter case. The "tfilter" variable is removed since the same logic can be preserved without it by directly using the "runnable" one. Checks for harness and fixtures are encapsulated as individual methods. Now checking fixtures both from the CLI and from the hw map happens in a single place. The check for runnability is tangled with the apply_filter() method. This commit makes apply_filter() execute for all twister workflows to improve consistency of the results. Before, e.g., the filters were not applied when --test-only was used. This would cause errors if tests are first built with --build-only on one setup and then ported to another setup with a different configuration, where some requirements (e.g. a fixture) are not met. The changes are followed by an update to the twister unit tests to reflect the modifications. Signed-off-by: Maciej Perkowski <[email protected]>
1 parent 4f75848 commit 5f04481

File tree

4 files changed

+82
-57
lines changed

4 files changed

+82
-57
lines changed

scripts/pylib/twister/twisterlib/testinstance.py

Lines changed: 62 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -119,19 +119,39 @@ def get_case_or_create(self, name):
119119
self.testcases.append(tc)
120120
return tc
121121

122-
@staticmethod
123-
def testsuite_runnable(testsuite, fixtures):
124-
can_run = False
122+
def is_fixture_available(self, fixtures_cli=None, duts_from_hwmap=None):
123+
fixture_status = False # if true: test can be run on a given configuration
124+
verdict = "NA"
125+
fixture_required = self.testsuite.harness_config.get('fixture')
126+
127+
if fixture_required:
128+
verdict = f"Fixture {fixture_required} is not available."
129+
# There are 2 places where available fixtures can be defined: from CLI and in hw map
130+
# Fixtures from CLI apply to all devices
131+
if fixture_required in fixtures_cli:
132+
fixture_status = True
133+
verdict = f"Fixture {fixture_required} is available."
134+
# If fixture was not given in CLI but a hw map was provided we check if it is available in the hw map
135+
elif duts_from_hwmap:
136+
for h in duts_from_hwmap:
137+
if h.platform == self.platform.name:
138+
if fixture_required in h.fixtures:
139+
fixture_status = True
140+
verdict = f"Fixture {fixture_required} is available."
141+
else:
142+
fixture_status = True
143+
verdict = "No fixture required"
144+
return fixture_status, verdict
145+
146+
def is_harness_supported(self):
147+
harness_supported = False
148+
verdict = f"Harness {self.testsuite.harness} is not supported."
125149
# console harness allows us to run the test and capture data.
126-
if testsuite.harness in [ 'console', 'ztest', 'pytest', 'test']:
127-
can_run = True
128-
# if we have a fixture that is also being supplied on the
129-
# command-line, then we need to run the test, not just build it.
130-
fixture = testsuite.harness_config.get('fixture')
131-
if fixture:
132-
can_run = (fixture in fixtures)
150+
if self.testsuite.harness in [ 'console', 'ztest', 'pytest', 'test']:
151+
harness_supported = True
152+
verdict = f"Harness {self.testsuite.harness} is supported."
133153

134-
return can_run
154+
return harness_supported, verdict
135155

136156
def setup_handler(self, env):
137157
if self.handler:
@@ -182,45 +202,61 @@ def setup_handler(self, env):
182202
self.handler = handler
183203

184204
# Global testsuite parameters
185-
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
186-
205+
def check_runnable(self, enable_slow=False, fixtures_cli=None, duts_from_hwmap=None):
206+
verdict = "NA"
187207
# running on simulators is currently not supported on Windows
188208
if os.name == 'nt' and self.platform.simulation != 'na':
189-
return False
209+
verdict = "Simulators not supported on Windows"
210+
return False, verdict
190211

191212
# we asked for build-only on the command line
192213
if self.testsuite.build_only:
193-
return False
214+
verdict = "test is marked as build-only."
215+
return False, verdict
194216

195217
# Do not run slow tests:
196218
skip_slow = self.testsuite.slow and not enable_slow
197219
if skip_slow:
198-
return False
220+
verdict = "test is marked as slow."
221+
return False, verdict
222+
223+
harness_supported, verdict = self.is_harness_supported()
224+
if not harness_supported:
225+
return False, verdict
226+
227+
fixture_runnable, verdict = self.is_fixture_available(fixtures_cli, duts_from_hwmap)
228+
if not fixture_runnable:
229+
return False, verdict
199230

200231
target_ready = bool(self.testsuite.type == "unit" or \
201-
self.platform.type == "native" or \
202-
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim"] or \
203-
filter == 'runnable')
232+
self.platform.type in ["native", "mcu"] or \
233+
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim"])
234+
235+
if not target_ready:
236+
verdict = "target type not supported."
237+
return False, verdict
204238

205239
if self.platform.simulation == "nsim":
206240
if not shutil.which("nsimdrv"):
207-
target_ready = False
241+
verdict = "nsimdrv not found."
242+
return False, verdict
208243

209244
if self.platform.simulation == "mdb-nsim":
210245
if not shutil.which("mdb"):
211-
target_ready = False
246+
verdict = "mdb not found."
247+
return False, verdict
212248

213249
if self.platform.simulation == "renode":
214250
if not shutil.which("renode"):
215-
target_ready = False
251+
verdict = "renode not found."
252+
return False, verdict
216253

217254
if self.platform.simulation == "tsim":
218255
if not shutil.which("tsim-leon3"):
219-
target_ready = False
220-
221-
testsuite_runnable = self.testsuite_runnable(self.testsuite, fixtures)
256+
verdict = "tsim-leon3 not found."
257+
return False, verdict
222258

223-
return testsuite_runnable and target_ready
259+
return target_ready, verdict
224260

225261
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
226262
# Create this in a "twister/" subdirectory otherwise this

scripts/pylib/twister/twisterlib/testplan.py

Lines changed: 16 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -162,8 +162,8 @@ def load(self):
162162

163163
self.load_from_file(last_run, filter_platform=connected_list)
164164
self.selected_platforms = set(p.platform.name for p in self.instances.values())
165-
else:
166-
self.apply_filters()
165+
166+
self.apply_filters()
167167

168168
if self.options.subset:
169169
s = self.options.subset
@@ -505,16 +505,6 @@ def load_from_file(self, file, filter_platform=[]):
505505
if ts.get("run_id"):
506506
instance.run_id = ts.get("run_id")
507507

508-
if self.options.device_testing:
509-
tfilter = 'runnable'
510-
else:
511-
tfilter = 'buildable'
512-
instance.run = instance.check_runnable(
513-
self.options.enable_slow,
514-
tfilter,
515-
self.options.fixture
516-
)
517-
518508
instance.metrics['handler_time'] = ts.get('execution_time', 0)
519509
instance.metrics['ram_size'] = ts.get("ram_size", 0)
520510
instance.metrics['rom_size'] = ts.get("rom_size",0)
@@ -526,7 +516,7 @@ def load_from_file(self, file, filter_platform=[]):
526516
instance.reason = None
527517
# test marked as passed (built only) but can run when
528518
# --test-only is used. Reset status to capture new results.
529-
elif status == 'passed' and instance.run and self.options.test_only:
519+
elif status == 'passed' and self.options.test_only:
530520
instance.status = None
531521
instance.reason = None
532522
else:
@@ -566,6 +556,7 @@ def apply_filters(self, **kwargs):
566556
force_toolchain = self.options.force_toolchain
567557
force_platform = self.options.force_platform
568558
emu_filter = self.options.emulation_only
559+
instances_were_inherited = len(self.instances)!=0
569560

570561
logger.debug("platform filter: " + str(platform_filter))
571562
logger.debug(" arch_filter: " + str(arch_filter))
@@ -631,21 +622,14 @@ def apply_filters(self, **kwargs):
631622
instance_list = []
632623
for plat in platform_scope:
633624
instance = TestInstance(ts, plat, self.env.outdir)
634-
if runnable:
635-
tfilter = 'runnable'
636-
else:
637-
tfilter = 'buildable'
638-
639-
instance.run = instance.check_runnable(
640-
self.options.enable_slow,
641-
tfilter,
642-
self.options.fixture
643-
)
644-
if runnable and self.hwm.duts:
645-
for h in self.hwm.duts:
646-
if h.platform == plat.name:
647-
if ts.harness_config.get('fixture') in h.fixtures:
648-
instance.run = True
625+
# If instances were inherited (existing testplan used) don't add new nor evaluate filtered instances
626+
if instances_were_inherited:
627+
if instance.name not in self.instances:
628+
continue
629+
elif self.instances[instance.name].status == "filtered":
630+
continue
631+
else:
632+
instance.run_id = self.instances[instance.name].run_id
649633

650634
if not force_platform and plat.name in exclude_platform:
651635
instance.add_filter("Platform is excluded on command line.", Filters.CMD_LINE)
@@ -658,8 +642,11 @@ def apply_filters(self, **kwargs):
658642
if not set(ts.modules).issubset(set(self.modules)):
659643
instance.add_filter(f"one or more required modules not available: {','.join(ts.modules)}", Filters.TESTSUITE)
660644

645+
# instance.run tells twister if a test can be build and executed (if true) or only build (if false)
646+
instance.run, verdict = instance.check_runnable(self.options.enable_slow, self.options.fixture, self.hwm.duts)
647+
661648
if runnable and not instance.run:
662-
instance.add_filter("Not runnable on device", Filters.PLATFORM)
649+
instance.add_filter(f"Not runnable on device: {verdict}", Filters.PLATFORM)
663650

664651
if self.options.integration and ts.integration_platforms and plat.name not in ts.integration_platforms:
665652
instance.add_filter("Not part of integration platforms", Filters.TESTSUITE)

scripts/tests/twister/conftest.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
from twisterlib.testplan import TestPlan
1616
from twisterlib.testinstance import TestInstance
1717
from twisterlib.environment import TwisterEnv, parse_arguments
18+
from twisterlib.hardwaremap import HardwareMap
1819

1920
def new_get_toolchain(*args, **kwargs):
2021
return 'zephyr'
@@ -40,6 +41,7 @@ def tesenv_obj(test_data, testsuites_dir, tmpdir_factory):
4041
env.board_roots = [test_data +"board_config/1_level/2_level/"]
4142
env.test_roots = [testsuites_dir + '/tests', testsuites_dir + '/samples']
4243
env.outdir = tmpdir_factory.mktemp("sanity_out_demo")
44+
env.hwm = HardwareMap(env)
4345
return env
4446

4547

scripts/tests/twister/test_testinstance.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -45,12 +45,12 @@ def test_check_build_or_run(class_testplan, monkeypatch, all_testsuites_dict, pl
4545
testsuite.slow = slow
4646

4747
testinstance = TestInstance(testsuite, platform, class_testplan.env.outdir)
48-
run = testinstance.check_runnable(slow, device_testing, fixture)
48+
run, _ = testinstance.check_runnable(slow, device_testing, fixture)
4949
_, r = expected
5050
assert run == r
5151

5252
monkeypatch.setattr("os.name", "nt")
53-
run = testinstance.check_runnable()
53+
run, _ = testinstance.check_runnable()
5454
assert not run
5555

5656
TESTDATA_2 = [

0 commit comments

Comments
 (0)