Patch/make skip reason clearer #3

Open · wants to merge 2 commits into main
88 changes: 62 additions & 26 deletions scripts/pylib/twister/twisterlib/testinstance.py
@@ -119,19 +119,39 @@ def get_case_or_create(self, name):
self.testcases.append(tc)
return tc

@staticmethod
def testsuite_runnable(testsuite, fixtures):
can_run = False
def is_fixture_available(self, fixtures_cli=None, duts_from_hwmap=None):
fixture_status = False # if true: test can be run on a given configuration
verdict = "NA"
fixture_required = self.testsuite.harness_config.get('fixture')

if fixture_required:
verdict = f"Fixture {fixture_required} is not available."
# There are 2 places where available fixtures can be defined: on the CLI and in the hw map
# Fixtures from the CLI apply to all devices
if fixture_required in fixtures_cli:
fixture_status = True
verdict = f"Fixture {fixture_required} is available."
# If the fixture was not given on the CLI but a hw map was provided, check if it is available in the hw map
elif duts_from_hwmap:
for h in duts_from_hwmap:
if h.platform == self.platform.name:
if fixture_required in h.fixtures:
fixture_status = True
verdict = f"Fixture {fixture_required} is available."
else:
fixture_status = True
verdict = "No fixture required"
return fixture_status, verdict

def is_harness_supported(self):
harness_supported = False
verdict = f"Harness {self.testsuite.harness} is not supported."
# console harness allows us to run the test and capture data.
if testsuite.harness in [ 'console', 'ztest', 'pytest', 'test']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testsuite.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
if self.testsuite.harness in [ 'console', 'ztest', 'pytest', 'test']:
harness_supported = True
verdict = f"Harness {self.testsuite.harness} is supported."

return can_run
return harness_supported, verdict

def setup_handler(self, env):
if self.handler:
@@ -182,45 +202,61 @@ def setup_handler(self, env):
self.handler = handler

# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):

def check_runnable(self, enable_slow=False, fixtures_cli=None, duts_from_hwmap=None):
verdict = "NA"
# running on simulators is currently not supported on Windows
if os.name == 'nt' and self.platform.simulation != 'na':
return False
verdict = "Simulators not supported on Windows"
return False, verdict

# we asked for build-only on the command line
if self.testsuite.build_only:
return False
verdict = "test is marked as build-only."
return False, verdict

# Do not run slow tests:
skip_slow = self.testsuite.slow and not enable_slow
if skip_slow:
return False
verdict = "test is marked as slow."
return False, verdict

harness_supported, verdict = self.is_harness_supported()
if not harness_supported:
return False, verdict

fixture_runnable, verdict = self.is_fixture_available(fixtures_cli, duts_from_hwmap)
if not fixture_runnable:
return False, verdict

target_ready = bool(self.testsuite.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim"] or \
filter == 'runnable')
self.platform.type in ["native", "mcu"] or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim"])

if not target_ready:
verdict = "target type not supported."
return False, verdict

if self.platform.simulation == "nsim":
if not shutil.which("nsimdrv"):
target_ready = False
verdict = "nsimdrv not found."
return False, verdict

if self.platform.simulation == "mdb-nsim":
if not shutil.which("mdb"):
target_ready = False
verdict = "mdb not found."
return False, verdict

if self.platform.simulation == "renode":
if not shutil.which("renode"):
target_ready = False
verdict = "renode not found."
return False, verdict

if self.platform.simulation == "tsim":
if not shutil.which("tsim-leon3"):
target_ready = False

testsuite_runnable = self.testsuite_runnable(self.testsuite, fixtures)
verdict = "tsim-leon3 not found."
return False, verdict

return testsuite_runnable and target_ready
return target_ready, verdict

def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "twister/" subdirectory otherwise this
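
Reviewer note: a minimal sketch of the new caller-facing contract, not part of this diff. check_runnable() and the new helpers is_harness_supported() / is_fixture_available() return a (bool, verdict) pair instead of a bare boolean, so callers can report the exact skip reason. The helper below and its argument names are hypothetical:

def report_skip_reason(instance, fixtures_cli, duts_from_hwmap, enable_slow=False):
    # check_runnable() forwards the CLI fixtures and hardware-map DUTs to
    # is_fixture_available() and returns a human-readable verdict string.
    runnable, verdict = instance.check_runnable(enable_slow, fixtures_cli, duts_from_hwmap)
    if not runnable:
        # e.g. "Fixture fixture_i2c is not available." (fixture name invented here)
        print(f"{instance.name}: build only ({verdict})")
    return runnable, verdict
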
47 changes: 17 additions & 30 deletions scripts/pylib/twister/twisterlib/testplan.py
@@ -162,8 +162,8 @@ def load(self):

self.load_from_file(last_run, filter_platform=connected_list)
self.selected_platforms = set(p.platform.name for p in self.instances.values())
else:
self.apply_filters()

self.apply_filters()

if self.options.subset:
s = self.options.subset
@@ -505,16 +505,6 @@ def load_from_file(self, file, filter_platform=[]):
if ts.get("run_id"):
instance.run_id = ts.get("run_id")

if self.options.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.options.enable_slow,
tfilter,
self.options.fixture
)

instance.metrics['handler_time'] = ts.get('execution_time', 0)
instance.metrics['ram_size'] = ts.get("ram_size", 0)
instance.metrics['rom_size'] = ts.get("rom_size",0)
@@ -526,7 +516,7 @@ def load_from_file(self, file, filter_platform=[]):
instance.reason = None
# test marked as passed (built only) but can run when
# --test-only is used. Reset status to capture new results.
elif status == 'passed' and instance.run and self.options.test_only:
elif status == 'passed' and self.options.test_only:
instance.status = None
instance.reason = None
else:
@@ -566,6 +556,7 @@ def apply_filters(self, **kwargs):
force_toolchain = self.options.force_toolchain
force_platform = self.options.force_platform
emu_filter = self.options.emulation_only
instances_were_inherited = len(self.instances)!=0

logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
@@ -631,21 +622,14 @@ def apply_filters(self, **kwargs):
instance_list = []
for plat in platform_scope:
instance = TestInstance(ts, plat, self.env.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'

instance.run = instance.check_runnable(
self.options.enable_slow,
tfilter,
self.options.fixture
)
if runnable and self.hwm.duts:
for h in self.hwm.duts:
if h.platform == plat.name:
if ts.harness_config.get('fixture') in h.fixtures:
instance.run = True
# If instances were inherited (an existing testplan was used), don't add new instances or re-evaluate filtered ones
if instances_were_inherited:
if instance.name not in self.instances:
continue
elif self.instances[instance.name].status == "filtered":
continue
else:
instance.run_id = self.instances[instance.name].run_id

if not force_platform and plat.name in exclude_platform:
instance.add_filter("Platform is excluded on command line.", Filters.CMD_LINE)
@@ -658,8 +642,11 @@ def apply_filters(self, **kwargs):
if not set(ts.modules).issubset(set(self.modules)):
instance.add_filter(f"one or more required modules not available: {','.join(ts.modules)}", Filters.TESTSUITE)

# instance.run tells twister if a test can be built and executed (if true) or only built (if false)
instance.run, verdict = instance.check_runnable(self.options.enable_slow, self.options.fixture, self.hwm.duts)

if runnable and not instance.run:
instance.add_filter("Not runnable on device", Filters.PLATFORM)
instance.add_filter(f"Not runnable on device: {verdict}", Filters.PLATFORM)

if self.options.integration and ts.integration_platforms and plat.name not in ts.integration_platforms:
instance.add_filter("Not part of integration platforms", Filters.TESTSUITE)
@@ -720,7 +707,7 @@ def apply_filters(self, **kwargs):
if ts.depends_on:
dep_intersection = ts.depends_on.intersection(set(plat.supported))
if dep_intersection != set(ts.depends_on):
instance.add_filter("No hardware support", Filters.PLATFORM)
instance.add_filter(f"No hardware support, lacking {set(ts.depends_on) - dep_intersection}.", Filters.PLATFORM)

if plat.flash < ts.min_flash:
instance.add_filter("Not enough FLASH", Filters.PLATFORM)
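
Reviewer note: a short illustrative sketch, not part of this diff, of the two fixture sources that apply_filters() now forwards to check_runnable(): fixtures passed with --fixture apply to every device, while fixtures listed on a hardware-map DUT entry apply only to that DUT's platform. The helper name below is hypothetical; the .platform and .fixtures attributes follow the DUT objects used in this diff:

def available_fixtures(instance, fixtures_cli, duts_from_hwmap):
    # Union of fixtures visible to this instance, mirroring the lookup order in
    # TestInstance.is_fixture_available(): CLI fixtures first, then fixtures from
    # hardware-map entries whose platform matches the instance's platform.
    available = set(fixtures_cli or [])
    for dut in duts_from_hwmap or []:
        if dut.platform == instance.platform.name:
            available.update(dut.fixtures)
    return available

A required fixture missing from this set produces the "Fixture ... is not available." verdict introduced in testinstance.py above.
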
2 changes: 2 additions & 0 deletions scripts/tests/twister/conftest.py
@@ -15,6 +15,7 @@
from twisterlib.testplan import TestPlan
from twisterlib.testinstance import TestInstance
from twisterlib.environment import TwisterEnv, parse_arguments
from twisterlib.hardwaremap import HardwareMap

def new_get_toolchain(*args, **kwargs):
return 'zephyr'
@@ -40,6 +41,7 @@ def tesenv_obj(test_data, testsuites_dir, tmpdir_factory):
env.board_roots = [test_data +"board_config/1_level/2_level/"]
env.test_roots = [testsuites_dir + '/tests', testsuites_dir + '/samples']
env.outdir = tmpdir_factory.mktemp("sanity_out_demo")
env.hwm = HardwareMap(env)
return env


4 changes: 2 additions & 2 deletions scripts/tests/twister/test_testinstance.py
@@ -45,12 +45,12 @@ def test_check_build_or_run(class_testplan, monkeypatch, all_testsuites_dict, pl
testsuite.slow = slow

testinstance = TestInstance(testsuite, platform, class_testplan.env.outdir)
run = testinstance.check_runnable(slow, device_testing, fixture)
run, _ = testinstance.check_runnable(slow, device_testing, fixture)
_, r = expected
assert run == r

monkeypatch.setattr("os.name", "nt")
run = testinstance.check_runnable()
run, _ = testinstance.check_runnable()
assert not run

TESTDATA_2 = [