diff --git a/cts/Makefile.am b/cts/Makefile.am
index 3e128dfc81b..3ad7d204fe6 100644
--- a/cts/Makefile.am
+++ b/cts/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2001-2024 the Pacemaker project contributors
+# Copyright 2001-2025 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -67,7 +67,10 @@ cts-support-uninstall:
# Everything listed here is a python script, typically generated from a .in file
# (though that is not a requirement). We want to run pylint on all of these
# things after they've been built.
+# FIXME: When cts-schemas is converted to python, this can be removed because
+# it will duplicate test_SCRIPTS above.
python_files = cts-attrd \
+ cts-cli \
cts-exec \
cts-fencing \
cts-lab \
diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index bf1e0807440..a59b6aa6870 100644
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -54,6 +54,9 @@ other_tests = ["agents"]
# The directory containing this program
test_home = os.path.dirname(os.path.realpath(__file__))
+# Where test data is stored
+cts_cli_data = f"{test_home}/cli"
+
# The name of the shadow CIB
SHADOW_NAME = "cts-cli"
@@ -93,7 +96,6 @@ def apply_substitutions(s, extra=None):
substitutions = {
"cts_cli_data": "%s/cli" % test_home,
"shadow": SHADOW_NAME,
- "test_home": test_home,
}
if extra is not None:
@@ -1485,7 +1487,7 @@ class CrmAttributeRegressionTest(RegressionTest):
ShadowTestGroup(query_set_tests),
TestGroup(promotable_tests + ocf_rsc_instance_tests,
env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"},
- cib_gen=partial(copy_existing_cib, "{cts_cli_data}/crm_mon.xml")),
+ cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/crm_mon.xml")),
]
@@ -1511,7 +1513,7 @@ class CrmStandbyRegressionTest(RegressionTest):
return [
ShadowTestGroup(basic_tests,
- setup="""cibadmin -C -o nodes --xml-text ''"""),
+ setup="""cibadmin -C -o nodes --xml-text ''"""),
]
@@ -1811,14 +1813,14 @@ class CrmResourceRegressionTest(RegressionTest):
return options_tests + [
ShadowTestGroup(basic_tests, setup=basic_tests_setup),
- TestGroup(constraint_tests, env={"CIB_file": "{cts_cli_data}/constraints.xml"}),
- TestGroup(colocation_tests, cib_gen=partial(copy_existing_cib, "{cts_cli_data}/constraints.xml")),
- TestGroup(digest_tests, env={"CIB_file": "{cts_cli_data}/crm_resource_digests.xml"}),
- TestGroup(basic2_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}),
+ TestGroup(constraint_tests, env={"CIB_file": f"{cts_cli_data}/constraints.xml"}),
+ TestGroup(colocation_tests, cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/constraints.xml")),
+ TestGroup(digest_tests, env={"CIB_file": f"{cts_cli_data}/crm_resource_digests.xml"}),
+ TestGroup(basic2_tests, env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}),
ValidatingTest("Check that CIB_file=\"-\" works - crm_resource",
"crm_resource --digests -r rsc1 -N node1 --output-as=xml",
env={"CIB_file": "-"},
- stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crm_resource_digests.xml"))),
+ stdin=pathlib.Path(f"{cts_cli_data}/crm_resource_digests.xml")),
]
@@ -1923,10 +1925,10 @@ class CrmadminRegressionTest(RegressionTest):
return [
TestGroup(basic_tests,
- env={"CIB_file": "{cts_cli_data}/crmadmin-cluster-remote-guest-nodes.xml"}),
+ env={"CIB_file": f"{cts_cli_data}/crmadmin-cluster-remote-guest-nodes.xml"}),
Test("Check that CIB_file=\"-\" works", "crmadmin -N",
env={"CIB_file": "-"},
- stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crmadmin-cluster-remote-guest-nodes.xml"))),
+ stdin=pathlib.Path(f"{cts_cli_data}/crmadmin-cluster-remote-guest-nodes.xml")),
]
@@ -1962,7 +1964,7 @@ class CrmShadowRegressionTest(RegressionTest):
make_test_group("Create copied shadow instance",
"crm_shadow --create {shadow} --batch {fmt}",
[Test, ValidatingTest],
- setup="crm_shadow --delete {shadow} --force"),
+ setup=f"crm_shadow --delete {SHADOW_NAME} --force"),
# Query shadow instance based on active CIB
make_test_group("Get active shadow instance (copied)",
"crm_shadow --which {fmt}",
@@ -1998,31 +2000,31 @@ class CrmShadowRegressionTest(RegressionTest):
TestGroup([
# Commit the modified shadow CIB to a temp active CIB file
Test("Commit shadow instance",
- "crm_shadow --commit {shadow}",
+ f"crm_shadow --commit {SHADOW_NAME}",
expected_rc=ExitStatus.USAGE),
Test("Commit shadow instance (force)",
- "crm_shadow --commit {shadow} --force"),
+ f"crm_shadow --commit {SHADOW_NAME} --force"),
Test("Get active shadow instance's diff (after commit)",
"crm_shadow --diff",
expected_rc=ExitStatus.ERROR),
Test("Commit shadow instance (force) (all)",
- "crm_shadow --commit {shadow} --force --all"),
+ f"crm_shadow --commit {SHADOW_NAME} --force --all"),
Test("Get active shadow instance's diff (after commit all)",
"crm_shadow --diff",
expected_rc=ExitStatus.ERROR),
- ], cib_gen=partial(copy_existing_cib, "{cts_cli_data}/crm_mon.xml")),
+ ], cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/crm_mon.xml")),
TestGroup([
# Repeat sequence with XML output
ValidatingTest("Commit shadow instance",
- "crm_shadow --commit {shadow} --output-as=xml",
+ f"crm_shadow --commit {SHADOW_NAME} --output-as=xml",
expected_rc=ExitStatus.USAGE),
ValidatingTest("Commit shadow instance (force)",
- "crm_shadow --commit {shadow} --force --output-as=xml"),
+ f"crm_shadow --commit {SHADOW_NAME} --force --output-as=xml"),
ValidatingTest("Get active shadow instance's diff (after commit)",
"crm_shadow --diff --output-as=xml",
expected_rc=ExitStatus.ERROR),
ValidatingTest("Commit shadow instance (force) (all)",
- "crm_shadow --commit {shadow} --force --all --output-as=xml"),
+ f"crm_shadow --commit {SHADOW_NAME} --force --all --output-as=xml"),
ValidatingTest("Get active shadow instance's diff (after commit all)",
"crm_shadow --diff --output-as=xml",
expected_rc=ExitStatus.ERROR),
@@ -2066,67 +2068,67 @@ class CrmShadowRegressionTest(RegressionTest):
make_test_group("Commit shadow instance (nonexistent CIB file)",
"crm_shadow --commit {shadow} {fmt}",
[Test, ValidatingTest],
- env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"},
+ env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"},
expected_rc=ExitStatus.USAGE),
make_test_group("Commit shadow instance (nonexistent CIB file) (force)",
"crm_shadow --commit {shadow} --force {fmt}",
[Test, ValidatingTest],
- env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"},
+ env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"},
expected_rc=ExitStatus.NOSUCH),
make_test_group("Get active shadow instance's diff (nonexistent CIB file)",
"crm_shadow --diff {fmt}",
[Test, ValidatingTest],
- env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"},
+ env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"},
expected_rc=ExitStatus.NOSUCH),
- ], cib_gen=partial(copy_existing_cib, "{cts_cli_data}/crm_mon.xml")),
+ ], cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/crm_mon.xml")),
]
delete_1_tests = [
# Delete an active shadow instance
- Test("Delete shadow instance", "crm_shadow --delete {shadow}",
+ Test("Delete shadow instance", f"crm_shadow --delete {SHADOW_NAME}",
expected_rc=ExitStatus.USAGE),
- Test("Delete shadow instance (force)", "crm_shadow --delete {shadow} --force"),
+ Test("Delete shadow instance (force)", f"crm_shadow --delete {SHADOW_NAME} --force"),
ShadowTestGroup([
ValidatingTest("Delete shadow instance",
- "crm_shadow --delete {shadow} --output-as=xml",
+ f"crm_shadow --delete {SHADOW_NAME} --output-as=xml",
expected_rc=ExitStatus.USAGE),
ValidatingTest("Delete shadow instance (force)",
- "crm_shadow --delete {shadow} --force --output-as=xml"),
+ f"crm_shadow --delete {SHADOW_NAME} --force --output-as=xml"),
])
]
delete_2_tests = [
# Delete an inactive shadow instance with no active instance
Test("Delete shadow instance (no active instance)",
- "crm_shadow --delete {shadow}",
+ f"crm_shadow --delete {SHADOW_NAME}",
expected_rc=ExitStatus.USAGE),
Test("Delete shadow instance (no active instance) (force)",
- "crm_shadow --delete {shadow} --force"),
+ f"crm_shadow --delete {SHADOW_NAME} --force"),
]
delete_3_tests = [
ValidatingTest("Delete shadow instance (no active instance)",
- "crm_shadow --delete {shadow} --output-as=xml",
+ f"crm_shadow --delete {SHADOW_NAME} --output-as=xml",
expected_rc=ExitStatus.USAGE),
ValidatingTest("Delete shadow instance (no active instance) (force)",
- "crm_shadow --delete {shadow} --force --output-as=xml"),
+ f"crm_shadow --delete {SHADOW_NAME} --force --output-as=xml"),
]
delete_4_tests = [
# Delete an inactive shadow instance with an active instance
Test("Delete shadow instance (mismatch)",
- "crm_shadow --delete {shadow}",
+ f"crm_shadow --delete {SHADOW_NAME}",
expected_rc=ExitStatus.USAGE),
Test("Delete shadow instance (mismatch) (force)",
- "crm_shadow --delete {shadow} --force"),
+ f"crm_shadow --delete {SHADOW_NAME} --force"),
]
delete_5_tests = [
ValidatingTest("Delete shadow instance (mismatch)",
- "crm_shadow --delete {shadow} --output-as=xml",
+ f"crm_shadow --delete {SHADOW_NAME} --output-as=xml",
expected_rc=ExitStatus.USAGE),
ValidatingTest("Delete shadow instance (mismatch) (force)",
- "crm_shadow --delete {shadow} --force --output-as=xml"),
+ f"crm_shadow --delete {SHADOW_NAME} --force --output-as=xml"),
# Delete an active shadow instance whose shadow file is missing
Test("Delete shadow instance (nonexistent shadow file)",
"crm_shadow --delete nonexistent_shadow",
@@ -2143,18 +2145,18 @@ class CrmShadowRegressionTest(RegressionTest):
delete_6_tests = [
# Delete an active shadow instance when the CIB file is missing
Test("Delete shadow instance (nonexistent CIB file)",
- "crm_shadow --delete {shadow}",
+ f"crm_shadow --delete {SHADOW_NAME}",
expected_rc=ExitStatus.USAGE),
Test("Delete shadow instance (nonexistent CIB file) (force)",
- "crm_shadow --delete {shadow} --force"),
+ f"crm_shadow --delete {SHADOW_NAME} --force"),
]
delete_7_tests = [
ValidatingTest("Delete shadow instance (nonexistent CIB file)",
- "crm_shadow --delete {shadow} --output-as=xml",
+ f"crm_shadow --delete {SHADOW_NAME} --output-as=xml",
expected_rc=ExitStatus.USAGE),
ValidatingTest("Delete shadow instance (nonexistent CIB file) (force)",
- "crm_shadow --delete {shadow} --force --output-as=xml"),
+ f"crm_shadow --delete {SHADOW_NAME} --force --output-as=xml"),
]
create_1_tests = [
@@ -2162,13 +2164,13 @@ class CrmShadowRegressionTest(RegressionTest):
make_test_group("Create copied shadow instance (no active instance)",
"crm_shadow --create {shadow} --batch {fmt}",
[Test, ValidatingTest],
- setup="crm_shadow --delete {shadow} --force",
+ setup=f"crm_shadow --delete {SHADOW_NAME} --force",
env={"CIB_shadow": None}),
# Create new shadow instance based on active CIB with other instance active
make_test_group("Create copied shadow instance (mismatch)",
"crm_shadow --create {shadow} --batch {fmt}",
[Test, ValidatingTest],
- setup="crm_shadow --delete {shadow} --force",
+ setup=f"crm_shadow --delete {SHADOW_NAME} --force",
env={"CIB_shadow": "nonexistent_shadow"}),
# Create new shadow instance based on CIB (shadow file already exists)
make_test_group("Create copied shadow instance (file already exists)",
@@ -2183,8 +2185,8 @@ class CrmShadowRegressionTest(RegressionTest):
"crm_shadow --create {shadow} --batch --force {fmt}",
[Test, ValidatingTest],
expected_rc=ExitStatus.NOSUCH,
- setup="crm_shadow --delete {shadow} --force",
- env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"}),
+ setup=f"crm_shadow --delete {SHADOW_NAME} --force",
+ env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}),
]
create_2_tests = [
@@ -2192,25 +2194,25 @@ class CrmShadowRegressionTest(RegressionTest):
make_test_group("Create empty shadow instance",
"crm_shadow --create-empty {shadow} --batch {fmt}",
[Test, ValidatingTest],
- setup="crm_shadow --delete {shadow} --force"),
+ setup=f"crm_shadow --delete {SHADOW_NAME} --force"),
# Create empty shadow instance with no active instance
make_test_group("Create empty shadow instance (no active instance)",
"crm_shadow --create-empty {shadow} --batch {fmt}",
[Test, ValidatingTest],
- setup="crm_shadow --delete {shadow} --force",
+ setup=f"crm_shadow --delete {SHADOW_NAME} --force",
env={"CIB_shadow": None}),
# Create empty shadow instance with other instance active
make_test_group("Create empty shadow instance (mismatch)",
"crm_shadow --create-empty {shadow} --batch {fmt}",
[Test, ValidatingTest],
- setup="crm_shadow --delete {shadow} --force",
+ setup=f"crm_shadow --delete {SHADOW_NAME} --force",
env={"CIB_shadow": "nonexistent_shadow"}),
# Create empty shadow instance when the CIB file is missing
make_test_group("Create empty shadow instance (nonexistent CIB file)",
"crm_shadow --create-empty {shadow} --batch {fmt}",
[Test, ValidatingTest],
- setup="crm_shadow --delete {shadow} --force",
- env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"}),
+ setup=f"crm_shadow --delete {SHADOW_NAME} --force",
+ env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}),
# Create empty shadow instance (shadow file already exists)
make_test_group("Create empty shadow instance (file already exists)",
"crm_shadow --create-empty {shadow} --batch {fmt}",
@@ -2234,10 +2236,10 @@ class CrmShadowRegressionTest(RegressionTest):
reset_1_tests = [
Test("Resetting active shadow instance to active CIB requires force",
- "crm_shadow --reset {shadow} --batch",
+ f"crm_shadow --reset {SHADOW_NAME} --batch",
expected_rc=ExitStatus.USAGE),
Test("Reset active shadow instance to active CIB",
- "crm_shadow --reset {shadow} --batch --force"),
+ f"crm_shadow --reset {SHADOW_NAME} --batch --force"),
Test("Active shadow instance no different from active CIB after reset",
"crm_shadow --diff"),
Test("Active shadow instance differs from active CIB after change",
@@ -2246,7 +2248,7 @@ class CrmShadowRegressionTest(RegressionTest):
expected_rc=ExitStatus.ERROR),
ValidatingTest("Reset active shadow instance to active CIB",
- "crm_shadow --reset {shadow} --batch --force --output-as=xml"),
+ f"crm_shadow --reset {SHADOW_NAME} --batch --force --output-as=xml"),
ValidatingTest("Active shadow instance no different from active CIB after reset",
"crm_shadow --diff --output-as=xml"),
ValidatingTest("Active shadow instance differs from active CIB after change",
@@ -2257,7 +2259,7 @@ class CrmShadowRegressionTest(RegressionTest):
make_test_group("Reset shadow instance to active CIB with nonexistent shadow file",
"crm_shadow --reset {shadow} --batch --force {fmt}",
[Test, ValidatingTest],
- setup="crm_shadow --delete {shadow} --force"),
+ setup=f"crm_shadow --delete {SHADOW_NAME} --force"),
Test("Active shadow instance no different from active CIB after force-reset",
"crm_shadow --diff"),
]
@@ -2295,47 +2297,47 @@ class CrmShadowRegressionTest(RegressionTest):
"crm_shadow --switch {shadow} --batch --force {fmt}",
[Test, ValidatingTest],
expected_rc=ExitStatus.NOSUCH),
- ], setup="crm_shadow --delete {shadow} --force"),
+ ], setup=f"crm_shadow --delete {SHADOW_NAME} --force"),
]
return no_instance_tests + [
ShadowTestGroup(new_instance_tests + more_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon.xml"},
+ env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"},
create=False),
ShadowTestGroup(delete_1_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}),
+ env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}),
ShadowTestGroup(delete_2_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon.xml",
+ env={"CIB_file": f"{cts_cli_data}/crm_mon.xml",
"CIB_shadow": None}),
ShadowTestGroup(delete_3_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon.xml",
+ env={"CIB_file": f"{cts_cli_data}/crm_mon.xml",
"CIB_shadow": None}),
ShadowTestGroup(delete_4_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon.xml",
+ env={"CIB_file": f"{cts_cli_data}/crm_mon.xml",
"CIB_shadow": "nonexistent_shadow"}),
ShadowTestGroup(delete_5_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon.xml",
+ env={"CIB_file": f"{cts_cli_data}/crm_mon.xml",
"CIB_shadow": "nonexistent_shadow"}),
ShadowTestGroup(delete_6_tests,
- env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"}),
+ env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}),
ShadowTestGroup(delete_7_tests,
- env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"}),
+ env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}),
ShadowTestGroup(create_1_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon.xml"},
+ env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"},
create=False),
ShadowTestGroup(create_2_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon.xml"},
+ env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"},
create=False),
ShadowTestGroup(reset_1_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}),
+ env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}),
ShadowTestGroup(reset_2_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon.xml",
+ env={"CIB_file": f"{cts_cli_data}/crm_mon.xml",
"CIB_shadow": None}),
ShadowTestGroup(reset_3_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon.xml",
+ env={"CIB_file": f"{cts_cli_data}/crm_mon.xml",
"CIB_shadow": "nonexistent_shadow"}),
ShadowTestGroup(reset_4_tests,
- env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"}),
+ env={"CIB_file": f"{cts_cli_data}/nonexistent_cib.xml"}),
ShadowTestGroup(switch_tests,
env={"CIB_shadow": "nonexistent_shadow"},
create_empty=True),
@@ -2367,7 +2369,7 @@ class CrmVerifyRegressionTest(RegressionTest):
[Test, ValidatingTest],
expected_rc=ExitStatus.CONFIG),
ValidatingTest("Verify another file-specified invalid configuration",
- "crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_no_stonith.xml --output-as=xml",
+ f"crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_no_stonith.xml --output-as=xml",
expected_rc=ExitStatus.CONFIG),
]
@@ -2376,15 +2378,15 @@ class CrmVerifyRegressionTest(RegressionTest):
valid_tests = [
ValidatingTest("Verify a file-specified valid configuration",
- "crm_verify --xml-file {cts_cli_data}/crm_mon.xml --output-as=xml"),
+ f"crm_verify --xml-file {cts_cli_data}/crm_mon.xml --output-as=xml"),
ValidatingTest("Verify a piped-in valid configuration",
"crm_verify -p --output-as=xml",
- stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crm_mon.xml"))),
+ stdin=pathlib.Path(f"{cts_cli_data}/crm_mon.xml")),
ValidatingTest("Verbosely verify a file-specified valid configuration",
- "crm_verify --xml-file {cts_cli_data}/crm_mon.xml --output-as=xml --verbose"),
+ f"crm_verify --xml-file {cts_cli_data}/crm_mon.xml --output-as=xml --verbose"),
ValidatingTest("Verbosely verify a piped-in valid configuration",
"crm_verify -p --output-as=xml --verbose",
- stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crm_mon.xml"))),
+ stdin=pathlib.Path(f"{cts_cli_data}/crm_mon.xml")),
ValidatingTest("Verify a string-supplied valid configuration",
"crm_verify -X '%s' --output-as=xml" % cib_contents),
ValidatingTest("Verbosely verify a string-supplied valid configuration",
@@ -2573,20 +2575,20 @@ class CrmMonRegressionTest(RegressionTest):
return [
TestGroup(basic_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}),
+ env={"CIB_file": f"{cts_cli_data}/crm_mon.xml"}),
Test("Check that CIB_file=\"-\" works", "crm_mon -1",
env={"CIB_file": "-"},
- stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crm_mon.xml"))),
+ stdin=pathlib.Path(apply_substitutions(f"{cts_cli_data}/crm_mon.xml"))),
TestGroup(partial_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon-partial.xml"}),
+ env={"CIB_file": f"{cts_cli_data}/crm_mon-partial.xml"}),
TestGroup(unmanaged_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon-unmanaged.xml"}),
+ env={"CIB_file": f"{cts_cli_data}/crm_mon-unmanaged.xml"}),
TestGroup(maint1_tests,
- cib_gen=partial(copy_existing_cib, "{cts_cli_data}/crm_mon.xml")),
+ cib_gen=partial(copy_existing_cib, f"{cts_cli_data}/crm_mon.xml")),
TestGroup(maint2_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon-rsc-maint.xml"}),
+ env={"CIB_file": f"{cts_cli_data}/crm_mon-rsc-maint.xml"}),
TestGroup(t180_tests,
- env={"CIB_file": "{cts_cli_data}/crm_mon-T180.xml"}),
+ env={"CIB_file": f"{cts_cli_data}/crm_mon-T180.xml"}),
]
@@ -3118,7 +3120,7 @@ class FeatureSetRegressionTest(RegressionTest):
basic_tests = [
# Import the test CIB
Test("Import the test CIB",
- "cibadmin --replace --xml-file {cts_cli_data}/crm_mon-feature_set.xml",
+ f"cibadmin --replace --xml-file {cts_cli_data}/crm_mon-feature_set.xml",
update_cib=True),
Test("Complete text output, no mixed status",
"crm_mon -1 --show-detail"),
diff --git a/cts/cts-regression.in b/cts/cts-regression.in
index 69fb7a9a02a..3f1119d3edd 100644
--- a/cts/cts-regression.in
+++ b/cts/cts-regression.in
@@ -4,6 +4,12 @@
Usage: cts-regression [-h] [-V] [-v] [COMPONENT ...]
"""
+# pylint doesn't like the module name "cts-regression". That complaint is invalid for this
+# file, but it's probably something we want to continue warning about elsewhere.
+# pylint: disable=invalid-name
+# pacemaker imports need to come after we modify sys.path, which pylint will complain about.
+# pylint: disable=wrong-import-position
+
__copyright__ = 'Copyright 2012-2025 the Pacemaker project contributors'
__license__ = 'GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY'
@@ -19,12 +25,14 @@ import textwrap
if os.path.exists("@abs_top_srcdir@/python"):
sys.path.insert(0, "@abs_top_srcdir@/python")
+# pylint: disable=comparison-of-constants,comparison-with-itself,condition-evals-to-constant
if os.path.exists("@abs_top_builddir@/python") and "@abs_top_builddir@" != "@abs_top_srcdir@":
sys.path.insert(0, "@abs_top_builddir@/python")
from pacemaker.buildoptions import BuildOptions
from pacemaker.exitstatus import ExitStatus
+
class Component():
"""A class for running regression tests on a component.
@@ -50,7 +58,7 @@ class Component():
def __init__(self, name, description, test_home, requires_root=False,
supports_valgrind=False):
- """Constructor for the :class:`Component` class.
+ """Create a new :class:`Component` instance.
:param name: The name of the component.
:type name: str
@@ -127,6 +135,7 @@ class ComponentsArgAction(argparse.Action):
"""
def __call__(self, parser, namespace, values, option_string=None):
+ """Process `components` arguments."""
all_components = ['attrd', 'cli', 'exec', 'fencing', 'scheduler']
default_components = ['cli', 'scheduler']
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 450f0bd4d77..8953b281bf6 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -1,6 +1,11 @@
#!@PYTHON@
-""" Regression tests for Pacemaker's scheduler
-"""
+"""Regression tests for Pacemaker's scheduler."""
+
+# pylint doesn't like the module name "cts-scheduler". That complaint is invalid for this
+# file, but it's probably something we want to continue warning about elsewhere.
+# pylint: disable=invalid-name
+# pacemaker imports need to come after we modify sys.path, which pylint will complain about.
+# pylint: disable=wrong-import-position
__copyright__ = "Copyright 2004-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
@@ -23,16 +28,26 @@ import tempfile
if os.path.exists("@abs_top_srcdir@/python"):
sys.path.insert(0, "@abs_top_srcdir@/python")
+# pylint: disable=comparison-of-constants,comparison-with-itself,condition-evals-to-constant
if os.path.exists("@abs_top_builddir@/python") and "@abs_top_builddir@" != "@abs_top_srcdir@":
sys.path.insert(0, "@abs_top_builddir@/python")
from pacemaker.buildoptions import BuildOptions
from pacemaker.exitstatus import ExitStatus
-DESC = """Regression tests for Pacemaker's scheduler"""
class SchedulerTest:
+ """A single scheduler test."""
+
def __init__(self, name, desc, args=None):
+ """
+ Create a new SchedulerTest instance.
+
+ Arguments:
+ name -- A unique name for this test.
+ desc -- A meaningful description for the test.
+        args -- Additional arguments to pass when running this test.
+ """
self.name = name
self.desc = desc
@@ -41,10 +56,21 @@ class SchedulerTest:
else:
self.args = args
+
class SchedulerTestGroup:
+ """Collection of scheduler regression tests."""
+
def __init__(self, tests):
+ """
+ Create a new SchedulerTestGroup instance.
+
+ Arguments:
+ tests -- A list of SchedulerTest instances to be executed as part of
+ this group.
+ """
self.tests = tests
+
# Each entry in TESTS is a group of tests, where each test consists of a
# test base name, test description, and additional test arguments.
# Test groups will be separated by newlines in output.
@@ -192,11 +218,9 @@ TESTS = [
SchedulerTest("rule-dbl-as-number-no-match",
"Floating-point rule values set to number comparison: no match"),
SchedulerTest("rule-dbl-parse-fail-default-str-match",
- "Floating-point rule values fail to parse, default to string "
- "comparison: match"),
+ "Floating-point rule values fail to parse, default to string comparison: match"),
SchedulerTest("rule-dbl-parse-fail-default-str-no-match",
- "Floating-point rule values fail to parse, default to string "
- "comparison: no match"),
+ "Floating-point rule values fail to parse, default to string comparison: no match"),
SchedulerTest("rule-int-as-auto-integer-match",
"Integer rule values default to integer comparison: match"),
SchedulerTest("rule-int-as-auto-integer-no-match",
@@ -210,11 +234,9 @@ TESTS = [
SchedulerTest("rule-int-as-number-no-match",
"Integer rule values set to number comparison: no match"),
SchedulerTest("rule-int-parse-fail-default-str-match",
- "Integer rule values fail to parse, default to string "
- "comparison: match"),
+ "Integer rule values fail to parse, default to string comparison: match"),
SchedulerTest("rule-int-parse-fail-default-str-no-match",
- "Integer rule values fail to parse, default to string "
- "comparison: no match"),
+ "Integer rule values fail to parse, default to string comparison: no match"),
]),
SchedulerTestGroup([
SchedulerTest("order1", "Order start 1"),
@@ -291,17 +313,13 @@ TESTS = [
SchedulerTest("coloc-cloned-group-promoted-dependent2",
"Cloned group promoted role with primitive (optional)"),
SchedulerTest("coloc-optional-promoted-dependent-moves-1",
- "Colocation score less than promotion score "
- + "difference: move"),
+ "Colocation score less than promotion score difference: move"),
SchedulerTest("coloc-optional-promoted-dependent-moves-2",
- "Colocation score greater than promotion score "
- + "difference: move"),
+ "Colocation score greater than promotion score difference: move"),
SchedulerTest("coloc-optional-promoted-dependent-stays-1",
- "Colocation score greater than promotion score "
- + "difference: stay"),
+ "Colocation score greater than promotion score difference: stay"),
SchedulerTest("coloc-optional-promoted-dependent-stays-2",
- "Colocation score less than promotion score "
- + "difference: stay"),
+ "Colocation score less than promotion score difference: stay"),
]),
SchedulerTestGroup([
SchedulerTest("rsc-sets-seq-true", "Resource Sets - sequential=false"),
@@ -476,32 +494,23 @@ TESTS = [
SchedulerTest("clone-recover-no-shuffle-3",
"Don't shuffle instances when starting a new bundle instance"),
SchedulerTest("clone-recover-no-shuffle-4",
- "Don't shuffle instances when starting a new primitive instance with "
- "location preference "),
+ "Don't shuffle instances when starting a new primitive instance with location preference"),
SchedulerTest("clone-recover-no-shuffle-5",
- "Don't shuffle instances when starting a new group instance with "
- "location preference"),
+ "Don't shuffle instances when starting a new group instance with location preference"),
SchedulerTest("clone-recover-no-shuffle-6",
- "Don't shuffle instances when starting a new bundle instance with "
- "location preference"),
+ "Don't shuffle instances when starting a new bundle instance with location preference"),
SchedulerTest("clone-recover-no-shuffle-7",
- "Don't shuffle instances when starting a new primitive instance that "
- "will be promoted"),
+ "Don't shuffle instances when starting a new primitive instance that will be promoted"),
SchedulerTest("clone-recover-no-shuffle-8",
- "Don't shuffle instances when starting a new group instance that "
- "will be promoted "),
+ "Don't shuffle instances when starting a new group instance that will be promoted"),
SchedulerTest("clone-recover-no-shuffle-9",
- "Don't shuffle instances when starting a new bundle instance that "
- "will be promoted "),
+ "Don't shuffle instances when starting a new bundle instance that will be promoted"),
SchedulerTest("clone-recover-no-shuffle-10",
- "Don't shuffle instances when starting a new primitive instance that "
- "won't be promoted"),
+ "Don't shuffle instances when starting a new primitive instance that won't be promoted"),
SchedulerTest("clone-recover-no-shuffle-11",
- "Don't shuffle instances when starting a new group instance that "
- "won't be promoted "),
+ "Don't shuffle instances when starting a new group instance that won't be promoted"),
SchedulerTest("clone-recover-no-shuffle-12",
- "Don't shuffle instances when starting a new bundle instance that "
- "won't be promoted "),
+ "Don't shuffle instances when starting a new bundle instance that won't be promoted"),
SchedulerTest("clone-max-zero", "Orphan processing with clone-max=0"),
SchedulerTest("clone-anon-dup",
"Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node"),
@@ -1161,7 +1170,7 @@ TESTS = [
SchedulerTest("rsc-defaults-2", "Test rsc_defaults conditional expressions without type"),
]),
SchedulerTestGroup([
- SchedulerTest("stop-all-resources", "Test stop-all-resources=true "),
+ SchedulerTest("stop-all-resources", "Test stop-all-resources=true"),
]),
SchedulerTestGroup([
SchedulerTest("ocf_degraded-remap-ocf_ok", "Test degraded remapped to OK"),
@@ -1177,8 +1186,7 @@ TESTS_64BIT = [
def is_executable(path):
- """ Check whether a file at a given path is executable. """
-
+ """Check whether a file at a given path is executable."""
try:
return os.stat(path)[stat.ST_MODE] & stat.S_IXUSR
except OSError:
@@ -1186,24 +1194,22 @@ def is_executable(path):
def diff(file1, file2, **kwargs):
- """ Call diff on two files """
-
- return subprocess.call([ "diff", "-u", "-N", "--ignore-all-space",
- "--ignore-blank-lines", file1, file2 ], **kwargs)
+ """Call diff on two files."""
+ return subprocess.call(["diff", "-u", "-N", "--ignore-all-space",
+ "--ignore-blank-lines", file1, file2], **kwargs)
def sort_file(filename):
- """ Sort a file alphabetically """
-
- with io.open(filename, "rt") as f:
+ """Sort a file alphabetically."""
+ with io.open(filename, "rt", encoding="utf-8") as f:
lines = sorted(f)
- with io.open(filename, "wt") as f:
+
+ with io.open(filename, "wt", encoding="utf-8") as f:
f.writelines(lines)
def remove_files(filenames):
- """ Remove a list of files """
-
+ """Remove a list of files."""
for filename in filenames:
try:
os.remove(filename)
@@ -1212,66 +1218,59 @@ def remove_files(filenames):
def normalize(filename):
- """ Remove text from a file that isn't important for comparison """
-
+ """Remove text from a file that isn't important for comparison."""
if not hasattr(normalize, "patterns"):
normalize.patterns = [
re.compile(r'crm_feature_set="[^"]*"'),
re.compile(r'batch-limit="[0-9]*"')
]
- if os.path.isfile(filename):
- with io.open(filename, "rt") as f:
- lines = f.readlines()
- with io.open(filename, "wt") as f:
- for line in lines:
- for pattern in normalize.patterns:
- line = pattern.sub("", line)
- f.write(line)
+ if not os.path.isfile(filename):
+ return
+
+ with io.open(filename, "rt", encoding="utf-8") as f:
+ lines = f.readlines()
+
+ with io.open(filename, "wt", encoding="utf-8") as f:
+ for line in lines:
+ for pattern in normalize.patterns:
+ line = pattern.sub("", line)
+
+ f.write(line)
-def cat(filename, dest=sys.stdout):
- """ Copy a file to a destination file descriptor """
- with io.open(filename, "rt") as f:
+def cat(filename, dest=sys.stdout):
+ """Copy a file to a destination file descriptor."""
+ with io.open(filename, "rt", encoding="utf-8") as f:
shutil.copyfileobj(f, dest)
-class CtsScheduler(object):
- """ Regression tests for Pacemaker's scheduler """
+class CtsScheduler:
+ """Regression tests for Pacemaker's scheduler."""
def _parse_args(self, argv):
- """ Parse command-line arguments """
-
- parser = argparse.ArgumentParser(description=DESC)
+ """Parse command-line arguments."""
+ parser = argparse.ArgumentParser(description="Regression tests for Pacemaker's scheduler")
parser.add_argument('-V', '--verbose', action='count',
help='Display any differences from expected output')
-
parser.add_argument('--run', metavar='TEST',
help=('Run only single specified test (any further '
'arguments will be passed to crm_simulate)'))
-
parser.add_argument('--update', action='store_true',
help='Update expected results with actual results')
-
parser.add_argument('-b', '--binary', metavar='PATH',
help='Specify path to crm_simulate')
-
parser.add_argument('-i', '--io-dir', metavar='PATH',
help='Specify path to regression test data directory')
-
parser.add_argument('-o', '--out-dir', metavar='PATH',
help='Specify where intermediate and output files should go')
-
parser.add_argument('-v', '--valgrind', action='store_true',
help='Run all commands under valgrind')
-
parser.add_argument('--valgrind-dhat', action='store_true',
help='Run all commands under valgrind with heap analyzer')
-
parser.add_argument('--valgrind-skip-output', action='store_true',
help='If running under valgrind, do not display output')
-
parser.add_argument('--testcmd-options', metavar='OPTIONS', default='',
help='Additional options for command under test')
@@ -1279,22 +1278,23 @@ class CtsScheduler(object):
self.single_test_args = []
narg = 0
for arg in argv:
- narg = narg + 1
+ narg += 1
if arg == '--run':
- (argv, self.single_test_args) = (argv[:narg+1], argv[narg+1:])
+ (argv, self.single_test_args) = (argv[:narg + 1], argv[narg + 1:])
break
self.args = parser.parse_args(argv[1:])
def _error(self, s):
+ """Print an error message."""
print(f" * ERROR: {s}")
def _failed(self, s):
+ """Print a failure message."""
print(f" * FAILED: {s}")
def _get_valgrind_cmd(self):
- """ Return command arguments needed (or not) to run valgrind """
-
+ """Return command arguments needed (or not) to run valgrind."""
if self.args.valgrind:
os.environ['G_SLICE'] = "always-malloc"
return [
@@ -1323,35 +1323,39 @@ class CtsScheduler(object):
return []
def _get_simulator_cmd(self):
- """ Locate the simulation binary """
-
+ """Locate the simulation binary."""
if self.args.binary is None:
- self.args.binary = BuildOptions._BUILD_DIR + "/tools/crm_simulate"
+ # pylint: disable=protected-access
+ self.args.binary = f"{BuildOptions._BUILD_DIR}/tools/crm_simulate"
+
if not is_executable(self.args.binary):
- self.args.binary = BuildOptions.SBIN_DIR + "/crm_simulate"
+ self.args.binary = f"{BuildOptions.SBIN_DIR}/crm_simulate"
if not is_executable(self.args.binary):
# @TODO it would be more pythonic to raise an exception
- self._error("Test binary " + self.args.binary + " not found")
+ self._error(f"Test binary {self.args.binary} not found")
sys.exit(ExitStatus.NOT_INSTALLED)
- return [ self.args.binary ] + shlex.split(self.args.testcmd_options)
+ return [self.args.binary] + shlex.split(self.args.testcmd_options)
def set_schema_env(self):
- """ Ensure schema directory environment variable is set, if possible """
-
+ """Ensure schema directory environment variable is set, if possible."""
try:
return os.environ['PCMK_schema_directory']
except KeyError:
- for d in [ os.path.join(BuildOptions._BUILD_DIR, "xml"),
- BuildOptions.SCHEMA_DIR ]:
- if os.path.isdir(d):
- os.environ['PCMK_schema_directory'] = d
- return d
+ # pylint: disable=protected-access
+ for d in [os.path.join(BuildOptions._BUILD_DIR, "xml"),
+ BuildOptions.SCHEMA_DIR]:
+ if not os.path.isdir(d):
+ continue
+
+ os.environ['PCMK_schema_directory'] = d
+ return d
+
return None
def __init__(self, argv=sys.argv):
-
+ """Create a new CtsScheduler instance."""
# Ensure all command output is in portable locale for comparison
os.environ['LC_ALL'] = "C"
@@ -1380,8 +1384,9 @@ class CtsScheduler(object):
self.failed_filename = os.path.join(self.failed_dir, "test-output.diff")
else:
self.failed_filename = os.path.join(self.args.out_dir, "test-output.diff")
+
os.environ['CIB_shadow_dir'] = self.args.out_dir
-
+
self.failed_file = None
self.outfile_out_dir = os.path.join(self.args.out_dir, "out")
@@ -1396,7 +1401,7 @@ class CtsScheduler(object):
# User can give test base name or file name of a test input
self.args.run = os.path.splitext(os.path.basename(self.args.run))[0]
except (AttributeError, TypeError):
- pass # --run was not specified
+ pass # --run was not specified
self.set_schema_env()
@@ -1421,6 +1426,7 @@ class CtsScheduler(object):
os.makedirs(self.scores_out_dir, 0o755, True)
os.makedirs(self.summary_out_dir, 0o755, True)
os.makedirs(self.stderr_out_dir, 0o755, True)
+
if self.valgrind_args:
os.makedirs(self.valgrind_out_dir, 0o755, True)
except OSError as ex:
@@ -1432,25 +1438,29 @@ class CtsScheduler(object):
self.summary_out_dir,
self.stderr_out_dir,
])
+
sys.exit(ExitStatus.CANTCREAT)
def _compare_files(self, filename1, filename2):
- """ Add any file differences to failed results """
-
+ """Add any file differences to failed results."""
if diff(filename1, filename2, stdout=subprocess.DEVNULL) != 0:
diff(filename1, filename2, stdout=self.failed_file, stderr=subprocess.DEVNULL)
self.failed_file.write("\n")
return True
+
return False
- def run_one(self, test_name, test_desc, test_args):
- """ Run one scheduler test """
+ def _file_missing(self, path):
+ """Return True if path does not exist or is empty."""
+ return not os.path.isfile(path) or os.path.getsize(path) == 0
- s = test_name + ":"
- print(f" Test {s:41} {test_desc}")
+ def run_one(self, test_name, test_desc, test_args):
+ """Run one scheduler test."""
+ # pylint: disable=too-many-locals
+ print(f" Test {f'{test_name}:':41} {test_desc}")
did_fail = False
- self.num_tests = self.num_tests + 1
+ self.num_tests += 1
# Test inputs
input_filename = os.path.join(self.xml_input_dir, f"{test_name}.xml")
@@ -1471,8 +1481,9 @@ class CtsScheduler(object):
# Common arguments for running test
test_cmd = []
if self.valgrind_args:
- test_cmd = self.valgrind_args + [ f"--log-file={valgrind_output_filename}" ]
- test_cmd = test_cmd + self.simulate_args
+ test_cmd = self.valgrind_args + [f"--log-file={valgrind_output_filename}"]
+
+ test_cmd += self.simulate_args
# @TODO It would be more pythonic to raise exceptions for errors,
# then perhaps it would be nice to make a single-test class
@@ -1480,18 +1491,19 @@ class CtsScheduler(object):
# Ensure necessary test inputs exist
if not os.path.isfile(input_filename):
self._error("No input")
- self.num_failed = self.num_failed + 1
+ self.num_failed += 1
return ExitStatus.NOINPUT
+
if not self.args.update and not os.path.isfile(expected_filename):
self._error("no stored output")
return ExitStatus.NOINPUT
# Run simulation to generate summary output
- test_cmd_full = test_cmd + [ '-x', input_filename, '-S' ] + test_args
- if self.args.run: # Single test mode
+ test_cmd_full = test_cmd + ['-x', input_filename, '-S'] + test_args
+ if self.args.run: # Single test mode
print(" ".join(test_cmd_full))
- with io.open(summary_output_filename, "wt") as f:
+ with io.open(summary_output_filename, "wt", encoding="utf-8") as f:
subprocess.run(test_cmd_full, stdout=f, stderr=subprocess.STDOUT,
env=os.environ, check=False)
@@ -1499,13 +1511,13 @@ class CtsScheduler(object):
cat(summary_output_filename)
# Re-run simulation to generate dot, graph, and scores
- test_cmd_full = test_cmd + [
- '-x', input_filename,
- '-D', dot_output_filename,
- '-G', output_filename,
- '-sSQ' ] + test_args
- with io.open(stderr_output_filename, "wt") as f_stderr, \
- io.open(score_output_filename, "wt") as f_score:
+ test_cmd_full = test_cmd + ['-x', input_filename,
+ '-D', dot_output_filename,
+ '-G', output_filename,
+ '-sSQ'] + test_args
+
+ with io.open(stderr_output_filename, "wt", encoding="utf-8") as f_stderr, \
+ io.open(score_output_filename, "wt", encoding="utf-8") as f_score:
rc = subprocess.call(test_cmd_full, stdout=f_score, stderr=f_stderr, env=os.environ)
# Check for test command failure
@@ -1516,15 +1528,16 @@ class CtsScheduler(object):
# Check for valgrind errors
if self.valgrind_args and not self.args.valgrind_skip_output:
- if os.stat(valgrind_output_filename).st_size > 0:
+ if os.path.getsize(valgrind_output_filename) > 0:
self._failed("Valgrind reported errors")
did_fail = True
cat(valgrind_output_filename)
- remove_files([ valgrind_output_filename ])
+
+ remove_files([valgrind_output_filename])
# Check for core dump
if os.path.isfile("core"):
- self._failed("Core-file detected: core." + test_name)
+ self._failed(f"Core-file detected: core.{test_name}")
did_fail = True
os.rename("core", f"{self.test_home}/core.{test_name}")
@@ -1533,51 +1546,52 @@ class CtsScheduler(object):
if self._compare_files(stderr_expected_filename, stderr_output_filename):
self._failed("stderr changed")
did_fail = True
- elif os.stat(stderr_output_filename).st_size > 0:
+ elif os.path.getsize(stderr_output_filename) != 0:
self._failed("Output was written to stderr")
did_fail = True
cat(stderr_output_filename)
- remove_files([ stderr_output_filename ])
+
+ remove_files([stderr_output_filename])
# Check whether output graph exists, and normalize it
- if (not os.path.isfile(output_filename)
- or os.stat(output_filename).st_size == 0):
+ if self._file_missing(output_filename):
self._error("No graph produced")
did_fail = True
- self.num_failed = self.num_failed + 1
- remove_files([ output_filename ])
+ self.num_failed += 1
+ remove_files([output_filename])
return ExitStatus.ERROR
+
normalize(output_filename)
# Check whether dot output exists, and sort it
- if (not os.path.isfile(dot_output_filename) or
- os.stat(dot_output_filename).st_size == 0):
+ if self._file_missing(dot_output_filename):
self._error("No dot-file summary produced")
did_fail = True
- self.num_failed = self.num_failed + 1
- remove_files([ dot_output_filename, output_filename ])
+ self.num_failed += 1
+ remove_files([dot_output_filename, output_filename])
return ExitStatus.ERROR
- with io.open(dot_output_filename, "rt") as f:
- first_line = f.readline() # "digraph" line with opening brace
+
+ with io.open(dot_output_filename, "rt", encoding="utf-8") as f:
+ first_line = f.readline() # "digraph" line with opening brace
lines = f.readlines()
- last_line = lines[-1] # closing brace
+ last_line = lines[-1] # closing brace
del lines[-1]
- lines = sorted(set(lines)) # unique sort
- with io.open(dot_output_filename, "wt") as f:
+ lines = sorted(set(lines)) # unique sort
+
+ with io.open(dot_output_filename, "wt", encoding="utf-8") as f:
f.write(first_line)
f.writelines(lines)
f.write(last_line)
# Check whether score output exists, and sort it
- if (not os.path.isfile(score_output_filename)
- or os.stat(score_output_filename).st_size == 0):
+ if self._file_missing(score_output_filename):
self._error("No allocation scores produced")
did_fail = True
- self.num_failed = self.num_failed + 1
- remove_files([ score_output_filename, output_filename ])
+ self.num_failed += 1
+ remove_files([score_output_filename, output_filename])
return ExitStatus.ERROR
- else:
- sort_file(score_output_filename)
+
+ sort_file(score_output_filename)
if self.args.update:
shutil.copyfile(output_filename, expected_filename)
@@ -1594,7 +1608,7 @@ class CtsScheduler(object):
self._failed("dot-file summary changed")
did_fail = True
else:
- remove_files([ dot_output_filename ])
+ remove_files([dot_output_filename])
if self._compare_files(expected_filename, output_filename):
self._failed("xml-file changed")
@@ -1604,59 +1618,63 @@ class CtsScheduler(object):
self._failed("scores-file changed")
did_fail = True
- remove_files([ output_filename,
- dot_output_filename,
- score_output_filename,
- summary_output_filename])
+ remove_files([output_filename,
+ dot_output_filename,
+ score_output_filename,
+ summary_output_filename])
if did_fail:
- self.num_failed = self.num_failed + 1
+ self.num_failed += 1
return ExitStatus.ERROR
return ExitStatus.OK
def run_all(self):
- """ Run all defined tests """
-
+ """Run all defined tests."""
if platform.architecture()[0] == "64bit":
TESTS.extend(TESTS_64BIT)
for group in TESTS:
for test in group.tests:
self.run_one(test.name, test.desc, test.args)
+
print()
def _print_summary(self):
- """ Print a summary of parameters for this test run """
+ """Print a summary of parameters for this test run."""
+ print(f"Test home is:\t{self.test_home}")
+ print(f"Test binary is:\t{self.args.binary}")
- print("Test home is:\t" + self.test_home)
- print("Test binary is:\t" + self.args.binary)
if 'PCMK_schema_directory' in os.environ:
- print("Schema home is:\t" + os.environ['PCMK_schema_directory'])
- if self.valgrind_args != []:
+ print(f"Schema home is:\t{os.environ['PCMK_schema_directory']}")
+
+ if self.valgrind_args:
print("Activating memory testing with valgrind")
+
print()
def _test_results(self):
+ """Report test results."""
if self.num_failed == 0:
shutil.rmtree(self.failed_dir)
return ExitStatus.OK
- if os.path.isfile(self.failed_filename) and os.stat(self.failed_filename).st_size != 0:
- if self.args.verbose:
- self._error(f"Results of {self.num_failed} failed tests (out of {self.num_tests}):")
- cat(self.failed_filename)
- else:
- self._error(f"Results of {self.num_failed} failed tests (out of {self.num_tests}) "
- f"are in {self.failed_filename}")
- self._error("Use -V to display them after running the tests")
- else:
+ if self._file_missing(self.failed_filename):
self._error(f"{self.num_failed} (of {self.num_tests}) tests failed (no diff results)")
if os.path.isfile(self.failed_filename):
shutil.rmtree(self.failed_dir)
+ elif self.args.verbose:
+ self._error(f"Results of {self.num_failed} failed tests (out of {self.num_tests}):")
+ cat(self.failed_filename)
+ else:
+ self._error(f"Results of {self.num_failed} failed tests (out of {self.num_tests}) "
+ f"are in {self.failed_filename}")
+ self._error("Use -V to display them after running the tests")
+
return ExitStatus.ERROR
def find_test(self, name):
+ """Return the SchedulerTest object with the given name."""
if platform.architecture()[0] == "64bit":
TESTS.extend(TESTS_64BIT)
@@ -1668,20 +1686,20 @@ class CtsScheduler(object):
return None
def run(self):
- """ Run test(s) as specified """
-
+ """Run test(s) as specified."""
# Check for pre-existing core so we don't think it's from us
if os.path.exists("core"):
- self._failed("Can't run with core already present in " + self.test_home)
+ self._failed(f"Can't run with core already present in {self.test_home}")
return ExitStatus.OSFILE
self._print_summary()
# Zero out the error log
- self.failed_file = io.open(self.failed_filename, "wt")
+ # pylint: disable=consider-using-with
+ self.failed_file = io.open(self.failed_filename, "wt", encoding="utf-8")
if self.args.run is None:
- print("Performing the following tests from " + self.args.io_dir)
+ print(f"Performing the following tests from {self.args.io_dir}")
print()
self.run_all()
print()
@@ -1704,10 +1722,11 @@ class CtsScheduler(object):
rc = self.run_one(test.name, test.desc, args)
self.failed_file.close()
+
if self.num_failed > 0:
- print("\nFailures:\nThese have also been written to: " + self.failed_filename + "\n")
- cat(self.failed_filename)
- shutil.rmtree(self.failed_dir)
+ print(f"\nFailures:\nThese have also been written to: {self.failed_filename}\n")
+ cat(self.failed_filename)
+ shutil.rmtree(self.failed_dir)
return rc
diff --git a/python/pylintrc b/python/pylintrc
index bb453f32a8c..81f63fea952 100644
--- a/python/pylintrc
+++ b/python/pylintrc
@@ -89,7 +89,8 @@ enable=
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
# CHANGED
-disable=line-too-long,
+disable=R0801,
+ line-too-long,
too-few-public-methods,
too-many-arguments,
too-many-branches,