Skip to content

Commit 46be0c6

Browse files
committed
Remove cdi metrics recording rules tests
Removing tests for the metrics kubevirt_cdi_clone_pods_high_restart, kubevirt_cdi_upload_pods_high_restart, kubevirt_cdi_operator_up, and kubevirt_cdi_import_pods_high_restart. These metrics are recording rules that rely on Kubernetes metrics, not CNV.
1 parent c2a37a6 commit 46be0c6

File tree

4 files changed

+2
-202
lines changed

4 files changed

+2
-202
lines changed

tests/observability/metrics/conftest.py

Lines changed: 1 addition & 105 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44

55
import pytest
66
from ocp_resources.data_source import DataSource
7-
from ocp_resources.datavolume import DataVolume
87
from ocp_resources.deployment import Deployment
98
from ocp_resources.persistent_volume_claim import PersistentVolumeClaim
109
from ocp_resources.pod import Pod
@@ -25,43 +24,35 @@
2524
ZERO_CPU_CORES,
2625
disk_file_system_info,
2726
enable_swap_fedora_vm,
28-
fail_if_not_zero_restartcount,
2927
metric_result_output_dict_by_mountpoint,
30-
restart_cdi_worker_pod,
3128
wait_for_metric_vmi_request_cpu_cores_output,
3229
)
3330
from tests.observability.utils import validate_metrics_value
3431
from tests.utils import create_vms
3532
from utilities import console
3633
from utilities.constants import (
37-
CDI_UPLOAD_TMP_PVC,
3834
NODE_STR,
3935
ONE_CPU_CORE,
4036
OS_FLAVOR_FEDORA,
41-
PVC,
42-
SOURCE_POD,
4337
SSP_OPERATOR,
4438
TIMEOUT_2MIN,
4539
TIMEOUT_4MIN,
4640
TIMEOUT_15SEC,
47-
TIMEOUT_30MIN,
4841
TWO_CPU_CORES,
4942
TWO_CPU_SOCKETS,
5043
TWO_CPU_THREADS,
5144
VIRT_TEMPLATE_VALIDATOR,
5245
Images,
5346
)
5447
from utilities.hco import ResourceEditorValidateHCOReconcile
55-
from utilities.infra import create_ns, get_http_image_url, get_node_selector_dict, get_pod_by_name_prefix, unique_name
48+
from utilities.infra import create_ns, get_node_selector_dict, get_pod_by_name_prefix, unique_name
5649
from utilities.monitoring import get_metrics_value
5750
from utilities.network import assert_ping_successful
5851
from utilities.ssp import verify_ssp_pod_is_running
5952
from utilities.storage import (
60-
create_dv,
6153
data_volume_template_with_source_ref_dict,
6254
is_snapshot_supported_by_sc,
6355
vm_snapshot,
64-
wait_for_cdi_worker_pod,
6556
)
6657
from utilities.virt import (
6758
VirtualMachineForTests,
@@ -202,101 +193,6 @@ def virt_up_metrics_values(request, prometheus):
202193
return int(query_response[0]["value"][1])
203194

204195

205-
@pytest.fixture()
206-
def windows_dv_with_block_volume_mode(
207-
namespace,
208-
unprivileged_client,
209-
storage_class_with_block_volume_mode,
210-
):
211-
with create_dv(
212-
dv_name="test-dv-windows-image",
213-
namespace=namespace.name,
214-
url=get_http_image_url(image_directory=Images.Windows.UEFI_WIN_DIR, image_name=Images.Windows.WIN2k19_IMG),
215-
size=Images.Windows.DEFAULT_DV_SIZE,
216-
storage_class=storage_class_with_block_volume_mode,
217-
client=unprivileged_client,
218-
volume_mode=DataVolume.VolumeMode.BLOCK,
219-
) as dv:
220-
dv.wait_for_dv_success(timeout=TIMEOUT_30MIN)
221-
yield dv
222-
223-
224-
@pytest.fixture()
225-
def cloned_dv_from_block_to_fs(
226-
unprivileged_client,
227-
windows_dv_with_block_volume_mode,
228-
storage_class_with_filesystem_volume_mode,
229-
):
230-
with create_dv(
231-
source=PVC,
232-
dv_name="cloned-test-dv-windows-image",
233-
namespace=windows_dv_with_block_volume_mode.namespace,
234-
source_pvc=windows_dv_with_block_volume_mode.name,
235-
source_namespace=windows_dv_with_block_volume_mode.namespace,
236-
size=windows_dv_with_block_volume_mode.size,
237-
storage_class=storage_class_with_filesystem_volume_mode,
238-
client=unprivileged_client,
239-
volume_mode=DataVolume.VolumeMode.FILE,
240-
) as cdv:
241-
cdv.wait_for_status(status=DataVolume.Status.CLONE_IN_PROGRESS, timeout=TIMEOUT_2MIN)
242-
yield cdv
243-
244-
245-
@pytest.fixture()
246-
def running_cdi_worker_pod(cloned_dv_from_block_to_fs):
247-
for pod_name in [CDI_UPLOAD_TMP_PVC, SOURCE_POD]:
248-
wait_for_cdi_worker_pod(
249-
pod_name=pod_name,
250-
storage_ns_name=cloned_dv_from_block_to_fs.namespace,
251-
).wait_for_status(status=Pod.Status.RUNNING, timeout=TIMEOUT_2MIN)
252-
253-
254-
@pytest.fixture()
255-
def restarted_cdi_dv_clone(
256-
unprivileged_client,
257-
cloned_dv_from_block_to_fs,
258-
running_cdi_worker_pod,
259-
):
260-
restart_cdi_worker_pod(
261-
unprivileged_client=unprivileged_client,
262-
dv=cloned_dv_from_block_to_fs,
263-
pod_prefix=CDI_UPLOAD_TMP_PVC,
264-
)
265-
266-
267-
@pytest.fixture()
268-
def ready_uploaded_dv(unprivileged_client, namespace):
269-
with create_dv(
270-
source=UPLOAD_STR,
271-
dv_name=f"{UPLOAD_STR}-dv",
272-
namespace=namespace.name,
273-
storage_class=py_config["default_storage_class"],
274-
client=unprivileged_client,
275-
) as dv:
276-
dv.wait_for_status(status=DataVolume.Status.UPLOAD_READY, timeout=TIMEOUT_2MIN)
277-
yield dv
278-
279-
280-
@pytest.fixture()
281-
def restarted_cdi_dv_upload(unprivileged_client, ready_uploaded_dv):
282-
restart_cdi_worker_pod(
283-
unprivileged_client=unprivileged_client,
284-
dv=ready_uploaded_dv,
285-
pod_prefix=CDI_UPLOAD_PRIME,
286-
)
287-
ready_uploaded_dv.wait_for_status(status=DataVolume.Status.UPLOAD_READY, timeout=TIMEOUT_2MIN)
288-
289-
290-
@pytest.fixture()
291-
def zero_clone_dv_restart_count(cloned_dv_from_block_to_fs):
292-
fail_if_not_zero_restartcount(dv=cloned_dv_from_block_to_fs)
293-
294-
295-
@pytest.fixture()
296-
def zero_upload_dv_restart_count(ready_uploaded_dv):
297-
fail_if_not_zero_restartcount(dv=ready_uploaded_dv)
298-
299-
300196
@pytest.fixture()
301197
def connected_vm_console_successfully(vm_for_test, prometheus):
302198
with console.Console(vm=vm_for_test) as vmc:
Lines changed: 0 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -1,35 +1,6 @@
11
import pytest
22

33
from tests.observability.metrics.utils import expected_metric_labels_and_values
4-
from tests.observability.utils import validate_metrics_value
5-
from utilities.constants import CDI_OPERATOR
6-
7-
8-
@pytest.mark.polarion("CNV-10557")
9-
def test_kubevirt_cdi_clone_pods_high_restart(
10-
skip_test_if_no_block_sc,
11-
prometheus,
12-
zero_clone_dv_restart_count,
13-
restarted_cdi_dv_clone,
14-
):
15-
validate_metrics_value(
16-
prometheus=prometheus,
17-
expected_value="1",
18-
metric_name="kubevirt_cdi_clone_pods_high_restart",
19-
)
20-
21-
22-
@pytest.mark.polarion("CNV-10717")
23-
def test_kubevirt_cdi_upload_pods_high_restart(
24-
prometheus,
25-
zero_upload_dv_restart_count,
26-
restarted_cdi_dv_upload,
27-
):
28-
validate_metrics_value(
29-
prometheus=prometheus,
30-
expected_value="1",
31-
metric_name="kubevirt_cdi_upload_pods_high_restart",
32-
)
334

345

356
@pytest.mark.polarion("CNV-11744")
@@ -40,26 +11,3 @@ def test_metric_kubevirt_cdi_storageprofile_info(prometheus, storage_class_label
4011
f"{{storageclass='{storage_class_labels_for_testing['storageclass']}'}}",
4112
expected_labels_and_values=storage_class_labels_for_testing,
4213
)
43-
44-
45-
@pytest.mark.parametrize(
46-
"scaled_deployment",
47-
[
48-
pytest.param(
49-
{"deployment_name": CDI_OPERATOR, "replicas": 0},
50-
marks=(pytest.mark.polarion("CNV-11722")),
51-
id="Test_kubevirt_cdi_operator_up",
52-
),
53-
],
54-
indirect=True,
55-
)
56-
def test_kubevirt_cdi_operator_up(
57-
prometheus,
58-
disabled_virt_operator,
59-
scaled_deployment,
60-
):
61-
validate_metrics_value(
62-
prometheus=prometheus,
63-
expected_value="0",
64-
metric_name="kubevirt_cdi_operator_up",
65-
)

tests/observability/metrics/utils.py

Lines changed: 1 addition & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,11 @@
66
from datetime import datetime, timezone
77
from typing import Any, Optional
88

9-
import pytest
109
from kubernetes.dynamic import DynamicClient
11-
from ocp_resources.datavolume import DataVolume
1210
from ocp_resources.resource import Resource
1311
from ocp_resources.virtual_machine import VirtualMachine
1412
from ocp_utilities.monitoring import Prometheus
15-
from pyhelper_utils.shell import run_command, run_ssh_commands
13+
from pyhelper_utils.shell import run_ssh_commands
1614
from timeout_sampler import TimeoutExpiredError, TimeoutSampler
1715

1816
from tests.observability.constants import KUBEVIRT_VIRT_OPERATOR_READY
@@ -37,9 +35,7 @@
3735
USED,
3836
VIRT_HANDLER,
3937
)
40-
from utilities.infra import get_pod_by_name_prefix
4138
from utilities.monitoring import get_metrics_value
42-
from utilities.storage import wait_for_dv_expected_restart_count
4339
from utilities.virt import VirtualMachineForTests
4440

4541
LOGGER = logging.getLogger(__name__)
@@ -314,29 +310,6 @@ def get_resource_object(
314310
)
315311

316312

317-
def restart_cdi_worker_pod(unprivileged_client: DynamicClient, dv: DataVolume, pod_prefix: str) -> None:
318-
initial_dv_restartcount = dv.instance.get("status", {}).get("restartCount", 0)
319-
for iteration in range(TOTAL_4_ITERATIONS - initial_dv_restartcount):
320-
pod = get_pod_by_name_prefix(
321-
dyn_client=unprivileged_client,
322-
pod_prefix=pod_prefix,
323-
namespace=dv.namespace,
324-
)
325-
dv_restartcount = dv.instance.get("status", {}).get("restartCount", 0)
326-
run_command(
327-
command=shlex.split(f"oc exec -n {dv.namespace} {pod.name} -- kill 1"),
328-
check=False,
329-
)
330-
wait_for_dv_expected_restart_count(dv=dv, expected_result=dv_restartcount + 1)
331-
332-
333-
def fail_if_not_zero_restartcount(dv: DataVolume) -> None:
334-
restartcount = dv.instance.get("status", {}).get("restartCount", 0)
335-
336-
if restartcount != 0:
337-
pytest.fail(f"dv {dv.name} restartcount is not zero,\n actual restartcount: {restartcount}")
338-
339-
340313
def assert_virtctl_version_equal_metric_output(
341314
virtctl_server_version: dict[str, str], metric_output: list[dict[str, dict[str, str]]]
342315
) -> None:

utilities/storage.py

Lines changed: 0 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -972,23 +972,6 @@ def get_storage_class_with_specified_volume_mode(volume_mode, sc_names):
972972
LOGGER.error(f"No {sc_with_volume_mode} among {sc_names}")
973973

974974

975-
def wait_for_dv_expected_restart_count(dv, expected_result):
976-
try:
977-
for sample in TimeoutSampler(
978-
wait_timeout=TIMEOUT_3MIN,
979-
sleep=TIMEOUT_20SEC,
980-
func=lambda: dv.instance.get("status", {}).get("restartCount"),
981-
):
982-
if sample and sample >= expected_result:
983-
return
984-
except TimeoutExpiredError:
985-
LOGGER.error(
986-
f"error while restarting dv: {dv.name} ,expected restartCount: {expected_result}, "
987-
f"actual restartCount: {sample}"
988-
)
989-
raise
990-
991-
992975
@contextmanager
993976
def create_vm_from_dv(
994977
dv,

0 commit comments

Comments
 (0)