diff --git a/changelog.d/2373.added.md b/changelog.d/2373.added.md
new file mode 100644
index 0000000000..44ebe65ab4
--- /dev/null
+++ b/changelog.d/2373.added.md
@@ -0,0 +1 @@
+Show DHCP stats graphs on VLAN and Prefix pages when recent enough DHCP stats are found
diff --git a/python/nav/bin/dhcpstats.py b/python/nav/bin/dhcpstats.py
index a0cfa588b2..9dd1b5ab87 100755
--- a/python/nav/bin/dhcpstats.py
+++ b/python/nav/bin/dhcpstats.py
@@ -15,7 +15,8 @@
# along with NAV. If not, see .
#
"""
-Collects statistics from DHCP servers and sends them to the Carbon backend.
+Collects statistics from DHCP servers and sends them to the Graphite/Carbon
+backend.
"""
import argparse
@@ -25,6 +26,7 @@
from nav.config import getconfig
from nav.dhcpstats import kea_dhcp
+from nav.dhcpstats.common import GraphiteMetric
from nav.dhcpstats.errors import CommunicationError
from nav.errors import ConfigurationError
from nav.logs import init_generic_logging
@@ -36,7 +38,7 @@
CONFIGFILE = "dhcpstats.conf"
PIDFILE = "dhcpstats.pid"
-ENDPOINT_CLIENTS = {
+CLIENTS = {
"kea-dhcp4": partial(kea_dhcp.Client, dhcp_version=4),
}
@@ -66,27 +68,27 @@ def parse_args():
"""
parser = argparse.ArgumentParser(
description="Collects statistics from DHCP servers and sends them to the "
- "Carbon backend",
- epilog="Statistics are collected from each DHCP API endpoint configured in "
- "'CONFDIR/dhcpstats.conf', and then sent to the Carbon backend configured in "
- "'CONFDIR/graphite.conf'.",
+ "Graphite/Carbon backend",
+ epilog="Statistics are collected from each DHCP server configured in "
+ "'CONFDIR/dhcpstats.conf', and then sent to the Graphite/Carbon backend "
+ "configured in 'CONFDIR/graphite.conf'.",
)
return parser.parse_args()
def collect_stats(config):
"""
- Collects current stats from each configured endpoint.
+ Collects current stats from each configured server.
:param config: dhcpstats.conf INI-parsed into a dict specifying
- endpoints to collect metrics from.
+ servers to collect metrics from.
"""
_logger.info("--> Starting stats collection <--")
- all_stats = []
+ all_stats: list[GraphiteMetric] = []
- for client in get_endpoint_clients(config):
+ for client in get_clients(config):
_logger.info(
"Collecting stats using %s...",
client,
@@ -96,13 +98,13 @@ def collect_stats(config):
fetched_stats = client.fetch_stats()
except ConfigurationError as err:
_logger.warning(
- "%s is badly configured: %s, skipping endpoint...",
+ "%s is badly configured: %s, skipping server...",
client,
err,
)
except CommunicationError as err:
_logger.warning(
- "Error while collecting stats using %s: %s, skipping endpoint...",
+ "Error while collecting stats using %s: %s, skipping server...",
client,
err,
)
@@ -118,46 +120,48 @@ def collect_stats(config):
_logger.info("--> Stats collection done <--")
-def get_endpoint_clients(config):
+def get_clients(config):
"""
- Yields one client per correctly configured endpoint in config. A section
- of the config correctly configures an endpoint if:
+ Yields one client per correctly configured server in config. A section
+ of the config correctly configures a server if:
- * Its name starts with 'endpoint_'.
+ * Its name starts with 'endpoint_' or 'server_'.
* It has the mandatory option 'type'.
* The value of the 'type' option is mapped to a client initializer
- by ENDPOINT_CLIENTS, and the client doesn't raise a
+ by the global CLIENTS dictionary, and the client doesn't raise a
ConfigurationError when it is initialized with the rest of the
options of the section as keyword arguments.
:param config: dhcpstats.conf INI-parsed into a dict specifying
- endpoints to collect metrics from.
+ servers to collect metrics from.
"""
for section, options in config.items():
- if not section.startswith("endpoint_"):
+ if section.startswith("endpoint_"):
+ server_name = section.removeprefix("endpoint_")
+ elif section.startswith("server_"):
+ server_name = section.removeprefix("server_")
+ else:
continue
- endpoint_name = section.removeprefix("endpoint_")
- endpoint_type = options.get("type")
+ server_type = options.get("type")
kwargs = {opt: val for opt, val in options.items() if opt != "type"}
try:
- cls = ENDPOINT_CLIENTS[endpoint_type]
+ cls = CLIENTS[server_type]
except KeyError:
_logger.warning(
- "Invalid endpoint type '%s' defined in config section [%s], skipping "
- "endpoint...",
- endpoint_type,
+ "Invalid server type '%s' defined in config section [%s], skipping "
+ "server...",
+ server_type,
section,
)
continue
try:
- client = cls(endpoint_name, **kwargs)
+ client = cls(server_name, **kwargs)
except (ConfigurationError, TypeError) as err:
_logger.warning(
- "Endpoint type '%s' defined in config section [%s] is badly "
- "configured: %s, skipping endpoint...",
- endpoint_type,
+ "Section [%s] of %s is badly configured: %s, skipping server...",
section,
+ CONFIGFILE,
err,
)
else:
diff --git a/python/nav/dhcpstats/common.py b/python/nav/dhcpstats/common.py
new file mode 100644
index 0000000000..15ee7dcf2c
--- /dev/null
+++ b/python/nav/dhcpstats/common.py
@@ -0,0 +1,357 @@
+#
+# Copyright (C) 2025 Sikt
+#
+# This file is part of Network Administration Visualized (NAV).
+#
+# NAV is free software: you can redistribute it and/or modify it under
+# the terms of the GNU General Public License version 3 as published by
+# the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details. You should have received a copy of the GNU General Public License
+# along with NAV. If not, see .
+#
+"""
+Common classes and functions used by DHCP API clients and various other parts of
+NAV that want to make use of DHCP stats.
+"""
+
+from collections import defaultdict
+from dataclasses import dataclass
+from typing import Any, Iterable, Literal, Optional, Union
+
+import IPy
+
+from nav.metrics.graphs import (
+ aliased_series,
+ diffed_series,
+ json_graph_url,
+ nonempty_series,
+ summed_series,
+)
+from nav.metrics.names import get_expanded_nodes, safe_name
+from nav.metrics.templates import metric_path_for_dhcp
+
+
+# Type expected by functions in NAV that send stats to a Graphite/Carbon backend. Values
+# of this type are interpreted as (path, (timestamp, value)).
+GraphiteMetric = tuple[str, tuple[float, int]]
+
+
+def fetch_graph_urls_for_prefixes(prefixes: list[IPy.IP]) -> list[str]:
+ """
+ Takes a list of IP prefixes, queries Graphite for DHCP stat paths, and
+ returns a list of Graphite graph URLs; one URL for each group of DHCP stat
+ paths in Graphite where at least one path in the group represents a range of
+ IP addresses that intersects with one or more of the given prefixes.
+
+ Each returned Graphite URL points to JSON graph data and can be supplied
+ directly to NAV's JavaScript RickshawGraph function.
+ """
+ if not prefixes:
+ return []
+
+ all_paths = fetch_paths_from_graphite()
+ grouped_paths = group_paths(all_paths)
+ filtered_grouped_paths = drop_groups_not_in_prefixes(grouped_paths, prefixes)
+
+ graph_urls: list[str] = []
+ for paths_of_same_group in sorted(filtered_grouped_paths):
+ paths_of_same_group = sorted(paths_of_same_group)
+ graph_lines = []
+ for path in paths_of_same_group:
+ assigned_addresses = aliased_series(
+ nonempty_series(
+ path.to_graphite_path("assigned"),
+ ),
+ name=f"Assigned addresses in {path.first_ip} - {path.last_ip}",
+ renderer="area",
+ )
+ graph_lines.append(assigned_addresses)
+
+ assert len(paths_of_same_group) > 0
+ path = paths_of_same_group[0] # Just select an arbitrary path instance in the group
+ unassigned_addresses = aliased_series(
+ diffed_series(
+ summed_series(
+ nonempty_series(
+ path.to_graphite_path("total", wildcard_for_group=True)
+ ),
+ ),
+ summed_series(
+ nonempty_series(
+ path.to_graphite_path("assigned", wildcard_for_group=True)
+ ),
+ ),
+ ),
+ name="Unassigned addresses",
+ renderer="area",
+ color="#d9d9d9",
+ )
+ graph_lines.append(unassigned_addresses)
+
+ total_addresses = aliased_series(
+ summed_series(
+ nonempty_series(
+ path.to_graphite_path("total", wildcard_for_group=True)
+ ),
+ ),
+ name="Total addresses",
+ color="#707070",
+ )
+ graph_lines.append(total_addresses)
+
+ type_human = path.allocation_type + "s"
+ title = f"DHCP {type_human} in '{path.group_name}' on server '{path.server_name}'"
+ graph_urls.append(json_graph_url(*graph_lines, title=title))
+ return graph_urls
+
+
+def fetch_paths_from_graphite():
+ """
+ Fetches and returns all unique DHCP stat paths in Graphite when their
+ trailing metric_name path segment has been removed.
+ """
+ wildcard = metric_path_for_dhcp(
+ ip_version=safe_name("{4,6}"),
+ server_name=safe_name("*"),
+ allocation_type=safe_name("{range,pool,subnet}"),
+ group_name_source=safe_name("{custom_groups,special_groups}"),
+ group_name=safe_name("*"),
+ first_ip=safe_name("*"),
+ last_ip=safe_name("*"),
+ metric_name="total",
+ )
+ graphite_paths = get_expanded_nodes(wildcard)
+
+ native_paths: list[DhcpPath] = []
+ for graphite_path in graphite_paths:
+ try:
+ native_path = DhcpPath.from_graphite_path(graphite_path)
+ except ValueError:
+ pass
+ else:
+ native_paths.append(native_path)
+ return native_paths
+
+
+def group_paths(paths: list["DhcpPath"]):
+ """
+ Takes a list of DhcpPath instances and partitions it into multiple lists,
+ such that two DhcpPath instances belong to the same list if and only if they
+ have the same group data.
+ """
+ grouped_paths: dict[Any, list[DhcpPath]] = defaultdict(list)
+ for path in paths:
+ group_total = path.to_graphite_path("total", wildcard_for_group=True)
+ group_assigned = path.to_graphite_path("assigned", wildcard_for_group=True)
+ grouped_paths[(group_total, group_assigned)].append(path)
+ return list(grouped_paths.values())
+
+
+def drop_groups_not_in_prefixes(grouped_paths: list[list["DhcpPath"]], prefixes: list[IPy.IP]):
+ """
+ Takes a list of grouped DhcpPath instances, and returns only the groups that
+ have at least one DhcpPath instance that intersects with one or more of the
+ given prefixes.
+ """
+ grouped_paths_to_keep: list[list[DhcpPath]] = []
+ for paths_of_same_group in grouped_paths:
+ if any(
+ path.intersects(prefixes)
+ for path in paths_of_same_group
+ ):
+ grouped_paths_to_keep.append(paths_of_same_group)
+ return grouped_paths_to_keep
+
+
+@dataclass(frozen=True, order=True)
+class DhcpPath:
+ """
+ Represents a path to a DHCP stat in Graphite sans the stat's metric_name.
+ The absence of metric_name means that all of the following DHCP stat paths
+ in Graphite:
+
+ nav.dhcp.4.kea-osl.range.custom_groups.staff.1_0_1_1.1_0_1_255.assigned
+ nav.dhcp.4.kea-osl.range.custom_groups.staff.1_0_1_1.1_0_1_255.total
+ nav.dhcp.4.kea-osl.range.custom_groups.staff.1_0_1_1.1_0_1_255.declined
+
+ are represented by:
+
+ DhcpPath(
+ ip_version=4,
+ server_name="kea-osl",
+ allocation_type="range",
+ group_name_source="custom_groups",
+ group_name="staff",
+ first_ip=IP("1.0.1.1"),
+ last_ip=IP("1.0.1.255"),
+ )
+
+ Instantiate me via :meth from_external_info: if path data is sourced from an
+ external source such as a DHCP server.
+ Instantiate me via :meth from_graphite_path: if path data is sourced from
+ NAV's Graphite database.
+ Use me to do the following translations, with validity checks:
+ external info (from DHCP server) --> DhcpPath <--> Graphite path
+ """
+ # Fields are ordered according to how we want instances to be sorted,
+ # *not* how fields occur in the respective paths in Graphite
+ server_name: str
+ group_name_source: Literal["special_groups", "custom_groups"]
+ group_name: str
+ allocation_type: Literal["range", "pool", "subnet"]
+ ip_version: int
+ first_ip: IPy.IP
+ last_ip: IPy.IP
+
+ @classmethod
+ def from_graphite_path(cls, graphite_path: str):
+ """
+ Instantiate me from a path to a DHCP stat (either sans metric_name or
+ not) from Graphite.
+
+ > my_path = DhcpPath.from_external_info(...)
+ > DhcpPath.from_graphite_path(my_path.to_graphite_path("foo")) == my_path
+ True
+ """
+ parts = graphite_path.split(".")
+ if not len(parts) >= 9:
+ raise ValueError(
+ f"Expected graphite_path {graphite_path!r} to have at least 9 dot-separated segments"
+ )
+ ip_version = parts[2]
+ server_name = parts[3]
+ allocation_type = parts[4]
+ group_name_source = parts[5]
+ group_name = parts[6]
+ first_ip = parts[7]
+ last_ip = parts[8]
+
+ allowed_group_sources = ("special_groups", "custom_groups")
+ if group_name_source not in allowed_group_sources:
+ raise ValueError(f"group_name_source {group_name_source!r} is not in {allowed_group_sources!r}")
+
+ allowed_allocation_types = ("range", "pool", "subnet")
+ if allocation_type not in allowed_allocation_types:
+ raise ValueError(f"allocation_type {allocation_type!r} is not in {allowed_allocation_types!r}")
+
+ first_ip = cls._unescape_graphite_address(first_ip)
+ last_ip = cls._unescape_graphite_address(last_ip)
+ cls._check_ip_pair(first_ip, last_ip)
+
+ if str(first_ip.version()) != ip_version or str(last_ip.version()) != ip_version:
+ raise ValueError(f"first_ip {first_ip!r} or last_ip {last_ip!r} not of same version as expected ip_version {ip_version!r}")
+
+ return cls(
+ ip_version=first_ip.version(),
+ server_name=server_name,
+ allocation_type=allocation_type,
+ group_name_source=group_name_source,
+ group_name=group_name,
+ first_ip=first_ip,
+ last_ip=last_ip,
+ )
+
+ @classmethod
+ def from_external_info(
+ cls,
+ server_name: str,
+ allocation_type: Literal["range", "pool", "subnet"],
+ group_name: Optional[str],
+ first_ip: Union[str, IPy.IP],
+ last_ip: Union[str, IPy.IP],
+ ):
+ """
+ Instantiate me with path data sourced from an external source such as a
+ DHCP server.
+
+ if group_name is missing, group_name_source is set to "special_groups"
+ and group_name is set to "standalone" such that the returned instance
+ will be treated as belonging to a group of size one.
+ otherwise, group_name_source is set to "custom_groups".
+
+ if first_ip cannot be parsed to an IP address, raises a ValueError
+ if last_ip cannot be parsed to an IP address, raises a ValueError
+ if first_ip > last_ip, raises a ValueError
+ otherwise, returns the Path instance.
+ """
+ if group_name is None:
+ group_name_source = "special_groups"
+ group_name = "standalone"
+ else:
+ group_name_source = "custom_groups"
+ group_name = group_name
+
+ first_ip = IPy.IP(IPy.IP(first_ip)[0])
+ last_ip = IPy.IP(IPy.IP(last_ip)[-1])
+ cls._check_ip_pair(first_ip, last_ip)
+
+ return cls(
+ ip_version=first_ip.version(),
+ server_name=server_name,
+ allocation_type=allocation_type,
+ group_name_source=group_name_source,
+ group_name=group_name,
+ first_ip=first_ip,
+ last_ip=last_ip,
+ )
+
+ def to_graphite_path(self, metric_name, wildcard_for_group=False):
+ """
+ Return me as a path recognized by Graphite.
+ """
+ if wildcard_for_group and not self._is_standalone():
+ first_ip = safe_name("*")
+ last_ip = safe_name("*")
+ else:
+ first_ip = self.first_ip.strNormal()
+ last_ip = self.last_ip.strNormal()
+
+ return metric_path_for_dhcp(
+ ip_version=self.ip_version,
+ server_name=self.server_name,
+ allocation_type=self.allocation_type,
+ group_name_source=self.group_name_source,
+ group_name=self.group_name,
+ first_ip=first_ip,
+ last_ip=last_ip,
+ metric_name=metric_name,
+ )
+
+ def intersects(self, prefixes: Iterable[IPy.IP]):
+ """
+ Check if the range of IP addresses between self.first_ip and self.last_ip
+ intersect with any of the given prefixes.
+ """
+ for prefix in prefixes:
+ if (
+ self.first_ip in prefix
+ or self.last_ip in prefix
+ or self.first_ip < prefix < self.last_ip
+ ):
+ return True
+ return False
+
+ @staticmethod
+ def _unescape_graphite_address(escaped_address: str) -> IPy.IP:
+ parts = escaped_address.split("_")
+ if len(parts) == 4:
+ return IPy.IP(".".join(parts))
+ elif len(parts) == 8:
+ return IPy.IP(":".join(parts))
+ else:
+ raise ValueError
+
+ @staticmethod
+ def _check_ip_pair(first_ip: IPy.IP, last_ip: IPy.IP):
+ if first_ip.version() != last_ip.version():
+ raise ValueError(f"first_ip {first_ip!r} is not of same version as last_ip {last_ip!r}")
+
+ if first_ip > last_ip:
+ raise ValueError(f"first_ip {first_ip!r} greater than last_ip {last_ip!r}")
+
+ def _is_standalone(self):
+ return self.group_name_source == "special_groups" and self.group_name == "standalone"
diff --git a/python/nav/dhcpstats/kea_dhcp.py b/python/nav/dhcpstats/kea_dhcp.py
index 39067f3a40..800b6dfc3f 100644
--- a/python/nav/dhcpstats/kea_dhcp.py
+++ b/python/nav/dhcpstats/kea_dhcp.py
@@ -29,6 +29,7 @@
from requests import RequestException, JSONDecodeError, Session
from requests.adapters import HTTPAdapter, Retry
+from nav.dhcpstats.common import DhcpPath, GraphiteMetric
from nav.dhcpstats.errors import (
CommunicationError,
KeaEmpty,
@@ -38,61 +39,67 @@
KeaUnsupported,
)
from nav.errors import ConfigurationError
-from nav.metrics.templates import metric_path_for_dhcp_pool
-
_logger = logging.getLogger(__name__)
@dataclass(order=True, frozen=True)
class Pool:
- """A Kea DHCP configured address pool"""
+ """
+ A pool configured in a Kea DHCP server.
+
+ Note that what is called a pool in Kea is called a range in NAV because it
+ is a continuous set of IP addresses (i.e. it contains all IP addresses
+ between a first and a last IP address).
+ """
subnet_id: int
pool_id: int
- name: str
- range_start: IP
- range_end: IP
-
-
-GraphiteMetric = tuple[str, tuple[float, int]]
+ group_name: Optional[str]
+ first_ip: IP
+ last_ip: IP
class Client:
"""
- Fetches DHCP stats for each address pool managed by some Kea DHCP server by using
- the Kea API. See 'Client.fetch_stats()'.
-
- Note: This client assumes no hooks have been installed into the Kea DHCP
- server. The 'lease-stats' hook is required for reliable stats when
- multiple servers share the same lease database because the standard
- commands issue the cache, not the DB. This client does not support the
- hook. Anyhow, the hook doesn't support fetching statistics on a
+ Fetches DHCP stats for each pool configured in a Kea DHCP server by using
+ its Kea API. See 'Client.fetch_stats()'.
+
+ Note: This client does not assume that any hooks have been installed into
+ the Kea DHCP server. Kea offers a 'lease-stats' hook that comes with
+ extra API commands which become necessary to use if one wants reliable
+ stats when using a setup where multiple Kea DHCP servers share the
+ same underlying lease database since the standard API commands query a
+ server's cache, not the underlying database. This client does not use
+ the hook. Anyhow, the hook doesn't support fetching statistics on a
per-pool basis, only per-subnet basis, which is too coarse for us.
See https://kea.readthedocs.io/en/kea-2.6.3/arm/hooks.html#libdhcp-stat-cmds-so-statistics-commands-for-supplemental-lease-statistics.
"""
def __init__(
self,
- name: str,
+ server_name: str,
url: str,
dhcp_version: int = 4,
- http_basic_username: str = "",
- http_basic_password: str = "",
- client_cert_path: str = "",
- client_cert_key_path: str = "",
- user_context_poolname_key: str = "name",
+ http_basic_username: Optional[str] = None,
+ http_basic_password: Optional[str] = None,
+ client_cert_path: Optional[str] = None,
+ client_cert_key_path: Optional[str] = None,
+ user_context_groupname_key: str = "group",
timeout: float = 5.0,
):
- self._name: str = name
+ if not (url.lower().startswith("https://") or url.lower().startswith("http://")):
+ raise ConfigurationError("url must have an HTTPS or HTTP scheme")
+
+ self._server_name: str = server_name
self._url: str = url
self._dhcp_version: int = dhcp_version
- self._http_basic_user: str = http_basic_username
- self._http_basic_password: str = http_basic_password
- self._client_cert_path: str = client_cert_path
- self._client_key_path: str = client_cert_key_path
- self._user_context_poolname_key: str = user_context_poolname_key
+ self._http_basic_username: Optional[str] = http_basic_username
+ self._http_basic_password: Optional[str] = http_basic_password
+ self._client_cert_path: Optional[str] = client_cert_path
+ self._client_key_path: Optional[str] = client_cert_key_path
+ self._user_context_groupname_key: str = user_context_groupname_key
self._timeout: float = timeout
self._kea_config: Optional[dict] = None
@@ -112,29 +119,30 @@ def __init__(
def __str__(self):
return (
- f"API client for Kea DHCPv{self._dhcp_version} endpoint '{self._name}' at "
+ f"client for Kea DHCPv{self._dhcp_version} server '{self._server_name}' at "
f"{self._url}"
)
def fetch_stats(self) -> list[GraphiteMetric]:
"""
Fetches and returns a list containing the most recent stats of interest
- for each DHCP address pool. The stats of interest are:
+ for each pool configured in the Kea DHCP server. The stats of interest
+ are:
- * The total amount of addresses in that pool.
+ * The total amount of addresses in that Kea pool.
(Named "total" addresses in NAV.)
- * The amount of currently assigned (aka. leased) addresses in that pool.
+ * The amount of currently assigned (aka. leased) addresses in that Kea pool.
(Named "assigned" addresses in NAV.)
- * The amount of declined addresses in that pool. That is, addresses in
- that pool that are erroneously used by unknown entities and therefore
+ * The amount of declined addresses in that Kea pool. That is, addresses in
+ that Kea pool that are erroneously used by unknown entities and therefore
not available for assignment. The set of declined addresses is a
subset of the set of assigned addresses.
(Named "declined" addresses in NAV.)
If the Kea API responds with an empty response to one or more of the
- stats of interest for a pool, these stats will be missing in the
+ stats of interest for a Kea pool, these stats will be missing in the
returned list, but a list is still succesfully returned. Other errors
during this call will cause a subclass of
nav.dhcpstats.errors.CommunicationError or nav.errors.ConfigurationError
@@ -195,7 +203,7 @@ def _fetch_kea_config(self) -> dict:
def _fetch_kea_config_hash(self) -> Optional[str]:
"""
- Returns the hash of the current configation of the Kea DHCP server.
+ Returns the hash of the current configuration of the Kea DHCP server.
(API command: 'config-hash-get'.)
"""
try:
@@ -204,7 +212,7 @@ def _fetch_kea_config_hash(self) -> Optional[str]:
.get("arguments", {})
.get("hash", None)
)
- except KeaUnsupported as err:
+ except CommunicationError as err:
_logger.debug(str(err))
return None
@@ -258,14 +266,14 @@ def _send_query(self, command: str, **kwargs) -> dict:
responses = responses.json()
except JSONDecodeError as err:
raise KeaUnexpected(
- f"{self._url} does not look like a Kea API endpoint; "
+ f"{self._url} does not look like a Kea API; "
f"response to command {command} was not valid JSON",
) from err
except RequestException as err:
raise CommunicationError from err
# Any valid response from Kea is a JSON list with one entry corresponding to the
- # response from either the dhcp4 or dhcp6 service we queried
+ # response from the dhcp server "service" we queried
if not (
isinstance(responses, list)
and len(responses) == 1
@@ -323,7 +331,7 @@ def _create_session(self) -> Session:
session.mount("https://", HTTPAdapter(max_retries=retries))
session.mount("http://", HTTPAdapter(max_retries=retries))
- https = self._url.startswith("https://")
+ https = self._url.lower().startswith("https://")
if https:
_logger.debug("Using HTTPS")
@@ -332,15 +340,15 @@ def _create_session(self) -> Session:
"Using HTTP to request potentially sensitive data such as API passwords"
)
- if self._http_basic_user and self._http_basic_password:
+ if self._http_basic_username is not None and self._http_basic_password is not None:
_logger.debug("Using HTTP Basic Authentication")
if not https:
_logger.warning("Using HTTP Basic Authentication without HTTPS")
- session.auth = (self._http_basic_user, self._http_basic_password)
+ session.auth = (self._http_basic_username, self._http_basic_password)
else:
_logger.debug("Not using HTTP Basic Authentication")
- if self._client_cert_path:
+ if self._client_cert_path is not None:
_logger.debug("Using client certificate authentication")
if not https:
raise ConfigurationError(
@@ -348,7 +356,7 @@ def _create_session(self) -> Session:
"urls with HTTPS scheme"
)
_logger.debug("Certificate path: '%s'", self._client_cert_path)
- if self._client_key_path:
+ if self._client_key_path is not None:
_logger.debug("Certificate key path: '%s'", self._client_key_path)
session.cert = (self._client_cert_path, self._client_key_path)
else:
@@ -387,65 +395,76 @@ def _pools_of_subnet(self, subnet: dict) -> Iterator[Pool]:
)
return
+ used_pool_ids = set()
for pool in subnet.get("pools", []):
try:
pool_id = int(pool["pool-id"])
- pool_start, pool_end = self._bounds_of_pool_range(pool["pool"])
- pool_name = self._name_of_pool(
- pool,
- fallback=f"pool-{pool_start.strNormal()}-{pool_end.strNormal()}",
- )
+ first_ip, last_ip = self._bounds_of_pool_range(pool["pool"])
+ group_name = self._group_name_of_pool(pool)
except (AttributeError, KeyError, TypeError, ValueError):
_logger.error(
- 'Could not parse pool in subnet %d from %s, skipping pool... '
- '(make sure every pool has "pool-id" and "pool" configured in the '
+ 'Could not parse Kea pool in subnet %d from %s, skipping Kea pool... '
+ '(make sure every Kea pool has "pool-id" and "pool" configured in the '
'Kea DHCP configuration)',
subnet_id,
self._url,
)
continue
- yield Pool(
- subnet_id=subnet_id,
- pool_id=pool_id,
- name=pool_name,
- range_start=pool_start,
- range_end=pool_end,
- )
+ if pool_id not in used_pool_ids:
+ yield Pool(
+ subnet_id=subnet_id,
+ pool_id=pool_id,
+ group_name=group_name,
+ first_ip=first_ip,
+ last_ip=last_ip,
+ )
+ used_pool_ids.add(pool_id)
+ else:
+ _logger.warning(
+ "Subnet %d from %s has multiple pools with the pool_id %d; "
+ "due to this, the pool from %s to %s with pool_id %d will be "
+ "ignored by NAV",
+ subnet_id,
+ self._url,
+ pool_id,
+ first_ip,
+ last_ip,
+ pool_id,
+ )
def _stats_of_pool(self, raw_stats: dict, pool: Pool) -> Iterator[GraphiteMetric]:
"""
Returns as graphite metric tuples the most recent stats of interest in
- raw_stats, for the given pool.
+ raw_stats, for the given Kea pool.
raw_stats is a dictionary representing the result of the Kea API command
'statistic-get-all'.
"""
-
+ path_prefix = DhcpPath.from_external_info(
+ server_name=self._server_name,
+ allocation_type="range",
+ group_name=pool.group_name,
+ first_ip=pool.first_ip,
+ last_ip=pool.last_ip,
+ )
for nav_stat_name, api_stat_name in self._api_namings:
statistic = f"subnet[{pool.subnet_id}].pool[{pool.pool_id}].{api_stat_name}"
samples = raw_stats.get(statistic, [])
if len(samples) == 0:
_logger.info(
- "No stats found when querying for '%s' in pool having range "
+ "No stats found when querying for '%s' in Kea pool having range "
"'%s-%s' and name '%s'",
api_stat_name,
- pool.range_start,
- pool.range_end,
- pool.name,
+ pool.first_ip,
+ pool.last_ip,
+ pool.group_name,
)
else:
# The reference API client assumes samples[0] is the most recent sample
# See https://gitlab.isc.org/isc-projects/stork/-/blob/4193375c01e3ec0b3d862166e2329d76e686d16d/backend/server/apps/kea/rps.go#L223-227
value, _timestring = samples[0]
- path = metric_path_for_dhcp_pool(
- self._dhcp_version,
- self._name,
- pool.name,
- pool.range_start,
- pool.range_end,
- nav_stat_name,
- )
+ path = path_prefix.to_graphite_path(nav_stat_name)
yield (path, (self._start_time, value))
def _bounds_of_pool_range(self, pool_range: str) -> tuple[IP, IP]:
@@ -472,29 +491,28 @@ def _bounds_of_pool_range(self, pool_range: str) -> tuple[IP, IP]:
range_end = ip[-1]
return range_start, range_end
- def _name_of_pool(self, pool: dict, fallback: str) -> str:
+ def _group_name_of_pool(self, pool: dict) -> Optional[str]:
"""
- Looks for a pool name in a pool of a Kea DHCP configuration.
- Returns pool name if found, else returns a fallback name.
+ Looks for a group name in a pool of a Kea DHCP configuration. Returns
+ group name if found, else returns None.
"""
- pool_name_key = self._user_context_poolname_key
- pool_name = pool.get("user-context", {}).get(pool_name_key, None)
- if not isinstance(pool_name, str):
+ group_name_key = self._user_context_groupname_key
+ group_name = pool.get("user-context", {}).get(group_name_key, None)
+ if not isinstance(group_name, str):
_logger.debug(
- '%s did not find a pool name when looking up "%s" in "user-context" '
- 'for a pool, defaulting to name "%s"... ',
+ '%s did not find a group name when looking up "%s" in "user-context" '
+ 'for a Kea pool',
self,
- pool_name_key,
- fallback,
+ group_name_key,
)
- return fallback
- return pool_name
+ return None
+ return group_name
def _log_consistency_with_upstream_pools(self, local_pools: list):
"""
+ The part of the Kea API that deals with pools identifies each Kea pool by
+ The part of the Kea API that deal with pools identify each Kea pool by
ID. This function logs a warning if the mapping between pool ID and pool
- object differs between the pools stored in the client (local_pools) and
+ object differs between the pools known to the client (local_pools) and
the pools known to the Kea API right now.
"""
upstream_config = self._fetch_kea_config()
@@ -507,9 +525,10 @@ def _log_consistency_with_upstream_pools(self, local_pools: list):
if sorted(local_pools) != sorted(upstream_pools):
_logger.warning(
- "The DHCP server's address pool configuration was modified while stats "
+ "The Kea DHCPv4 server's pool configuration was modified while stats "
"were being fetched. This may cause stats collected during this run to "
- "be associated with the wrong address pool."
+ "be associated with the wrong Kea pool (and subsequently wrong range in "
+ "NAV)."
)
def _log_runtime(
@@ -517,10 +536,10 @@ def _log_runtime(
):
"""
Logs a debug message about the time spent during a 'self.fetch_stats()'
- run and the amount of pools and stats seen.
+ run and the amount of Kea pools and stats seen.
"""
_logger.debug(
- "Fetched %d stats from %d pool(s) in %.2f seconds from %s",
+ "Fetched %d stats from %d Kea pool(s) in %.2f seconds from %s",
n_stats,
n_pools,
end_time - start_time,
diff --git a/python/nav/metrics/graphs.py b/python/nav/metrics/graphs.py
index 48267389f9..f6a67a8701 100644
--- a/python/nav/metrics/graphs.py
+++ b/python/nav/metrics/graphs.py
@@ -348,3 +348,60 @@ def _convert_char(char):
pat = "".join(_convert_char(c) for c in series)
return re.compile(pat)
+
+
+def aliased_series(series_list: str, name: str, **meta: str) -> str:
+ """
+ Add a name to the supplied series_list.
+ https://graphite.readthedocs.io/en/latest/functions.html#graphite.render.functions.alias
+
+ Supported keyword arguments:
+ :param color: the css color of the graph
+ :param renderer: Rickshaw graph renderer to use; either "area" or "line"
+ """
+ tmpl = "alias({series_list}, '{name}')"
+ if len(meta) > 0:
+ # note how key-value pairs from the meta-dict is added in a quite
+ # special way to the resulting name; NAV's custom JavaScript Rickshaw
+ # graph constructor reconstructs the meta-dict and strips it away from
+ # the resulting name before it is displayed. It then uses the meta-dict
+ # to customize the graph, such as adding a specific color attribute
+ name = ";;".join(f"{key}={val}" for key, val in meta.items()) + ";;" + name
+ return tmpl.format(series_list=series_list, name=name)
+
+
+def summed_series(*series_list: str) -> str:
+ """
+ Sum each series in all supplied series_lists and return as a single series_list.
+ https://graphite.readthedocs.io/en/latest/functions.html#graphite.render.functions.sumSeries
+ """
+ tmpl = "sumSeries({series_lists})"
+ return tmpl.format(series_lists=",".join(series_list))
+
+
+def diffed_series(*series_list: str) -> str:
+ """
+ Subtract from the first series in the first seriesList the rest of the
+ supplied series from the first and following seriesLists.
+ https://graphite.readthedocs.io/en/latest/functions.html#graphite.render.functions.diffSeries
+ """
+ tmpl = "diffSeries({series_lists})"
+ return tmpl.format(series_lists=",".join(series_list))
+
+
+def nonempty_series(series_list: str, x_files_factor: float = 0.0) -> str:
+ """
+ Out of all metrics in series_list, draw only the metrics with
+ non-empty data.
+ https://graphite.readthedocs.io/en/latest/functions.html#graphite.render.functions.removeEmptySeries
+ """
+ tmpl = "removeEmptySeries({series_list}, {x_files_factor})"
+ return tmpl.format(series_list=series_list, x_files_factor=x_files_factor)
+
+
+def json_graph_url(*series_list: str, title: str) -> str:
+ """
+ Create a url for fetching the JSON data necessary to graph the supplied
+ series_lists.
+ """
+ return get_simple_graph_url(series_list, format="json", title=title)
diff --git a/python/nav/metrics/names.py b/python/nav/metrics/names.py
index c9039742aa..46af066539 100644
--- a/python/nav/metrics/names.py
+++ b/python/nav/metrics/names.py
@@ -28,6 +28,17 @@
LEGAL_METRIC_CHARACTERS = string.ascii_letters + string.digits + "-_"
+class safe_name(str):
+ """
+ Marks a string as not needing to be escaped by :func:`escape_metric_name`
+ """
+
+ def __str__(self):
+ # This ensures that safe_name strings aren't weakened to normal
+ # strings on calls to str()
+ return self
+
+
def escape_metric_name(name):
"""
Escapes any character of `name` that may not be used in graphite metric
@@ -35,6 +46,8 @@ def escape_metric_name(name):
"""
if name is None:
return name
+ if isinstance(name, safe_name):
+ return name
name = name.replace('\x00', '') # some devices have crazy responses!
name = ''.join([c if c in LEGAL_METRIC_CHARACTERS else "_" for c in name])
return name
@@ -64,6 +77,23 @@ def get_all_leaves_below(top, ignored=None):
return list(itertools.chain(*paths))
+def get_expanded_nodes(path):
+ """
+ Expands any wildcard in path and returns a list of all matching paths.
+
+ :param path: A search string, e.g. "nav.{a,b}.*.*"
+ :returns: A list of expanded metric paths,
+ e.g. ["nav.a.1.1", "nav.a.2.1", "nav.b.1.1", "nav.b.1.2"]
+ """
+ data = raw_metric_query(path, operation="expand")
+ if not isinstance(data, dict):
+ return []
+ result = data.get("results", [])
+ if not isinstance(result, list):
+ return []
+ return result
+
+
def get_metric_leaf_children(path):
"""Returns a list of available graphite leaf nodes just below path.
@@ -119,15 +149,18 @@ def nodewalk(top, ignored=None):
yield x
-def raw_metric_query(query):
+def raw_metric_query(query, operation="find"):
"""Runs a query for metric information against Graphite's REST API.
:param query: A search string, e.g. "nav.devices.some-gw_example_org.*"
- :returns: A list of matching metrics, each represented by a dict.
+ :param operation: One of "find" or "expand",
+ see https://graphite.readthedocs.io/en/1.1.10/metrics_api.html
+ :returns: The JSON response from Graphite, or an empty list if response
+ could not be decoded.
"""
base = CONFIG.get("graphiteweb", "base")
- url = urljoin(base, "/metrics/find")
+ url = urljoin(base, "/metrics/" + operation)
query = urlencode({'query': query})
url = "%s?%s" % (url, query)
diff --git a/python/nav/metrics/templates.py b/python/nav/metrics/templates.py
index 7444e1c44f..fc72c29550 100644
--- a/python/nav/metrics/templates.py
+++ b/python/nav/metrics/templates.py
@@ -18,9 +18,7 @@
Metric naming templates for various things that NAV sends/retrieves from
Graphite.
"""
-
from nav.metrics.names import escape_metric_name
-import IPy
def metric_prefix_for_ipdevpoll_job(sysname, job_name):
@@ -186,20 +184,24 @@ def metric_path_for_multicast_usage(group, sysname):
)
-def metric_path_for_dhcp_pool(
- ip_version, server_name, pool_name, range_start, range_end, metric_name
+def metric_path_for_dhcp(
+ ip_version,
+ server_name,
+ allocation_type,
+ group_name_source,
+ group_name,
+ first_ip,
+ last_ip,
+ metric_name,
):
- tmpl = (
- "nav.dhcp.{ip_version}.pool.{server_name}.{pool_name}."
- "{range_start}.{range_end}.{metric_name}"
- )
- range_start = IPy.IP(range_start).strNormal()
- range_end = IPy.IP(range_end).strNormal()
+ tmpl = "nav.dhcp.{ip_version}.{server_name}.{allocation_type}.{group_name_source}.{group_name}.{first_ip}.{last_ip}.{metric_name}"
return tmpl.format(
ip_version=escape_metric_name(str(ip_version)),
server_name=escape_metric_name(server_name),
- pool_name=escape_metric_name(pool_name),
- range_start=escape_metric_name(range_start),
- range_end=escape_metric_name(range_end),
+ allocation_type=escape_metric_name(allocation_type),
+ group_name_source=escape_metric_name(group_name_source),
+ group_name=escape_metric_name(group_name),
+ first_ip=escape_metric_name(str(first_ip)),
+ last_ip=escape_metric_name(str(last_ip)),
metric_name=escape_metric_name(metric_name),
)
diff --git a/python/nav/models/manage.py b/python/nav/models/manage.py
index 0beec174e7..f2e3b400ac 100644
--- a/python/nav/models/manage.py
+++ b/python/nav/models/manage.py
@@ -41,7 +41,13 @@
from nav import util
from nav.bitvector import BitVector
from nav.metrics.data import get_netboxes_availability
-from nav.metrics.graphs import get_simple_graph_url, Graph
+from nav.metrics.graphs import (
+ aliased_series,
+ get_simple_graph_url,
+ Graph,
+ json_graph_url,
+ summed_series,
+)
from nav.metrics.names import get_all_leaves_below
from nav.metrics.templates import (
metric_prefix_for_interface,
@@ -57,6 +63,7 @@
from nav.models.fields import CIDRField
import nav.models.event
from nav.oids import get_enterprise_id
+import nav.dhcpstats.common
_logger = logging.getLogger(__name__)
@@ -1458,17 +1465,27 @@ def get_router_ports(self):
def get_graph_url(self):
"""Creates the graph url used for graphing this prefix"""
path = partial(metric_path_for_prefix, self.net_address)
- ip_count = 'alias({0}, "IP addresses ")'.format(path('ip_count'))
- ip_range = 'alias({0}, "Max addresses")'.format(path('ip_range'))
- mac_count = 'alias({0}, "MAC addresses")'.format(path('mac_count'))
+ ip_count = aliased_series(path('ip_count'), name="IP addresses")
+ ip_range = aliased_series(path('ip_range'), name="Max addresses")
+ mac_count = aliased_series(path('mac_count'), name="MAC addresses")
metrics = [ip_count, mac_count]
if IPy.IP(self.net_address).version() == 4:
metrics.append(ip_range)
- return get_simple_graph_url(metrics, title=str(self), format='json')
+ return json_graph_url(*metrics, title=str(self))
def get_absolute_url(self):
return reverse('prefix-details', args=[self.pk])
+ def get_dhcp_graph_urls(self):
+ """
+ Creates urls to graphs showing range/pool/subnet utilization, with one
+ url (and one graph) per set of ranges/pools/subnets in graphite with the
+ same ip_version, server_name and group where at least one
+ range/pool/subnet intersects this prefix.
+ """
+ prefix = IPy.IP(self.net_address)
+ return nav.dhcpstats.common.fetch_graph_urls_for_prefixes([prefix])
+
class Vlan(models.Model):
"""From NAV Wiki: The vlan table defines the IP broadcast domain / vlan. A
@@ -1542,31 +1559,40 @@ def get_graph_url(self, family=4):
# Put metainformation in the alias so that Rickshaw can pick it up and
# know how to draw the series.
series = [
- "alias({}, 'renderer=area;;{}')".format(
+ aliased_series(
metric_path_for_prefix(prefix.net_address, 'ip_count'),
- prefix.net_address,
+ name=prefix.net_address,
+ renderer="area",
)
for prefix in prefixes
]
- if series:
- if family == 4:
- series.append(
- "alias(sumSeries(%s), 'Max addresses')"
- % ",".join(
- [
- metric_path_for_prefix(prefix.net_address, 'ip_range')
- for prefix in prefixes
- ]
- )
- )
- return get_simple_graph_url(
- series,
- title="Total IPv{} addresses on vlan {} - stacked".format(
- family, str(self)
- ),
- format='json',
+
+ if not series:
+ return
+
+ if family == 4:
+ ip_ranges = [
+ metric_path_for_prefix(prefix.net_address, 'ip_range')
+ for prefix in prefixes
+ ]
+ series.append(
+ aliased_series(summed_series(*ip_ranges), name="Max addresses"),
)
+ title = f"Total IPv{family} addresses on vlan {self} - stacked"
+
+ return json_graph_url(*series, title=title)
+
+ def get_dhcp_graph_urls(self):
+ """
+ Creates urls to graphs showing range/pool/subnet utilization, with one
+ url (and one graph) per set of ranges/pools/subnets in graphite with the
+ same ip_version, server_name and group where at least one
+ range/pool/subnet intersects this vlan.
+ """
+ prefixes = [IPy.IP(prefix.net_address) for prefix in self.prefixes.all()]
+ return nav.dhcpstats.common.fetch_graph_urls_for_prefixes(prefixes)
+
class NetType(models.Model):
"""From NAV Wiki: The nettype table defines network type;lan, core, link,
diff --git a/python/nav/web/info/prefix/views.py b/python/nav/web/info/prefix/views.py
index 359a9d5234..669b79c8f1 100644
--- a/python/nav/web/info/prefix/views.py
+++ b/python/nav/web/info/prefix/views.py
@@ -26,6 +26,7 @@
from nav.web import utils
from nav.web.auth.utils import get_account
+from nav.metrics.errors import GraphiteUnreachableError
from nav.models.manage import Prefix, Usage, PrefixUsage
from ..forms import SearchForm
@@ -140,6 +141,16 @@ def prefix_details(request, prefix_id):
context['form'] = PrefixUsageForm(instance=prefix)
context['can_edit'] = authorize_user(request)
+ try:
+ dhcp_graph_urls = prefix.get_dhcp_graph_urls()
+ graphite_error = False
+ except GraphiteUnreachableError:
+ dhcp_graph_urls = []
+ graphite_error = True
+
+ context['dhcp_graph_urls'] = dhcp_graph_urls
+ context['graphite_error'] = graphite_error
+
return render(request, 'info/prefix/details.html', context)
diff --git a/python/nav/web/info/vlan/views.py b/python/nav/web/info/vlan/views.py
index baf00a11b2..237db5a7dc 100644
--- a/python/nav/web/info/vlan/views.py
+++ b/python/nav/web/info/vlan/views.py
@@ -28,6 +28,7 @@
from nav.models.manage import Prefix, Vlan
from nav.web.utils import create_title
+from nav.metrics.errors import GraphiteUnreachableError
from nav.metrics.graphs import get_simple_graph_url
from nav.metrics.names import join_series
from nav.metrics.templates import metric_path_for_prefix
@@ -115,6 +116,13 @@ def vlan_details(request, vlanid):
navpath = get_path([(str(vlan), '')])
+ try:
+ dhcp_graph_urls = vlan.get_dhcp_graph_urls()
+ graphite_error = False
+ except GraphiteUnreachableError:
+ dhcp_graph_urls = []
+ graphite_error = True
+
return render(
request,
'info/vlan/vlandetails.html',
@@ -126,6 +134,8 @@ def vlan_details(request, vlanid):
'has_v4': has_v4,
'has_v6': has_v6,
'title': create_title(navpath),
+ 'dhcp_graph_urls': dhcp_graph_urls,
+ 'graphite_error': graphite_error,
},
)
diff --git a/python/nav/web/static/js/src/plugins/rickshaw_graph.js b/python/nav/web/static/js/src/plugins/rickshaw_graph.js
index 2964b118ef..bd27e41299 100644
--- a/python/nav/web/static/js/src/plugins/rickshaw_graph.js
+++ b/python/nav/web/static/js/src/plugins/rickshaw_graph.js
@@ -69,6 +69,9 @@ define([
serie.key = name;
serie.name = RickshawUtils.filterFunctionCalls(name);
serie.renderer = meta.renderer ? meta.renderer : 'line';
+ if (meta.color !== undefined) {
+ serie.color = meta.color;
+ }
// If this is a nav-metric, typically very long, display only the last two "parts"
if (serie.name.substr(0, 4) === 'nav.') {
diff --git a/python/nav/web/templates/info/prefix/details.html b/python/nav/web/templates/info/prefix/details.html
index 98b1a864f8..0043789ace 100644
--- a/python/nav/web/templates/info/prefix/details.html
+++ b/python/nav/web/templates/info/prefix/details.html
@@ -16,6 +16,9 @@
$('.prefixgraph').each(function (index, element) {
new GraphFetcher($(element), element.dataset.url);
});
+ $('.dhcpgraph').each(function (index, element) {
+ new GraphFetcher($(element), element.dataset.url);
+ });
});
@@ -202,6 +205,21 @@
Prefix details for {{ prefix.net_address }}
+ {% if dhcp_graph_urls or graphite_error %}
+ {% if graphite_error %}
+ Graphite unreachable
+ {% else %}
+
+ {% for graph in dhcp_graph_urls %}
+
+ {% endfor %}
+
+ {% endif %}
+ {% endif %}
+
+
{% endblock %}
diff --git a/python/nav/web/templates/info/vlan/vlandetails.html b/python/nav/web/templates/info/vlan/vlandetails.html
index 912bc88d9b..8382a0b47d 100644
--- a/python/nav/web/templates/info/vlan/vlandetails.html
+++ b/python/nav/web/templates/info/vlan/vlandetails.html
@@ -16,6 +16,9 @@
$('.prefixgraph').each(function (index, element) {
new GraphFetcher($(element), element.dataset.url);
});
+ $('.dhcpgraph').each(function (index, element) {
+ new GraphFetcher($(element), element.dataset.url);
+ });
});
{% endblock %}
@@ -154,4 +157,20 @@ Prefix graphs
{% endfor %}
+ {% comment %} Graph containing DHCP statistics for total, assigned and declined addresses {% endcomment %}
+
+ {% if dhcp_graph_urls or graphite_error %}
+ DHCP graphs
+ {% if graphite_error %}
+ Graphite unreachable
+ {% else %}
+
+ {% for graph in dhcp_graph_urls %}
+ -
+
+
+ {% endfor %}
+
+ {% endif %}
+ {% endif %}
{% endblock %}
diff --git a/tests/unittests/dhcpstats/common_test.py b/tests/unittests/dhcpstats/common_test.py
new file mode 100644
index 0000000000..de30700200
--- /dev/null
+++ b/tests/unittests/dhcpstats/common_test.py
@@ -0,0 +1,312 @@
+from dataclasses import dataclass, replace
+from itertools import chain
+
+import IPy
+import pytest
+
+from nav.dhcpstats.common import DhcpPath, drop_groups_not_in_prefixes, group_paths
+from nav.metrics.templates import metric_path_for_dhcp
+
+@dataclass
+class Path:
+ native: DhcpPath
+ graphite: str
+
+@pytest.fixture(params=[
+ Path(
+ DhcpPath(
+ ip_version=4,
+ server_name="server1",
+ allocation_type="range",
+ group_name="group1",
+ group_name_source="custom_groups",
+ first_ip=IPy.IP("192.0.0.1"),
+ last_ip=IPy.IP("192.0.0.255"),
+ ),
+ "nav.dhcp.4.server1.range.custom_groups.group1.192_0_0_1.192_0_0_255",
+ ),
+ Path(
+ DhcpPath(
+ ip_version=6,
+ server_name="server1",
+ allocation_type="range",
+ group_name="group1",
+ group_name_source="custom_groups",
+ first_ip=IPy.IP("::1"),
+ last_ip=IPy.IP("::ffff"),
+ ),
+ "nav.dhcp.6.server1.range.custom_groups.group1.0_0_0_0_0_0_0_1.0_0_0_0_0_0_0_ffff",
+ ),
+ Path(
+ DhcpPath(
+ ip_version=6,
+ server_name="server1",
+ allocation_type="range",
+ group_name="standalone",
+ group_name_source="special_groups",
+ first_ip=IPy.IP("::1"),
+ last_ip=IPy.IP("::ffff"),
+ ),
+ "nav.dhcp.6.server1.range.special_groups.standalone.0_0_0_0_0_0_0_1.0_0_0_0_0_0_0_ffff",
+ )
+])
+def path(request):
+ return request.param
+
+class TestDhcpPath:
+ def test_to_graphite_path_should_return_expected_value(self, path):
+ assert path.native.to_graphite_path(metric_name="foo") == path.graphite + ".foo"
+
+ def test_to_graphite_path_should_be_reversed_by_from_graphite_path(self, path):
+ assert DhcpPath.from_graphite_path(path.native.to_graphite_path(metric_name="foo")) == path.native
+
+ def test_from_graphite_path_should_be_reversed_by_to_graphite_path(self, path):
+ assert DhcpPath.from_graphite_path(path.graphite).to_graphite_path(metric_name="foo") == path.graphite + ".foo"
+
+ @pytest.mark.parametrize(
+ "graphite_path",
+ [
+ "nav.dhcp.6.server1.range.custom_groups.group1.0_0_0_1.0_0_0_255",
+ "nav.dhcp.4.server1.range.custom_groups.group1.0_0_0_0_0_0_0_1.0_0_0_0_0_0_0_ffff",
+ "nav.dhcp.6.server1.range.custom_groups.group1.0_0_0_1.0_0_0_0_0_0_0_ffff",
+ "nav.dhcp.6.server1.range.custom_groups.group1.0_0_0_0_0_0_0_1.0_0_0_255",
+ ],
+ )
+ def test_from_graphite_path_should_fail_on_ip_version_mismatch(self, graphite_path):
+ with pytest.raises(ValueError):
+ DhcpPath.from_graphite_path(graphite_path)
+
+ @pytest.mark.parametrize(
+ "graphite_path,reason",
+ [
+ ("nav.dhcp.6.server1.range.custom_groups.group1.0_0_0_0_0_0_0_1", "missing segment"),
+ ("nav.dhcp.6.server1.range.invalid.group1.0_0_0_0_0_0_0_1.0_0_0_0_0_0_0_ffff", "invalid segment"),
+ ("nav.dhcp.6.server1.invalid.custom_groups.group1.0_0_0_0_0_0_0_1.0_0_0_0_0_0_0_ffff", "invalid segment"),
+ ("nav.dhcp.invalid.server1.range.custom_groups.group1.0_0_0_0_0_0_0_1.0_0_0_0_0_0_0_ffff", "invalid segment"),
+ ],
+ )
+ def test_from_graphite_path_should_fail_on_invalid_names(self, graphite_path, reason):
+ with pytest.raises(ValueError):
+ DhcpPath.from_graphite_path(graphite_path)
+ pytest.fail(f"Didn't fail when {reason!r}")
+
+ def test_from_graphite_path_should_work_with_metric_path_for_dhcp(self):
+ path_kwargs = {
+ "ip_version": 4,
+ "server_name": "server1",
+ "allocation_type": "range",
+ "group_name_source": "custom_groups",
+ "group_name": "group1",
+ "first_ip": IPy.IP("192.0.0.1"),
+ "last_ip": IPy.IP("192.0.0.255"),
+ }
+ assert DhcpPath.from_graphite_path(metric_path_for_dhcp(metric_name="foo", **path_kwargs)) == DhcpPath(**path_kwargs)
+ assert DhcpPath.from_graphite_path(metric_path_for_dhcp(metric_name="foo", **path_kwargs)).to_graphite_path(metric_name="foo") == metric_path_for_dhcp(metric_name="foo", **path_kwargs)
+
+ @pytest.mark.parametrize(
+ "server_name,allocation_type,group_name,first_ip,last_ip,expected",
+ [
+ ("server1", "range", "group1", "192.0.0.1", "192.0.0.255", DhcpPath(ip_version=4, server_name="server1", allocation_type="range", group_name="group1", group_name_source="custom_groups", first_ip=IPy.IP("192.0.0.1"), last_ip=IPy.IP("192.0.0.255"))),
+ ("server1", "subnet", None, "0.0.0.0", "255.255.255.255", DhcpPath(ip_version=4, server_name="server1", allocation_type="subnet", group_name="standalone", group_name_source="special_groups", first_ip=IPy.IP("0.0.0.0"), last_ip=IPy.IP("255.255.255.255"))),
+ ("server2", "pool", "group2", "::1", "::ffff", DhcpPath(ip_version=6, server_name="server2", allocation_type="pool", group_name="group2", group_name_source="custom_groups", first_ip=IPy.IP("::1"), last_ip=IPy.IP("::ffff"))),
+ ]
+ )
+ def test_from_external_info_should_work_when_given_valid_arguments(self, server_name, allocation_type, group_name, first_ip, last_ip, expected):
+ assert DhcpPath.from_external_info(server_name, allocation_type, group_name, first_ip, last_ip) == expected
+
+ @pytest.mark.parametrize(
+ "server_name,allocation_type,group_name,first_ip,last_ip,reason",
+ [
+ ("server1", "range", "group1", "::1", "0.0.0.255", "first_ip being IPv6 and last_ip being IPv4"),
+ ("server1", "range", "group1", "0.0.0.1", "::ffff", "first_ip being IPv4 and last_ip being IPv6"),
+ ("server1", "range", "192.0.0.1", "192.0.0.255", "group1", "last_ip being an invalid IP address"),
+ ("server1", "range", "group1", "192.0.0.255", "192.0.0.1", "first_ip being a greater IP address than last_ip"),
+ ("server2", "pool", "group2", "::1:", "::ffff", "first_ip being an invalid IP address"),
+ ("server2", "pool", "group2", "::1", "::fffff", "last_ip being an invalid IP address"),
+ ("server2", "pool", "group2", "::1", "::ffff:", "last_ip being an invalid IP address"),
+ ]
+ )
+ def test_from_external_info_should_fail_when_given_invalid_arguments(self, server_name, allocation_type, group_name, first_ip, last_ip, reason):
+ # We skip checking for errors that should be caught by a static type checker such
+ # as invalid argument types
+ with pytest.raises(ValueError):
+ DhcpPath.from_external_info(server_name, allocation_type, group_name, first_ip, last_ip)
+ pytest.fail(f"Didn't fail when {reason!r}")
+
+
+def test_group_paths_should_group_special_standalone_groups_paths_individually():
+ path0 = DhcpPath(
+ ip_version=6,
+ server_name="server1",
+ allocation_type="range",
+ group_name="standalone",
+ group_name_source="special_groups",
+ first_ip=IPy.IP("::0:1"),
+ last_ip=IPy.IP("::0:ffff"),
+ )
+ path1 = replace(path0, first_ip=IPy.IP("::1:1"), last_ip=IPy.IP("::1:ffff"))
+ path2 = replace(path0, first_ip=IPy.IP("::2:1"), last_ip=IPy.IP("::2:ffff"))
+ assert sorted(group_paths([path0, path1, path2])) == sorted([[path0], [path1], [path2]])
+
+
+def test_group_paths_should_group_special_groups_paths_separately_from_custom_groups_paths():
+ path0 = DhcpPath(
+ ip_version=6,
+ server_name="server1",
+ allocation_type="range",
+ group_name="standalone",
+ group_name_source="special_groups",
+ first_ip=IPy.IP("::0:1"),
+ last_ip=IPy.IP("::0:ffff"),
+ )
+ path1 = replace(path0, first_ip=IPy.IP("::1:1"), last_ip=IPy.IP("::1:ffff"))
+ path2 = replace(path0, first_ip=IPy.IP("::2:1"), last_ip=IPy.IP("::2:ffff"), group_name_source="custom_groups")
+ path3 = replace(path0, first_ip=IPy.IP("::3:1"), last_ip=IPy.IP("::3:ffff"), group_name_source="custom_groups")
+ assert sorted(group_paths([path0, path1, path2, path3])) == sorted([[path0], [path1], [path2, path3]])
+
+
+def test_group_paths_should_group_custom_groups_paths_together_when_only_ip_addresses_differ():
+ path0 = DhcpPath(
+ ip_version=6,
+ server_name="server1",
+ allocation_type="range",
+ group_name="group1",
+ group_name_source="custom_groups",
+ first_ip=IPy.IP("::0:1"),
+ last_ip=IPy.IP("::0:ffff"),
+ )
+ path1 = replace(path0, first_ip=IPy.IP("::1:1"), last_ip=IPy.IP("::1:ffff"))
+ path2 = replace(path0, first_ip=IPy.IP("::2:1"), last_ip=IPy.IP("::2:ffff"), group_name="different")
+ path3 = replace(path0, first_ip=IPy.IP("::3:1"), last_ip=IPy.IP("::3:ffff"), allocation_type="pool")
+ path4 = replace(path0, first_ip=IPy.IP("::4:1"), last_ip=IPy.IP("::4:ffff"), server_name="different")
+ path5 = replace(path0, first_ip=IPy.IP("0.0.5.1"), last_ip=IPy.IP("0.0.5.255"), ip_version=4)
+ assert sorted(group_paths([path0, path1, path2, path3, path4, path5])) == sorted([[path0, path1], [path2], [path3], [path4], [path5]])
+
+
+def test_group_paths_should_group_custom_groups_paths_separately_when_other_than_ip_addresses_differ():
+ path0 = DhcpPath(
+ ip_version=6,
+ server_name="server1",
+ allocation_type="range",
+ group_name="group1",
+ group_name_source="custom_groups",
+ first_ip=IPy.IP("::1"),
+ last_ip=IPy.IP("::ff"),
+ )
+ path1 = replace(path0, group_name="different")
+ path2 = replace(path0, server_name="different")
+ path3 = replace(path0, allocation_type="subnet")
+ path4 = replace(path0, first_ip=IPy.IP("0.0.0.1"), last_ip=IPy.IP("0.0.0.255"), ip_version=4)
+ assert sorted(group_paths([path0, path1, path2, path3, path4])) == sorted([[path0], [path1], [path2], [path3], [path4]])
+
+@pytest.mark.parametrize(
+ "prefixes,test_input,expected_output",
+ [
+ (
+ [IPy.IP("0.0.0.0/0")],
+ [
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_0.1_1_252_12.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_64.1_1_252_127.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_253_1.1_1_253_8.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_254_0.1_1_254_15.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_100_0.1_1_101_0.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group3.1_1_100_0.1_1_101_0.foo", # Inside
+ "nav.dhcp.4.server2.range.custom_groups.group1.1_1_100_0.1_1_101_0.foo", # Inside
+ "nav.dhcp.4.server2.range.custom_groups.group2.1_1_254_0.1_1_254_15.foo", # Inside
+ "nav.dhcp.4.server2.range.custom_groups.group3.1_1_253_1.1_1_253_8.foo", # Inside
+ "nav.dhcp.4.server3.range.custom_groups.group1.1_1_250_1.1_1_255_1.foo", # Inside
+ "nav.dhcp.4.server4.range.custom_groups.group1.1_1_253_1.1_1_255_1.foo", # Inside
+ "nav.dhcp.4.server5.range.custom_groups.group1.1_1_250_1.1_1_253_1.foo", # Inside
+ ],
+ [
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_0.1_1_252_12.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_64.1_1_252_127.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_253_1.1_1_253_8.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_254_0.1_1_254_15.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_100_0.1_1_101_0.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group3.1_1_100_0.1_1_101_0.foo", # Inside
+ "nav.dhcp.4.server2.range.custom_groups.group1.1_1_100_0.1_1_101_0.foo", # Inside
+ "nav.dhcp.4.server2.range.custom_groups.group2.1_1_254_0.1_1_254_15.foo", # Inside
+ "nav.dhcp.4.server2.range.custom_groups.group3.1_1_253_1.1_1_253_8.foo", # Inside
+ "nav.dhcp.4.server3.range.custom_groups.group1.1_1_250_1.1_1_255_1.foo", # Inside
+ "nav.dhcp.4.server4.range.custom_groups.group1.1_1_253_1.1_1_255_1.foo", # Inside
+ "nav.dhcp.4.server5.range.custom_groups.group1.1_1_250_1.1_1_253_1.foo", # Inside
+ ],
+ ),
+ (
+ [IPy.IP("1.1.252.0/24"), IPy.IP("1.1.253.0/24")],
+ [
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_0.1_1_252_12.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_64.1_1_252_127.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_253_1.1_1_253_8.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_254_0.1_1_254_15.foo", # Another path in the group is inside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_100_0.1_1_101_0.foo", # Another path in the group is inside
+ "nav.dhcp.4.server1.range.custom_groups.group3.1_1_100_0.1_1_101_0.foo", # Outside
+ "nav.dhcp.4.server2.range.custom_groups.group1.1_1_100_0.1_1_101_0.foo", # Outside
+ "nav.dhcp.4.server2.range.custom_groups.group2.1_1_254_0.1_1_254_15.foo", # Outside
+ "nav.dhcp.4.server2.range.custom_groups.group3.1_1_253_1.1_1_253_8.foo", # Inside
+ "nav.dhcp.4.server3.range.custom_groups.group1.1_1_250_1.1_1_255_1.foo", # Partially Inside
+ "nav.dhcp.4.server4.range.custom_groups.group1.1_1_253_1.1_1_255_1.foo", # Partially Inside
+ "nav.dhcp.4.server5.range.custom_groups.group1.1_1_250_1.1_1_253_1.foo", # Partially Inside
+ ],
+ [
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_0.1_1_252_12.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_64.1_1_252_127.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_253_1.1_1_253_8.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_254_0.1_1_254_15.foo", # Another path in the group is inside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_100_0.1_1_101_0.foo", # Another path in the group is inside
+ "nav.dhcp.4.server2.range.custom_groups.group3.1_1_253_1.1_1_253_8.foo", # Inside
+ "nav.dhcp.4.server3.range.custom_groups.group1.1_1_250_1.1_1_255_1.foo", # Partially Inside
+ "nav.dhcp.4.server4.range.custom_groups.group1.1_1_253_1.1_1_255_1.foo", # Partially Inside
+ "nav.dhcp.4.server5.range.custom_groups.group1.1_1_250_1.1_1_253_1.foo", # Partially Inside
+ ],
+ ),
+ (
+ [IPy.IP("1.1.252.0/24")],
+ [
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_0.1_1_252_12.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_64.1_1_252_127.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_253_1.1_1_253_8.foo", # Outside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_254_0.1_1_254_15.foo", # Outside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_100_0.1_1_101_0.foo", # Outside
+ "nav.dhcp.4.server1.range.custom_groups.group3.1_1_100_0.1_1_101_0.foo", # Outside
+ "nav.dhcp.4.server2.range.custom_groups.group1.1_1_100_0.1_1_101_0.foo", # Outside
+ "nav.dhcp.4.server2.range.custom_groups.group2.1_1_254_0.1_1_254_15.foo", # Outside
+ "nav.dhcp.4.server2.range.custom_groups.group3.1_1_253_1.1_1_253_8.foo", # Outside
+ "nav.dhcp.4.server3.range.custom_groups.group1.1_1_250_1.1_1_255_1.foo", # Partially Inside
+ "nav.dhcp.4.server4.range.custom_groups.group1.1_1_253_1.1_1_255_1.foo", # Outside
+ "nav.dhcp.4.server5.range.custom_groups.group1.1_1_250_1.1_1_253_1.foo", # Partially Inside
+ ],
+ [
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_0.1_1_252_12.foo", # Inside
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_64.1_1_252_127.foo", # Inside
+ "nav.dhcp.4.server3.range.custom_groups.group1.1_1_250_1.1_1_255_1.foo", # Partially Inside
+ "nav.dhcp.4.server5.range.custom_groups.group1.1_1_250_1.1_1_253_1.foo", # Partially Inside
+ ],
+ ),
+ (
+ [],
+ [
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_0.1_1_252_12.foo", # Outside
+ "nav.dhcp.4.server1.range.custom_groups.group1.1_1_252_64.1_1_252_127.foo", # Outside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_253_1.1_1_253_8.foo", # Outside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_254_0.1_1_254_15.foo", # Outside
+ "nav.dhcp.4.server1.range.custom_groups.group2.1_1_100_0.1_1_101_0.foo", # Outside
+ "nav.dhcp.4.server1.range.custom_groups.group3.1_1_100_0.1_1_101_0.foo", # Outside
+ "nav.dhcp.4.server2.range.custom_groups.group1.1_1_100_0.1_1_101_0.foo", # Outside
+ "nav.dhcp.4.server2.range.custom_groups.group2.1_1_254_0.1_1_254_15.foo", # Outside
+ "nav.dhcp.4.server2.range.custom_groups.group3.1_1_253_1.1_1_253_8.foo", # Outside
+ "nav.dhcp.4.server3.range.custom_groups.group1.1_1_250_1.1_1_255_1.foo", # Outside
+ "nav.dhcp.4.server4.range.custom_groups.group1.1_1_253_1.1_1_255_1.foo", # Outside
+ "nav.dhcp.4.server5.range.custom_groups.group1.1_1_250_1.1_1_253_1.foo", # Outside
+ ],
+ [],
+ )
+ ]
+)
+def test_drop_groups_not_in_prefixes_should_work_as_expected(prefixes, test_input, expected_output):
+ native_paths = [DhcpPath.from_graphite_path(path) for path in test_input]
+ grouped_paths = group_paths(native_paths)
+ remaining_grouped_paths = drop_groups_not_in_prefixes(grouped_paths, prefixes)
+ remaining_graphite_paths = [path.to_graphite_path("foo") for path in chain.from_iterable(remaining_grouped_paths)]
+ assert sorted(remaining_graphite_paths) == sorted(expected_output)
diff --git a/tests/unittests/dhcpstats/kea_dhcp_test.py b/tests/unittests/dhcpstats/kea_dhcp_test.py
index 480ba6eff6..7e70974494 100644
--- a/tests/unittests/dhcpstats/kea_dhcp_test.py
+++ b/tests/unittests/dhcpstats/kea_dhcp_test.py
@@ -16,7 +16,7 @@
from nav.errors import ConfigurationError
-ENDPOINT_NAME = "dhcp-server-foo"
+SERVER_NICKNAME = "dhcp-server-foo"
class TestExpectedAPIResponses:
@@ -33,7 +33,7 @@ def test_fetch_stats_should_return_correct_stats(self, valid_dhcp4, api_mock):
config, statistics, expected_stats = valid_dhcp4
api_mock.autofill("dhcp4", config=config, statistics=statistics)
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
actual_stats = client.fetch_stats()
@@ -61,7 +61,7 @@ def test_fetch_stats_should_only_have_recent_timestamps(
config, statistics, expected_stats = valid_dhcp4
api_mock.autofill("dhcp4", config=config, statistics=statistics)
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
actual_stats = client.fetch_stats()
assert len(actual_stats) > 0
@@ -85,7 +85,7 @@ def test_fetch_stats_should_handle_empty_config_api_response(
"config-get",
lambda kea_arguments, kea_service: make_api_response({"Dhcp4": {}}),
)
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
assert list(client.fetch_stats()) == []
def test_fetch_stats_should_handle_empty_statistic_in_statistics_api_response(
@@ -100,7 +100,7 @@ def test_fetch_stats_should_handle_empty_statistic_in_statistics_api_response(
config, statistics, expected_stats = valid_dhcp4
statistics = {key: [] for key, value in statistics.items()}
api_mock.autofill("dhcp4", config=config, statistics=statistics)
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
assert list(client.fetch_stats()) == []
def test_fetch_stats_should_handle_empty_statistics_api_response(
@@ -119,7 +119,7 @@ def test_fetch_stats_should_handle_empty_statistics_api_response(
"statistic-get-all",
lambda kea_arguments, kea_service: make_api_response({}),
)
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
assert list(client.fetch_stats()) == []
@pytest.mark.parametrize("http_status", chain(range(400, 430), range(500, 530)))
@@ -139,7 +139,7 @@ def test_fetch_stats_should_raise_an_exception_on_http_error_response(
attrs={"status_code": http_status},
)
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
with pytest.raises(CommunicationError):
client.fetch_stats()
@@ -162,7 +162,7 @@ def test_fetch_stats_should_raise_an_exception_on_error_status_in_config_api_res
config, status=kea_status
),
)
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
with pytest.raises(CommunicationError):
client.fetch_stats()
@@ -189,7 +189,7 @@ def test_fetch_stats_should_raise_an_exception_on_error_status_in_statistic_api_
statistics, status=status
),
)
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
with pytest.raises(CommunicationError):
client.fetch_stats()
@@ -201,24 +201,21 @@ def test_fetch_stats_should_raise_an_exception_on_error_status_in_statistic_api_
if status not in (_KeaStatus.SUCCESS, _KeaStatus.UNSUPPORTED)
],
)
- def test_fetch_stats_should_raise_an_exception_on_error_status_in_config_hash_api_response( # noqa: E501
+ def test_fetch_stats_should_not_raise_an_exception_on_error_status_in_config_hash_api_response( # noqa: E501
self, valid_dhcp4, api_mock, status
):
"""
- If the server reports an API-specific error regarding serving its
- configuration's hash, other than that functionality being unsupported,
- the client should raise an error.
+        We don't depend on the config-hash-get command to work
"""
foohash = "b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c"
config, statistics, expected_stats = valid_dhcp4
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
config["Dhcp4"]["hash"] = foohash
api_mock.autofill("dhcp4", config=config, statistics=statistics)
api_mock.add(
"config-hash-get", make_api_response({"hash": foohash}, status=status)
)
- with pytest.raises(CommunicationError):
- client.fetch_stats()
+ client.fetch_stats()
def test_fetch_stats_should_check_and_warn_if_server_config_changed_during_call(
self, valid_dhcp4, api_mock, caplog
@@ -236,7 +233,7 @@ def test_fetch_stats_should_check_and_warn_if_server_config_changed_during_call(
a bad mapping from configuration to statistics.
"""
config, statistics, expected_stats = valid_dhcp4
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
api_mock.autofill("dhcp4", config=None, statistics=statistics)
api_mock.add("config-get", make_api_response(config))
updated_config = deepcopy(config)
@@ -265,7 +262,7 @@ def test_fetch_stats_should_raise_an_exception_on_unrecognizable_config_api_resp
self, valid_dhcp4, api_mock, invalid_response
):
config, statistics, expected_stats = valid_dhcp4
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
api_mock.autofill("dhcp4", config=None, statistics=statistics)
api_mock.add("config-get", invalid_response)
@@ -276,23 +273,25 @@ def test_fetch_stats_should_raise_an_exception_on_unrecognizable_statistic_api_r
self, valid_dhcp4, api_mock, invalid_response
):
config, statistics, expected_stats = valid_dhcp4
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
api_mock.autofill("dhcp4", config=config, statistics=None)
api_mock.add("statistic-get-all", invalid_response)
with pytest.raises(KeaUnexpected):
client.fetch_stats()
- def test_fetch_stats_should_raise_an_exception_on_unrecognizable_config_hash_api_response( # noqa: E501
+ def test_fetch_stats_should_not_raise_an_exception_on_unrecognizable_config_hash_api_response( # noqa: E501
self, valid_dhcp4, api_mock, invalid_response
):
+ """
+        We don't depend on the config-hash-get command to work
+ """
config, statistics, expected_stats = valid_dhcp4
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
config["Dhcp4"]["hash"] = "foo"
api_mock.autofill("dhcp4", config=config, statistics=statistics)
api_mock.add("config-hash-get", invalid_response)
- with pytest.raises(KeaUnexpected):
- client.fetch_stats()
+ client.fetch_stats()
class TestConfigCaching:
@@ -315,7 +314,7 @@ def test_fetch_kea_config_should_not_refetch_config_if_its_hash_is_unchanged(
lambda kea_arguments, kea_service: make_api_response({"hash": "1"}),
)
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
client._fetch_kea_config()
client._fetch_kea_config()
@@ -335,7 +334,7 @@ def test_fetch_kea_config_should_refetch_config_if_its_hash_is_changed(
lambda kea_arguments, kea_service: make_api_response({"hash": "2"}),
)
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
client._fetch_kea_config()
client._fetch_kea_config()
@@ -353,7 +352,7 @@ def test_fetch_kea_config_should_refetch_config_if_its_hash_is_missing(
lambda kea_arguments, kea_service: make_api_response({"hash": "1"}),
)
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
client._fetch_kea_config()
client._fetch_kea_config()
@@ -369,7 +368,7 @@ def test_fetch_kea_config_should_refetch_config_if_config_hash_is_unsupported(
),
)
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
client._fetch_kea_config()
client._fetch_kea_config()
@@ -387,7 +386,7 @@ def test_fetch_stats_should_warn_if_using_http(self, valid_dhcp4, api_mock, capl
from the Kea API may contain sensitive data such as passwords in plaintext.
"""
config, statistics, expected_stats = valid_dhcp4
- client = Client(ENDPOINT_NAME, "http://example.org/")
+ client = Client(SERVER_NICKNAME, "http://example.org/")
api_mock.autofill("dhcp4", config=config, statistics=statistics)
with caplog.at_level(logging.WARNING):
@@ -408,7 +407,7 @@ def test_fetch_stats_should_warn_if_using_http_basic_auth_with_http(
"""
config, statistics, expected_stats = valid_dhcp4
client = Client(
- ENDPOINT_NAME,
+ SERVER_NICKNAME,
"http://example.org/",
http_basic_username="nav",
http_basic_password="nav",
@@ -431,7 +430,7 @@ def test_fetch_stats_should_error_if_using_client_certificate_with_http(
"""
config, statistics, expected_stats = valid_dhcp4
client = Client(
- ENDPOINT_NAME, "http://example.org/", client_cert_path="/bar/baz.pem"
+ SERVER_NICKNAME, "http://example.org/", client_cert_path="/bar/baz.pem"
)
api_mock.autofill("dhcp4", config=config, statistics=statistics)
@@ -449,7 +448,7 @@ def test_fetch_stats_should_use_http_basic_auth_when_this_is_configured(
"""
config, statistics, expected_stats = valid_dhcp4
client = Client(
- ENDPOINT_NAME,
+ SERVER_NICKNAME,
"http://example.org/",
http_basic_username="bar",
http_basic_password="baz",
@@ -480,7 +479,7 @@ def test_fetch_stats_should_use_client_certificates_when_this_is_configured(
"""
config, statistics, expected_stats = valid_dhcp4
client = Client(
- ENDPOINT_NAME,
+ SERVER_NICKNAME,
"https://example.org/",
client_cert_path="/bar/baz.pem",
)
@@ -530,7 +529,7 @@ def valid_dhcp4():
"pool": "42.0.3.1-42.0.3.10",
"pool-id": 1,
"user-context": {
- "name": "oslo-student",
+ "group": "oslo-student",
},
},
],
@@ -544,9 +543,9 @@ def valid_dhcp4():
"option-data": [],
"pool": "42.0.4.1-42.0.4.5",
"pool-id": 1,
- # Pool with 'user-context'
+ # This is a Kea pool with 'group' in 'user-context'
"user-context": {
- "name": "oslo-staff",
+ "group": "oslo-staff",
},
},
],
@@ -566,7 +565,7 @@ def valid_dhcp4():
"option-data": [],
"pool": "42.0.5.1-42.0.5.5",
"pool-id": 1,
- # Pool without 'user-context'
+ # This is a Kea pool without 'user-context'
},
],
"subnet": "42.0.5.0/24",
@@ -574,6 +573,28 @@ def valid_dhcp4():
],
"valid-lifetime": 4000,
},
+ {
+ "name": "shared-network-3",
+ "subnet4": [
+ {
+ "id": 6,
+ "option-data": [],
+ "pools": [
+ {
+ "option-data": [],
+ "pool": "42.0.6.1-42.0.6.5",
+ "pool-id": 1,
+ # This is a Kea pool with 'user-context' but without 'group' in 'user-context'
+ "user-context": {
+ "name": "bob",
+ },
+ },
+ ],
+ "subnet": "42.0.6.0/24",
+ },
+ ],
+ "valid-lifetime": 4000,
+            },
],
"subnet4": [
{
@@ -585,7 +606,7 @@ def valid_dhcp4():
"pool": "42.0.1.1-42.0.1.10",
"pool-id": 1,
"user-context": {
- "name": "bergen-staff",
+ "group": "bergen-staff",
},
},
],
@@ -601,7 +622,7 @@ def valid_dhcp4():
"pool": "42.0.2.1-42.0.2.10",
"pool-id": 1,
"user-context": {
- "name": "bergen-student",
+ "group": "bergen-student",
},
},
{
@@ -610,7 +631,7 @@ def valid_dhcp4():
"pool": "42.0.2.32/28",
"pool-id": 3,
"user-context": {
- "name": "bergen-student",
+ "group": "bergen-student",
},
},
{
@@ -618,7 +639,7 @@ def valid_dhcp4():
"pool": "42.0.2.128/25",
"pool-id": 2,
"user-context": {
- "name": "bergen-student",
+ "group": "bergen-student",
},
},
],
@@ -675,6 +696,9 @@ def valid_dhcp4():
"subnet[5].pool[1].assigned-addresses": [[0, "2025-05-30 05:49:49.468085"]],
"subnet[5].pool[1].declined-addresses": [[0, "2025-05-30 05:49:49.468087"]],
"subnet[5].pool[1].total-addresses": [[5, "2025-05-30 05:49:49.467976"]],
+ "subnet[6].pool[1].assigned-addresses": [[0, "2025-05-30 05:49:49.468085"]],
+ "subnet[6].pool[1].declined-addresses": [[0, "2025-05-30 05:49:49.468087"]],
+ "subnet[6].pool[1].total-addresses": [[5, "2025-05-30 05:49:49.467976"]],
# Some irrelevant values that won't be used by the client:
"subnet[1].cumulative-assigned-addresses": [[0, "2022-02-11 17:54:17.487528"]],
"subnet[1].declined-addresses": [[0, "2022-02-11 17:54:17.487585"]],
@@ -690,87 +714,108 @@ def valid_dhcp4():
# the api response.
expected_stats = [
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.bergen-staff.42_0_1_1.42_0_1_10.assigned",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.bergen-staff.42_0_1_1.42_0_1_10.assigned",
("2025-05-30 05:49:49.467993", 2),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.bergen-staff.42_0_1_1.42_0_1_10.declined",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.bergen-staff.42_0_1_1.42_0_1_10.declined",
("2025-05-30 05:49:49.467993", 1),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.bergen-staff.42_0_1_1.42_0_1_10.total",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.bergen-staff.42_0_1_1.42_0_1_10.total",
("2025-05-30 05:49:49.467993", 10),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.bergen-student.42_0_2_1.42_0_2_10.assigned",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.bergen-student.42_0_2_1.42_0_2_10.assigned",
("2025-05-30 05:49:49.467993", 0),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.bergen-student.42_0_2_1.42_0_2_10.declined",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.bergen-student.42_0_2_1.42_0_2_10.declined",
("2025-05-30 05:49:49.467993", 1),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.bergen-student.42_0_2_1.42_0_2_10.total",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.bergen-student.42_0_2_1.42_0_2_10.total",
("2025-05-30 05:49:49.467993", 10),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.bergen-student.42_0_2_128.42_0_2_255.assigned",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.bergen-student.42_0_2_128.42_0_2_255.assigned",
("2025-05-30 05:49:49.467993", 1),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.bergen-student.42_0_2_128.42_0_2_255.declined",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.bergen-student.42_0_2_128.42_0_2_255.declined",
("2025-05-30 05:49:49.467993", 0),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.bergen-student.42_0_2_128.42_0_2_255.total",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.bergen-student.42_0_2_128.42_0_2_255.total",
("2025-05-30 05:49:49.467993", 128),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.bergen-student.42_0_2_32.42_0_2_47.assigned",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.bergen-student.42_0_2_32.42_0_2_47.assigned",
("2025-05-30 05:49:49.467993", 0),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.bergen-student.42_0_2_32.42_0_2_47.declined",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.bergen-student.42_0_2_32.42_0_2_47.declined",
("2025-05-30 05:49:49.467993", 0),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.bergen-student.42_0_2_32.42_0_2_47.total",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.bergen-student.42_0_2_32.42_0_2_47.total",
("2025-05-30 05:49:49.467993", 16),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.oslo-student.42_0_3_1.42_0_3_10.assigned",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.oslo-student.42_0_3_1.42_0_3_10.assigned",
("2025-05-30 05:49:49.467993", 0),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.oslo-student.42_0_3_1.42_0_3_10.declined",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.oslo-student.42_0_3_1.42_0_3_10.declined",
("2025-05-30 05:49:49.467993", 0),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.oslo-student.42_0_3_1.42_0_3_10.total",
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.oslo-student.42_0_3_1.42_0_3_10.total",
("2025-05-30 05:49:49.467993", 10),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.oslo-staff.42_0_4_1.42_0_4_5.assigned",
+ # From Kea pool with 'group': 'oslo-staff' in 'user-context'
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.oslo-staff.42_0_4_1.42_0_4_5.assigned",
+ ("2025-05-30 05:49:49.467993", 0),
+ ),
+ (
+ # From Kea pool with 'group': 'oslo-staff' in 'user-context'
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.oslo-staff.42_0_4_1.42_0_4_5.declined",
+ ("2025-05-30 05:49:49.467993", 0),
+ ),
+ (
+ # From Kea pool with 'group': 'oslo-staff' in 'user-context'
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.custom_groups.oslo-staff.42_0_4_1.42_0_4_5.total",
+ ("2025-05-30 05:49:49.467993", 5),
+ ),
+ (
+ # From Kea pool without 'user-context'
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.special_groups.standalone.42_0_5_1.42_0_5_5.assigned",
("2025-05-30 05:49:49.467993", 0),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.oslo-staff.42_0_4_1.42_0_4_5.declined",
+ # From Kea pool without 'user-context'
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.special_groups.standalone.42_0_5_1.42_0_5_5.declined",
("2025-05-30 05:49:49.467993", 0),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.oslo-staff.42_0_4_1.42_0_4_5.total",
+ # From Kea pool without 'user-context'
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.special_groups.standalone.42_0_5_1.42_0_5_5.total",
("2025-05-30 05:49:49.467993", 5),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.pool-42_0_5_1-42_0_5_5.42_0_5_1.42_0_5_5.assigned",
+ # From Kea pool with 'user-context' but without 'group' in 'user-context'
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.special_groups.standalone.42_0_6_1.42_0_6_5.assigned",
("2025-05-30 05:49:49.467993", 0),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.pool-42_0_5_1-42_0_5_5.42_0_5_1.42_0_5_5.declined",
+ # From Kea pool with 'user-context' but without 'group' in 'user-context'
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.special_groups.standalone.42_0_6_1.42_0_6_5.declined",
("2025-05-30 05:49:49.467993", 0),
),
(
- f"nav.dhcp.4.pool.{ENDPOINT_NAME}.pool-42_0_5_1-42_0_5_5.42_0_5_1.42_0_5_5.total",
+ # From Kea pool with 'user-context' but without 'group' in 'user-context'
+ f"nav.dhcp.4.{SERVER_NICKNAME}.range.special_groups.standalone.42_0_6_1.42_0_6_5.total",
("2025-05-30 05:49:49.467993", 5),
),
]
diff --git a/tests/unittests/metrics/names_test.py b/tests/unittests/metrics/names_test.py
index e71bc2c21b..11e83135e7 100644
--- a/tests/unittests/metrics/names_test.py
+++ b/tests/unittests/metrics/names_test.py
@@ -1,6 +1,7 @@
import pytest
from unittest import TestCase
-from nav.metrics.names import join_series, escape_metric_name
+from unittest.mock import patch
+from nav.metrics.names import join_series, escape_metric_name, get_expanded_nodes, safe_name
class MetricNamingTests(TestCase):
@@ -15,6 +16,28 @@ def test_join_single_series_should_return_same(self):
self.assertEqual(join_series([series]), series)
+class TestGetExpandedNodes:
+ def test_when_valid_response_should_return_results(self):
+ raw_response = {
+ "results": [
+ "nav.foo.1",
+ "nav.foo.2",
+ "nav.foo.3",
+ "nav.bar.baz",
+ ]
+ }
+
+ with patch("nav.metrics.names.raw_metric_query", return_value=raw_response):
+ assert get_expanded_nodes("whatever.path") == raw_response["results"]
+
+ @pytest.mark.parametrize(
+ "raw_response", [[], {}, "foo", "", {"results": "foo"}]
+ )
+ def test_when_invalid_response_should_return_empty_list(self, raw_response):
+ with patch("nav.metrics.names.raw_metric_query", return_value=raw_response):
+ assert get_expanded_nodes("whatever.path") == []
+
+
@pytest.mark.parametrize(
"test_input,expected",
[
@@ -29,5 +52,9 @@ def test_join_single_series_should_return_same(self):
('temperature, top', 'temperature__top'),
],
)
-def test_escape_metric_name(test_input, expected):
- assert escape_metric_name(test_input) == expected
+class TestEscapeMetricName:
+ def test_should_escape_by_default(self, test_input, expected):
+ assert escape_metric_name(test_input) == expected
+ def test_should_not_escape_safe_names(self, test_input, expected):
+ assert escape_metric_name(safe_name(test_input)) == str(test_input)
+ assert escape_metric_name(str(safe_name(test_input))) == str(test_input)
diff --git a/tests/unittests/metrics/templates_test.py b/tests/unittests/metrics/templates_test.py
new file mode 100644
index 0000000000..e46a7baf7f
--- /dev/null
+++ b/tests/unittests/metrics/templates_test.py
@@ -0,0 +1,90 @@
+import pytest
+
+from nav.metrics.names import safe_name
+from nav.metrics.templates import metric_path_for_dhcp
+
+@pytest.mark.parametrize(
+ "test_input,expected",
+ [
+ (
+ dict(
+ allocation_type="range",
+ ip_version=4,
+ server_name="foo",
+ group_name="bar",
+ group_name_source="custom_groups",
+ first_ip="192.0.2.0",
+ last_ip="192.0.2.9",
+ metric_name="baz",
+ ),
+ "nav.dhcp.4.foo.range.custom_groups.bar.192_0_2_0.192_0_2_9.baz",
+ ),
+ (
+ dict(
+ allocation_type="range",
+ ip_version=4,
+ server_name="foo",
+ group_name="bar",
+ group_name_source="custom_groups",
+ first_ip=safe_name("192.0.2.0"),
+ last_ip=safe_name("192.0.2.9"),
+ metric_name="baz",
+ ),
+ "nav.dhcp.4.foo.range.custom_groups.bar.192.0.2.0.192.0.2.9.baz",
+ ),
+ (
+ dict(
+ allocation_type="range",
+ ip_version=4,
+ server_name="foo",
+ group_name="bar",
+ group_name_source="custom_groups",
+ first_ip="*",
+ last_ip="*",
+ metric_name="baz",
+ ),
+ "nav.dhcp.4.foo.range.custom_groups.bar._._.baz",
+ ),
+ (
+ dict(
+ allocation_type="range",
+ ip_version=4,
+ server_name="foo",
+ group_name="bar",
+ group_name_source="custom_groups",
+ first_ip=safe_name("*"),
+ last_ip=safe_name("*"),
+ metric_name="baz",
+ ),
+ "nav.dhcp.4.foo.range.custom_groups.bar.*.*.baz",
+ ),
+ (
+ dict(
+ allocation_type="range*",
+ ip_version="4!",
+ server_name="foo?",
+ group_name="bar{",
+ group_name_source="special_groups}",
+ first_ip="*_*_0_0",
+ last_ip="*.*.0.1",
+ metric_name="baz baz!",
+ ),
+ "nav.dhcp.4_.foo_.range_.special_groups_.bar_.____0_0.____0_1.baz_baz_",
+ ),
+ (
+ dict(
+ allocation_type=safe_name("range*"),
+ ip_version=safe_name("4!"),
+ server_name=safe_name("foo?"),
+ group_name=safe_name("bar{"),
+ group_name_source=safe_name("special_groups}"),
+ first_ip=safe_name("*_*_0_0"),
+ last_ip=safe_name("*.*.0.1"),
+ metric_name=safe_name("baz baz!"),
+ ),
+ "nav.dhcp.4!.foo?.range*.special_groups}.bar{.*_*_0_0.*.*.0.1.baz baz!",
+ ),
+ ]
+)
+def test_metric_path_for_dhcp(test_input, expected):
+ assert metric_path_for_dhcp(**test_input) == expected