Skip to content
This repository was archived by the owner on Oct 16, 2024. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
48 changes: 27 additions & 21 deletions clusterman/kubernetes/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
from kubernetes.client.models.v1_node_selector_requirement import V1NodeSelectorRequirement
from kubernetes.client.models.v1_node_selector_term import V1NodeSelectorTerm
from kubernetes.client.models.v1_pod import V1Pod as KubernetesPod
from kubernetes.config.config_exception import ConfigException

from clusterman.util import ClustermanResources

Expand All @@ -53,33 +54,38 @@


class KubeApiClientWrapper:
    # CLUSTERMAN-812: _client is deliberately a *class* variable so the Kubernetes
    # client object is created only once per process and shared by every wrapper
    # instance. Without this, reload_state() -> reload_client() would re-create
    # the kubernetes client object on every reload.
    _client = None
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

might be worth a comment here re: why this is a classvar now (just to make sure that we can figure out why we did this even if someone moves this line around or something and the git blame no longer points directly at this commit) :)


def __init__(self, kubeconfig_path: str, client_class: Type) -> None:
    """Init k8s API client

    Loads Kubernetes configuration and builds the API client exactly once per
    process: the client is cached on the class (see ``_client``), so later
    instantiations reuse it instead of re-loading config.

    :param str kubeconfig_path: k8s configuration path
    :param Type client_class: k8s client class to initialize
    :raises TypeError, ConfigException: if the Kubernetes config cannot be loaded
    """
    # CLUSTERMAN-812: only build the client the first time; reload_state() calls
    # reload_client(), which would otherwise re-create it on every reload.
    if KubeApiClientWrapper._client is None:
        try:
            # https://kubernetes.io/docs/concepts/containers/container-environment/#container-environment
            # Every pod in k8s gets some default environment variables injected,
            # including KUBERNETES_SERVICE_HOST, which points to the default
            # Kubernetes service. We use this variable to distinguish between
            # clusterman started in a pod vs. started on a host. For clusterman
            # instances running inside a k8s cluster we prioritize the k8s
            # service account, since that lets us avoid creating any kubeconfig
            # in advance. For clusterman CLI invocations we continue using the
            # provided KUBECONFIG file.
            if os.getenv("KUBERNETES_SERVICE_HOST"):
                kubernetes.config.load_incluster_config()
            else:
                kubernetes.config.load_kube_config(kubeconfig_path, context=os.getenv("KUBECONTEXT"))
        except (TypeError, ConfigException):
            error_msg = "Could not load KUBECONFIG; is this running on Kubernetes master?"
            if "yelpcorp" in socket.getfqdn():
                error_msg += "\nHint: try using the clusterman-k8s-<clustername> wrapper script!"
            logger.error(error_msg)
            raise

        KubeApiClientWrapper._client = client_class()

def __getattr__(self, attr):
    """Delegate attribute access to the wrapped Kubernetes client.

    Only invoked when normal lookup fails, so wrapper-defined attributes
    (including the cached ``_client``) take precedence over client methods.
    """
    return getattr(self._client, attr)
Expand Down
14 changes: 13 additions & 1 deletion tests/kubernetes/util_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from kubernetes.client.models.v1_node_selector_requirement import V1NodeSelectorRequirement
from kubernetes.client.models.v1_node_selector_term import V1NodeSelectorTerm

from clusterman.kubernetes.util import CachedCoreV1Api
from clusterman.kubernetes.util import CachedCoreV1Api, KubeApiClientWrapper
from clusterman.kubernetes.util import ConciseCRDApi
from clusterman.kubernetes.util import get_node_kernel_version
from clusterman.kubernetes.util import get_node_lsbrelease
Expand All @@ -16,6 +16,8 @@

@pytest.fixture
def mock_cached_core_v1_api():
    # Clear the class-level client cache so every test starts from a clean slate
    KubeApiClientWrapper._client = None
    patcher = mock.patch("clusterman.kubernetes.util.kubernetes")
    patcher.start()
    try:
        yield CachedCoreV1Api("/foo/bar/admin.conf")
    finally:
        patcher.stop()

Expand All @@ -41,6 +43,16 @@ def test_cached_corev1_api_use_load_kubeconfig_config_when_running_as_cli():
assert mock_load_kube_config.called


def test_client_initialization_happen_only_once():
    """The kubernetes config is loaded once; the second construction reuses the cache."""
    # Reset the class-level cache so this test does not depend on which tests ran
    # before it (without this, a previously-built client makes call_count == 0).
    KubeApiClientWrapper._client = None
    with mock.patch.dict(os.environ, {"KUBERNETES_SERVICE_HOST": "ABC"}):
        with mock.patch(
            "clusterman.kubernetes.util.kubernetes.config.load_incluster_config"
        ) as mock_load_incluster_config:
            _ = CachedCoreV1Api("/foo/bar/admin.conf")
            _ = CachedCoreV1Api("/foo/bar/admin1.conf")
        assert mock_load_incluster_config.call_count == 1


def test_cached_corev1_api_caches_non_cached_function(mock_cached_core_v1_api):
mock_cached_core_v1_api.list_namespace()
assert mock_cached_core_v1_api._client.list_namespace.call_count == 1
Expand Down