From bd6fd2009c2592226d69bfda34fb8c3c5d72a51e Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Wed, 13 Apr 2016 11:05:18 -0400 Subject: [PATCH 1/5] Added cert param in `make_rest_request` to pass client side certs Added cert param so that now we can pass client side certs while making request to API server, this can be passed as path to file that contains cert and key or it can be a tuple which has path to cert and key. This is part of the effort to add support to authenticate to server via cert in addition to access_token. --- atomicapp/utils.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/atomicapp/utils.py b/atomicapp/utils.py index e8917d18..112ebacc 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -432,7 +432,7 @@ def getUserHome(): return home @staticmethod - def make_rest_request(method, url, verify=True, data=None): + def make_rest_request(method, url, verify=True, data=None, cert=None): """ Make HTTP request to url @@ -444,6 +444,10 @@ def make_rest_request(method, url, verify=True, data=None): of trusted CAs data (dict/list): object to be serialised to json and send as http data (when method=post/put/delete) + cert (tuple/string): Path to local cert as client side certificate, + as a single file (containing the certificate + and the private key) or as a tuple of both + file's path Returns: tuple (status_code, return_data): status_code - http status code + return_data - JSON response data from server Raises: ProviderFailedException - connection error try: if method.lower() == "get": - res = requests.get(url, verify=verify) + res = requests.get(url, verify=verify, cert=cert) elif method.lower() == "post": - res = requests.post(url, json=data, verify=verify) + res = requests.post(url, json=data, verify=verify, cert=cert) elif method.lower() == "put": - res = requests.put(url, json=data, verify=verify) + res = requests.put(url, json=data, verify=verify, cert=cert) elif method.lower() == "delete": - res = 
requests.delete(url, json=data, verify=verify) + res = requests.delete(url, json=data, verify=verify, cert=cert) elif method.lower() == "patch": headers = {"Content-Type": "application/json-patch+json"} - res = requests.patch(url, json=data, verify=verify, headers=headers) + res = requests.patch(url, json=data, verify=verify, + headers=headers, cert=cert) status_code = res.status_code return_data = res.json() From ba16abd259832db0b4fedda2e1ec5c4139e3a96f Mon Sep 17 00:00:00 2001 From: Tomas Kral Date: Mon, 18 Apr 2016 17:51:23 +0200 Subject: [PATCH 2/5] Improve kubeconfig parsing add support for getting certificates and keys from '-data' --- atomicapp/providers/lib/kubeconfig.py | 57 ++++++++++++++++++++++----- 1 file changed, 48 insertions(+), 9 deletions(-) diff --git a/atomicapp/providers/lib/kubeconfig.py b/atomicapp/providers/lib/kubeconfig.py index 9f71792d..ca6aeb52 100644 --- a/atomicapp/providers/lib/kubeconfig.py +++ b/atomicapp/providers/lib/kubeconfig.py @@ -1,4 +1,6 @@ import anymarkup +import os +from base64 import b64decode from atomicapp.plugin import ProviderFailedException from atomicapp.constants import (PROVIDER_AUTH_KEY, @@ -8,6 +10,7 @@ PROVIDER_TLS_VERIFY_KEY, PROVIDER_CA_KEY) import logging +from atomicapp.utils import Utils logger = logging.getLogger(LOGGER_DEFAULT) @@ -68,7 +71,7 @@ def parse_kubeconf_data(kubecfg): dict of parsed values from config """ url = None - token = None + auth = None namespace = None tls_verify = True ca = None @@ -103,16 +106,52 @@ def parse_kubeconf_data(kubecfg): logger.debug("user: %s", user) url = cluster["cluster"]["server"] - token = user["user"]["token"] - if "namespace" in context["context"]: - namespace = context["context"]["namespace"] - if "insecure-skip-tls-verify" in cluster["cluster"]: - tls_verify = not cluster["cluster"]["insecure-skip-tls-verify"] - elif "certificate-authority" in cluster["cluster"]: - ca = cluster["cluster"]["certificate-authority"] + auth = user["user"].get("token") + namespace 
= context["context"].get("namespace") + tls_verify = not cluster["cluster"].get("insecure-skip-tls-verify") + + if tls_verify: + ca_data = cluster["cluster"].get("certificate-authority-data") + if ca_data: + ca = Utils.getTmpFile(b64decode(ca_data)) + else: + if "certificate-authority" in cluster["cluster"]: + # if we are in container translate path to path on host + ca = os.path.join(Utils.getRoot(), + cluster["cluster"].get("certificate-authority").lstrip('/')) + + if not auth: + # If token not specified, check for certificate auth. + + # client-certificate-data and client-key-data options overrides + # client-certificate and client-key + # https://github.com/kubernetes/kubernetes/blob/v1.2.2/pkg/client/unversioned/clientcmd/api/types.go#L78 + # `{client-certificate,client-key}-data` keys in Kubernetes config + # file are inline base64 encoded certificates, requests library + # requires certs in files, this is why we are putting them to tmp + # files. + + cert_data = user["user"].get("client-certificate-data") + key_data = user["user"].get("client-key-data") + + if cert_data: + cert = Utils.getTmpFile(b64decode(cert_data)) + else: + if "client-certificate" in user["user"]: + cert = os.path.join(Utils.getRoot(), + user["user"].get("client-certificate").lstrip('/')) + + if key_data: + key = Utils.getTmpFile(b64decode(key_data)) + else: + if "client-key" in user["user"]: + key = os.path.join(Utils.getRoot(), + user["user"].get("client-key").lstrip('/')) + + auth = "{}:{}".format(cert, key) return {PROVIDER_API_KEY: url, - PROVIDER_AUTH_KEY: token, + PROVIDER_AUTH_KEY: auth, NAMESPACE_KEY: namespace, PROVIDER_TLS_VERIFY_KEY: tls_verify, PROVIDER_CA_KEY: ca} From f396a57578f44dcaa09637963b8e38cb490e0eff Mon Sep 17 00:00:00 2001 From: Tomas Kral Date: Mon, 18 Apr 2016 17:52:14 +0200 Subject: [PATCH 3/5] OpenShift: Add support for authentication using X509 certs. 
--- atomicapp/providers/openshift.py | 63 +++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 22 deletions(-) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 268bae0d..2f81a8ad 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -50,11 +50,12 @@ class OpenshiftClient(object): def __init__(self, providerapi, access_token, - provider_tls_verify, provider_ca): + provider_tls_verify, provider_ca, certs): self.providerapi = providerapi self.access_token = access_token self.provider_tls_verify = provider_tls_verify self.provider_ca = provider_ca + self.certs = certs # construct full urls for api endpoints self.kubernetes_api = urljoin(self.providerapi, "api/v1/") @@ -80,7 +81,8 @@ def test_connection(self): (status_code, return_data) = \ Utils.make_rest_request("get", self.openshift_api, - verify=self._requests_tls_verify()) + verify=self._requests_tls_verify(), + cert=self.certs) except SSLError as e: if self.provider_tls_verify: msg = "SSL/TLS ERROR: invalid certificate. 
" \ @@ -100,7 +102,8 @@ def get_oapi_resources(self): (status_code, return_data) = \ Utils.make_rest_request("get", self.openshift_api, - verify=self._requests_tls_verify()) + verify=self._requests_tls_verify(), + cert=self.certs) if status_code == 200: oapi_resources = return_data["resources"] else: @@ -121,7 +124,8 @@ def get_kapi_resources(self): (status_code, return_data) = \ Utils.make_rest_request("get", self.kubernetes_api, - verify=self._requests_tls_verify()) + verify=self._requests_tls_verify(), + cert=self.certs) if status_code == 200: kapi_resources = return_data["resources"] else: @@ -139,7 +143,8 @@ def deploy(self, url, artifact): Utils.make_rest_request("post", url, verify=self._requests_tls_verify(), - data=artifact) + data=artifact, + cert=self.certs) if status_code == 201: logger.info("Object %s successfully deployed.", artifact['metadata']['name']) @@ -162,7 +167,8 @@ def delete(self, url): (status_code, return_data) = \ Utils.make_rest_request("delete", url, - verify=self._requests_tls_verify()) + verify=self._requests_tls_verify(), + cert=self.certs) if status_code == 200: logger.info("Successfully deleted.") else: @@ -186,7 +192,8 @@ def scale(self, url, replicas): Utils.make_rest_request("patch", url, data=patch, - verify=self._requests_tls_verify()) + verify=self._requests_tls_verify(), + cert=self.certs) if status_code == 200: logger.info("Successfully scaled to %s replicas", replicas) else: @@ -199,7 +206,8 @@ def process_template(self, url, template): Utils.make_rest_request("post", url, verify=self._requests_tls_verify(), - data=template) + data=template, + cert=self.certs) if status_code == 201: logger.info("template processed %s", template['metadata']['name']) logger.debug("processed template %s", return_data) @@ -306,7 +314,10 @@ def get_pod_status(self, namespace, pod): 'namespaces/{namespace}/pods/{pod}?' 
'access_token={access_token}'.format(**args)) (status_code, return_data) = \ - Utils.make_rest_request("get", url, verify=self._requests_tls_verify()) + Utils.make_rest_request("get", + url, + verify=self._requests_tls_verify(), + cert=self.certs) if status_code != 200: raise ProviderFailedException( @@ -331,6 +342,8 @@ class OpenShiftProvider(Provider): provider_tls_verify = True # path to file or dir with CA certificates provider_ca = None + # client certificate and key for authentication + certs = None def init(self): # Parsed artifacts. Key is kind of artifacts. Value is list of artifacts. @@ -341,7 +354,8 @@ def init(self): self.oc = OpenshiftClient(self.providerapi, self.access_token, self.provider_tls_verify, - self.provider_ca) + self.provider_ca, + self.certs) self.openshift_api = self.oc.openshift_api self.kubernetes_api = self.oc.kubernetes_api @@ -432,7 +446,10 @@ def stop(self): "replicationcontroller", params=params) (status_code, return_data) = \ - Utils.make_rest_request("get", url, verify=self.oc._requests_tls_verify()) + Utils.make_rest_request("get", + url, + verify=self.oc._requests_tls_verify(), + cert=self.certs) if status_code != 200: raise ProviderFailedException("Cannot get Replication" "Controllers for Deployment" @@ -593,10 +610,11 @@ def _get_url(self, namespace, kind, name=None, params=None): if name: url = urljoin(url, name) - if params: - params["access_token"] = self.access_token - else: - params = {"access_token": self.access_token} + if not params: + params = {} + + if self.access_token: + params["access_token"] = self.access_token url = urljoin(url, "?%s" % urlencode(params)) logger.debug("url: %s", url) @@ -677,15 +695,16 @@ def _set_config_values(self): # set config values self.providerapi = result[PROVIDER_API_KEY] - self.access_token = result[PROVIDER_AUTH_KEY] + if ":" in result[PROVIDER_AUTH_KEY]: + self.access_token = None + self.certs = tuple(result[PROVIDER_AUTH_KEY].split(":")) + else: + self.access_token = 
result[PROVIDER_AUTH_KEY] + self.certs = None + self.namespace = result[NAMESPACE_KEY] self.provider_tls_verify = result[PROVIDER_TLS_VERIFY_KEY] - if result[PROVIDER_CA_KEY]: - # if we are in container translate path to path on host - self.provider_ca = os.path.join(Utils.getRoot(), - result[PROVIDER_CA_KEY].lstrip('/')) - else: - self.provider_ca = None + self.provider_ca = result[PROVIDER_CA_KEY] def extract(self, image, src, dest, update=True): """ From cc1fe89dba31c4cfdb8cd1f5edf5b366a73c50df Mon Sep 17 00:00:00 2001 From: Tomas Kral Date: Wed, 20 Apr 2016 12:39:09 +0200 Subject: [PATCH 4/5] Add unittests for kubeconfig --- tests/units/nulecule/test_kubeconfig.py | 158 ++++++++++++++++++++++++ 1 file changed, 158 insertions(+) diff --git a/tests/units/nulecule/test_kubeconfig.py b/tests/units/nulecule/test_kubeconfig.py index 974b694e..1af03d7b 100644 --- a/tests/units/nulecule/test_kubeconfig.py +++ b/tests/units/nulecule/test_kubeconfig.py @@ -1,6 +1,7 @@ import unittest from atomicapp.plugin import ProviderFailedException from atomicapp.providers.lib.kubeconfig import KubeConfig +import base64 class TestKubeConfParsing(unittest.TestCase): @@ -97,6 +98,163 @@ def test_parse_kubeconf_data_cafile(self): 'provider-tlsverify': True, 'provider-cafile': '/foo/bar'}) + def test_parse_kubeconf_data_cafile_data(self): + """ + Test parsing kubeconf data with current context containing + cluster, user, namespace info and certificate-authority-data + """ + kubecfg_data = { + 'current-context': 'context2', + 'contexts': [ + { + 'name': 'context1', + }, + { + 'name': 'context2', + 'context': { + 'cluster': 'cluster1', + 'user': 'user1', + 'namespace': 'namespace1' + } + } + ], + 'clusters': [ + { + 'name': 'cluster1', + 'cluster': { + 'certificate-authority-data': base64.b64encode("foobar"), + 'server': 'server1' + } + } + ], + 'users': [ + { + 'name': 'user1', + 'user': { + 'token': 'token1' + } + } + ] + } + + result = KubeConfig.parse_kubeconf_data(kubecfg_data) + + 
self.assertDictContainsSubset({'provider-api': 'server1', + 'provider-auth': 'token1', + 'namespace': 'namespace1', + 'provider-tlsverify': True}, + result) + + # verify content of ca file + ca_content = open(result['provider-cafile']).read() + self.assertEqual(ca_content, "foobar") + + def test_parse_kubeconf_data_client_cert(self): + """ + Test parsing kubeconf data with current context containing + cluster, user, namespace info and client-certificate and key + """ + kubecfg_data = { + 'current-context': 'context2', + 'contexts': [ + { + 'name': 'context1', + }, + { + 'name': 'context2', + 'context': { + 'cluster': 'cluster1', + 'user': 'user1', + 'namespace': 'namespace1' + } + } + ], + 'clusters': [ + { + 'name': 'cluster1', + 'cluster': { + 'certificate-authority': '/foo/bar', + 'server': 'server1' + } + } + ], + 'users': [ + { + 'name': 'user1', + 'user': { + + 'client-certificate': '/foo/ca', + 'client-key': '/foo/key' + } + } + ] + } + + self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data), + {'provider-api': 'server1', + 'provider-auth': '/foo/ca:/foo/key', + 'namespace': 'namespace1', + 'provider-tlsverify': True, + 'provider-cafile': '/foo/bar'}) + + def test_parse_kubeconf_data_client_cert_data(self): + """ + Test parsing kubeconf data with current context containing + cluster, user, namespace info and client-certificate-data and key + """ + kubecfg_data = { + 'current-context': 'context2', + 'contexts': [ + { + 'name': 'context1', + }, + { + 'name': 'context2', + 'context': { + 'cluster': 'cluster1', + 'user': 'user1', + 'namespace': 'namespace1' + } + } + ], + 'clusters': [ + { + 'name': 'cluster1', + 'cluster': { + 'certificate-authority-data': base64.b64encode("foobar"), + 'server': 'server1' + } + } + ], + 'users': [ + { + 'name': 'user1', + 'user': { + 'client-certificate-data': base64.b64encode("cert"), + 'client-key-data': base64.b64encode("key") + } + } + ] + } + + result = KubeConfig.parse_kubeconf_data(kubecfg_data) + + 
self.assertDictContainsSubset({'provider-api': 'server1', + 'namespace': 'namespace1', + 'provider-tlsverify': True}, + result) + + # verify content of ca file + ca_content = open(result['provider-cafile']).read() + self.assertEqual(ca_content, "foobar") + + # verify content client ca and key + client_ca_file, client_key_file = result['provider-auth'].split(":") + client_ca_content = open(client_ca_file).read() + client_key_content = open(client_key_file).read() + self.assertEqual(client_ca_content, "cert") + self.assertEqual(client_key_content, "key") + def test_parse_kubeconf_data_no_context(self): """ Test parsing kubeconf data with missing context data for From bdc3a2bd3cc953ea1436392367a3f92a1b532b8d Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Thu, 21 Apr 2016 07:59:37 -0400 Subject: [PATCH 5/5] Remove temporary files generated to store certs and keys Temporary files will be garbage collected. Also added a function in Utils to remove files. --- atomicapp/providers/lib/kubeconfig.py | 6 ++++++ atomicapp/utils.py | 15 +++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/atomicapp/providers/lib/kubeconfig.py b/atomicapp/providers/lib/kubeconfig.py index ca6aeb52..e76387e9 100644 --- a/atomicapp/providers/lib/kubeconfig.py +++ b/atomicapp/providers/lib/kubeconfig.py @@ -10,6 +10,7 @@ PROVIDER_TLS_VERIFY_KEY, PROVIDER_CA_KEY) import logging +import atexit from atomicapp.utils import Utils logger = logging.getLogger(LOGGER_DEFAULT) @@ -110,10 +111,12 @@ def parse_kubeconf_data(kubecfg): namespace = context["context"].get("namespace") tls_verify = not cluster["cluster"].get("insecure-skip-tls-verify") + temporary_files = [] if tls_verify: ca_data = cluster["cluster"].get("certificate-authority-data") if ca_data: ca = Utils.getTmpFile(b64decode(ca_data)) + temporary_files.append(ca) else: if "certificate-authority" in cluster["cluster"]: # if we are in container translate path to path on host @@ -136,6 +139,7 @@ def parse_kubeconf_data(kubecfg): if 
cert_data: cert = Utils.getTmpFile(b64decode(cert_data)) + temporary_files.append(cert) else: if "client-certificate" in user["user"]: cert = os.path.join(Utils.getRoot(), @@ -143,6 +147,7 @@ def parse_kubeconf_data(kubecfg): if key_data: key = Utils.getTmpFile(b64decode(key_data)) + temporary_files.append(key) else: if "client-key" in user["user"]: key = os.path.join(Utils.getRoot(), @@ -150,6 +155,7 @@ def parse_kubeconf_data(kubecfg): auth = "{}:{}".format(cert, key) + atexit.register(Utils.rm_files, *temporary_files) return {PROVIDER_API_KEY: url, PROVIDER_AUTH_KEY: auth, NAMESPACE_KEY: namespace, diff --git a/atomicapp/utils.py b/atomicapp/utils.py index 112ebacc..ac488301 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -379,6 +379,21 @@ def rm_dir(directory): logger.debug('Recursively removing directory: %s' % directory) distutils.dir_util.remove_tree(directory) + @staticmethod + def rm_files(*files): + """ + Remove the files whose paths are provided in 'files' + + Args: + files (list/tuple): each item in iterable is a path to file + as string + + Returns: None + """ + for file in files: + if os.path.exists(file): + os.remove(file) + @staticmethod def getUserName(): """