From 397223c0f1413be747fc218384ee4edb48169524 Mon Sep 17 00:00:00 2001 From: Tony Xu Date: Tue, 14 Jul 2020 17:24:54 -0700 Subject: [PATCH] Revert "Revert "Support dynamic kubelet (#139)"" This reverts commit 7250ec2f07ff327ad968d061584b6977a24c4b0e. --- .gitignore | 1 + go.mod | 3 +- .../linux/cloud-init/artifacts/cse_config.sh | 12 + .../cloud-init/artifacts/kubelet.service | 5 +- parts/linux/cloud-init/nodecustomdata.yml | 2 +- pkg/agent/agent_suite_test.go | 21 + pkg/agent/baker.go | 16 +- pkg/agent/baker_test.go | 164 +++++ .../AKSUbuntu1604+AKSCustomCloud/CSECommand | 1 + .../AKSUbuntu1604+AKSCustomCloud/CustomData | 160 +++++ .../AKSUbuntu1604+AKSCustomCloud/line16.sh | 160 +++++ .../AKSUbuntu1604+AKSCustomCloud/line23.sh | 276 ++++++++ .../AKSUbuntu1604+AKSCustomCloud/line30.sh | 337 +++++++++ .../AKSUbuntu1604+AKSCustomCloud/line43.sh | 38 + .../AKSUbuntu1604+AKSCustomCloud/line9.sh | 305 ++++++++ .../testdata/AKSUbuntu1604+K8S115/CSECommand | 1 + .../testdata/AKSUbuntu1604+K8S115/CustomData | 160 +++++ .../testdata/AKSUbuntu1604+K8S115/line16.sh | 160 +++++ .../testdata/AKSUbuntu1604+K8S115/line23.sh | 276 ++++++++ .../testdata/AKSUbuntu1604+K8S115/line30.sh | 337 +++++++++ .../testdata/AKSUbuntu1604+K8S115/line43.sh | 38 + .../testdata/AKSUbuntu1604+K8S115/line9.sh | 305 ++++++++ .../testdata/AKSUbuntu1604+K8S117/CSECommand | 1 + .../testdata/AKSUbuntu1604+K8S117/CustomData | 158 +++++ .../testdata/AKSUbuntu1604+K8S117/line16.sh | 160 +++++ .../testdata/AKSUbuntu1604+K8S117/line23.sh | 276 ++++++++ .../testdata/AKSUbuntu1604+K8S117/line30.sh | 337 +++++++++ .../testdata/AKSUbuntu1604+K8S117/line43.sh | 38 + .../testdata/AKSUbuntu1604+K8S117/line9.sh | 305 ++++++++ .../testdata/AKSUbuntu1604+K8S118/CSECommand | 1 + .../testdata/AKSUbuntu1604+K8S118/CustomData | 158 +++++ .../testdata/AKSUbuntu1604+K8S118/line16.sh | 160 +++++ .../testdata/AKSUbuntu1604+K8S118/line23.sh | 276 ++++++++ .../testdata/AKSUbuntu1604+K8S118/line30.sh | 337 +++++++++ .../testdata/AKSUbuntu1604+K8S118/line43.sh | 38 + .../testdata/AKSUbuntu1604+K8S118/line9.sh | 305 ++++++++ .../CSECommand | 1 + .../CustomData | 215 ++++++ .../line16.sh | 158 +++++ .../line23.sh | 296 ++++++++ .../line30.sh | 347 ++++++++++ .../line43.sh | 38 + .../line9.sh | 305 ++++++++ .../AKSUbuntu1604+TempDisk/CSECommand | 1 + .../AKSUbuntu1604+TempDisk/CustomData | 161 +++++ .../testdata/AKSUbuntu1604+TempDisk/line16.sh | 160 +++++ .../testdata/AKSUbuntu1604+TempDisk/line23.sh | 276 ++++++++ .../testdata/AKSUbuntu1604+TempDisk/line30.sh | 337 +++++++++ .../testdata/AKSUbuntu1604+TempDisk/line43.sh | 38 + .../testdata/AKSUbuntu1604+TempDisk/line9.sh | 305 ++++++++ pkg/agent/testdata/RawUbuntu/CSECommand | 1 + pkg/agent/testdata/RawUbuntu/CustomData | 220 ++++++ pkg/agent/testdata/RawUbuntu/line119.sh | 3 + pkg/agent/testdata/RawUbuntu/line16.sh | 160 +++++ pkg/agent/testdata/RawUbuntu/line23.sh | 276 ++++++++ pkg/agent/testdata/RawUbuntu/line30.sh | 337 +++++++++ pkg/agent/testdata/RawUbuntu/line38.sh | 77 ++ pkg/agent/testdata/RawUbuntu/line52.sh | 38 + pkg/agent/testdata/RawUbuntu/line62.sh | 79 +++ pkg/agent/testdata/RawUbuntu/line69.sh | 8 + pkg/agent/testdata/RawUbuntu/line76.sh | 7 + pkg/agent/testdata/RawUbuntu/line83.sh | 9 + pkg/agent/testdata/RawUbuntu/line9.sh | 305 ++++++++ pkg/agent/testdata/RawUbuntu/line90.sh | 19 + pkg/agent/testdata/RawUbuntu/line97.sh | 0 pkg/agent/testdata/convert.sh | 17 + pkg/agent/types.go | 322 +++++++++ pkg/agent/utils.go | 174 +++++ pkg/agent/utils_test.go | 115 +++ 
pkg/templates/templates_generated.go | 19 +- vendor/github.com/google/go-cmp/LICENSE | 27 + .../github.com/google/go-cmp/cmp/compare.go | 655 ++++++++++++++++++ .../google/go-cmp/cmp/export_panic.go | 15 + .../google/go-cmp/cmp/export_unsafe.go | 25 + .../go-cmp/cmp/internal/diff/debug_disable.go | 17 + .../go-cmp/cmp/internal/diff/debug_enable.go | 122 ++++ .../google/go-cmp/cmp/internal/diff/diff.go | 372 ++++++++++ .../google/go-cmp/cmp/internal/flags/flags.go | 9 + .../cmp/internal/flags/toolchain_legacy.go | 10 + .../cmp/internal/flags/toolchain_recent.go | 10 + .../go-cmp/cmp/internal/function/func.go | 99 +++ .../cmp/internal/value/pointer_purego.go | 23 + .../cmp/internal/value/pointer_unsafe.go | 26 + .../google/go-cmp/cmp/internal/value/sort.go | 106 +++ .../google/go-cmp/cmp/internal/value/zero.go | 48 ++ .../github.com/google/go-cmp/cmp/options.go | 549 +++++++++++++++ vendor/github.com/google/go-cmp/cmp/path.go | 377 ++++++++++ vendor/github.com/google/go-cmp/cmp/report.go | 51 ++ .../google/go-cmp/cmp/report_compare.go | 296 ++++++++ .../google/go-cmp/cmp/report_reflect.go | 278 ++++++++ .../google/go-cmp/cmp/report_slices.go | 333 +++++++++ .../google/go-cmp/cmp/report_text.go | 387 +++++++++++ .../google/go-cmp/cmp/report_value.go | 121 ++++ vendor/modules.txt | 6 + 94 files changed, 14103 insertions(+), 11 deletions(-) create mode 100644 pkg/agent/agent_suite_test.go create mode 100644 pkg/agent/baker_test.go create mode 100644 pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/CSECommand create mode 100644 pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/CustomData create mode 100644 pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line16.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line23.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line30.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line43.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line9.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S115/CSECommand create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S115/CustomData create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S115/line16.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S115/line23.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S115/line30.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S115/line43.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S115/line9.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S117/CSECommand create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S117/CustomData create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S117/line16.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S117/line23.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S117/line30.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S117/line43.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S117/line9.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S118/CSECommand create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S118/CustomData create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S118/line16.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S118/line23.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S118/line30.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S118/line43.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+K8S118/line9.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/CSECommand create mode 100644 
pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/CustomData create mode 100644 pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line16.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line23.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line30.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line43.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line9.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+TempDisk/CSECommand create mode 100644 pkg/agent/testdata/AKSUbuntu1604+TempDisk/CustomData create mode 100644 pkg/agent/testdata/AKSUbuntu1604+TempDisk/line16.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+TempDisk/line23.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+TempDisk/line30.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+TempDisk/line43.sh create mode 100644 pkg/agent/testdata/AKSUbuntu1604+TempDisk/line9.sh create mode 100644 pkg/agent/testdata/RawUbuntu/CSECommand create mode 100644 pkg/agent/testdata/RawUbuntu/CustomData create mode 100644 pkg/agent/testdata/RawUbuntu/line119.sh create mode 100644 pkg/agent/testdata/RawUbuntu/line16.sh create mode 100644 pkg/agent/testdata/RawUbuntu/line23.sh create mode 100644 pkg/agent/testdata/RawUbuntu/line30.sh create mode 100644 pkg/agent/testdata/RawUbuntu/line38.sh create mode 100644 pkg/agent/testdata/RawUbuntu/line52.sh create mode 100644 pkg/agent/testdata/RawUbuntu/line62.sh create mode 100644 pkg/agent/testdata/RawUbuntu/line69.sh create mode 100644 pkg/agent/testdata/RawUbuntu/line76.sh create mode 100644 pkg/agent/testdata/RawUbuntu/line83.sh create mode 100644 pkg/agent/testdata/RawUbuntu/line9.sh create mode 100644 pkg/agent/testdata/RawUbuntu/line90.sh create mode 100644 pkg/agent/testdata/RawUbuntu/line97.sh create mode 100755 pkg/agent/testdata/convert.sh create mode 100644 pkg/agent/utils_test.go create mode 100644 vendor/github.com/google/go-cmp/LICENSE create mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go create mode 100644 vendor/github.com/google/go-cmp/cmp/options.go create mode 100644 vendor/github.com/google/go-cmp/cmp/path.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_reflect.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go create mode 100644 
vendor/github.com/google/go-cmp/cmp/report_text.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_value.go diff --git a/.gitignore b/.gitignore index 19ad8e181e6..f5bcd23ca04 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,7 @@ _logs/ cmd/_test_output *.test *.coverprofile +**/junit.xml .idea .vs diff --git a/go.mod b/go.mod index e07ed226428..ad78088ff8e 100644 --- a/go.mod +++ b/go.mod @@ -8,12 +8,13 @@ require ( github.com/Azure/go-autorest/autorest/to v0.3.0 github.com/blang/semver v3.5.1+incompatible github.com/go-playground/universal-translator v0.17.0 // indirect + github.com/google/go-cmp v0.4.0 github.com/google/uuid v1.1.1 github.com/imdario/mergo v0.3.9 // indirect github.com/leodido/go-urn v1.2.0 // indirect github.com/leonelquinteros/gotext v1.4.0 github.com/mattn/go-colorable v0.0.9 - github.com/onsi/ginkgo v1.12.2 // indirect + github.com/onsi/ginkgo v1.12.2 github.com/onsi/gomega v1.10.1 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.6.0 diff --git a/parts/linux/cloud-init/artifacts/cse_config.sh b/parts/linux/cloud-init/artifacts/cse_config.sh index 1b2c42e2660..f9c3464b3d2 100755 --- a/parts/linux/cloud-init/artifacts/cse_config.sh +++ b/parts/linux/cloud-init/artifacts/cse_config.sh @@ -208,6 +208,18 @@ EOF EOF set -x {{end}} + +{{- if IsDynamicKubeletSupported}} + set +x + KUBELET_CONFIG_JSON_PATH="/etc/default/kubeletconfig.json" + touch "${KUBELET_CONFIG_JSON_PATH}" + chmod 0644 "${KUBELET_CONFIG_JSON_PATH}" + chown root:root "${KUBELET_CONFIG_JSON_PATH}" + cat << EOF > "${KUBELET_CONFIG_JSON_PATH}" +{{GetDynamicKubeletConfigFileContent}} +EOF + set -x +{{- end}} } configureCNI() { diff --git a/parts/linux/cloud-init/artifacts/kubelet.service b/parts/linux/cloud-init/artifacts/kubelet.service index c61a469cc65..44cfdc0d599 100644 --- a/parts/linux/cloud-init/artifacts/kubelet.service +++ b/parts/linux/cloud-init/artifacts/kubelet.service @@ -33,7 +33,10 @@ ExecStart=/usr/local/bin/kubelet \ --node-labels="${KUBELET_NODE_LABELS}" \ --v=2 {{if NeedsContainerd}}--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock{{end}} \ --volume-plugin-dir=/etc/kubernetes/volumeplugins \ - $KUBELET_CONFIG \ + {{- if IsDynamicKubeletSupported}} + --config /etc/default/kubeletconfig.json --dynamic-config-dir /etc/default/dynamickubelet \ + {{- end}} + $KUBELET_FLAGS \ $KUBELET_REGISTER_NODE $KUBELET_REGISTER_WITH_TAINTS [Install] diff --git a/parts/linux/cloud-init/nodecustomdata.yml b/parts/linux/cloud-init/nodecustomdata.yml index c4a99290311..ae6968ac2be 100644 --- a/parts/linux/cloud-init/nodecustomdata.yml +++ b/parts/linux/cloud-init/nodecustomdata.yml @@ -356,7 +356,7 @@ write_files: permissions: "0644" owner: root content: | - KUBELET_CONFIG={{GetKubeletConfigKeyVals .KubernetesConfig }} + KUBELET_FLAGS={{GetKubeletConfigKeyVals .KubernetesConfig }} KUBELET_REGISTER_SCHEDULABLE=true {{- if not (IsKubernetesVersionGe "1.17.0")}} KUBELET_IMAGE={{GetHyperkubeImageReference}} diff --git a/pkg/agent/agent_suite_test.go b/pkg/agent/agent_suite_test.go new file mode 100644 index 00000000000..17312a1d0f6 --- /dev/null +++ b/pkg/agent/agent_suite_test.go @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +package agent + +import ( + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" + . 
"github.com/onsi/gomega" + + "testing" +) + +var _ = BeforeSuite(func() { +}) + +func TestUtils(t *testing.T) { + RegisterFailHandler(Fail) + junitReporter := reporters.NewJUnitReporter("junit.xml") + RunSpecsWithDefaultAndCustomReporters(t, "Agent Suite", []Reporter{junitReporter}) +} diff --git a/pkg/agent/baker.go b/pkg/agent/baker.go index c1de1813454..cbd372fb413 100644 --- a/pkg/agent/baker.go +++ b/pkg/agent/baker.go @@ -19,11 +19,6 @@ import ( "github.com/Azure/aks-engine/pkg/i18n" ) -// Context represents the object that is passed to the package -type Context struct { - Translator *i18n.Translator -} - // TemplateGenerator represents the object that performs the template generation. type TemplateGenerator struct { Translator *i18n.Translator @@ -232,11 +227,20 @@ func getContainerServiceFuncMap(cs *api.ContainerService, profile *api.AgentPool "GetAgentKubernetesLabelsDeprecated": func(profile *api.AgentPoolProfile, rg string) string { return profile.GetKubernetesLabels(rg, true) }, + "GetDynamicKubeletConfigFileContent": func() string { + if profile.KubernetesConfig == nil { + return "" + } + return getDynamicKubeletConfigFileContent(profile.KubernetesConfig.KubeletConfig) + }, + "IsDynamicKubeletSupported": func() bool { + return IsDynamicKubeletSupported(cs) + }, "GetKubeletConfigKeyVals": func(kc *api.KubernetesConfig) string { if kc == nil { return "" } - return kc.GetOrderedKubeletConfigString() + return GetOrderedKubeletConfigFlagString(kc, cs) }, "GetKubeletConfigKeyValsPsh": func(kc *api.KubernetesConfig) string { if kc == nil { diff --git a/pkg/agent/baker_test.go b/pkg/agent/baker_test.go new file mode 100644 index 00000000000..86c7b6ac813 --- /dev/null +++ b/pkg/agent/baker_test.go @@ -0,0 +1,164 @@ +package agent + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + + "github.com/Azure/aks-engine/pkg/api" + "github.com/Azure/aks-engine/pkg/api/common" + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Assert generated customData and cseCmd", func() { + DescribeTable("Generated customData and CSE", func(folder, k8sVersion string, containerServiceUpdator func(*api.ContainerService)) { + cs := &api.ContainerService{ + Location: "southcentralus", + Type: "Microsoft.ContainerService/ManagedClusters", + Properties: &api.Properties{ + OrchestratorProfile: &api.OrchestratorProfile{ + OrchestratorType: api.Kubernetes, + OrchestratorVersion: k8sVersion, + KubernetesConfig: &api.KubernetesConfig{ + KubeletConfig: map[string]string{ + "--feature-gates": "RotateKubeletServerCertificate=true,a=b, PodPriority=true, x=y", + }, + }, + }, + HostedMasterProfile: &api.HostedMasterProfile{ + DNSPrefix: "uttestdom", + }, + AgentPoolProfiles: []*api.AgentPoolProfile{ + { + Name: "agent2", + Count: 3, + VMSize: "Standard_DS1_v2", + StorageProfile: "ManagedDisks", + OSType: api.Linux, + VnetSubnetID: "/subscriptions/359833f5/resourceGroups/MC_rg/providers/Microsoft.Network/virtualNetworks/aks-vnet-07752737/subnet/subnet1", + AvailabilityProfile: api.VirtualMachineScaleSets, + KubernetesConfig: &api.KubernetesConfig{ + KubeletConfig: map[string]string{ + "--address": "0.0.0.0", + "--pod-manifest-path": "/etc/kubernetes/manifests", + "--cluster-domain": "cluster.local", + "--cluster-dns": "10.0.0.10", + "--cgroups-per-qos": "true", + "--tls-cert-file": "/etc/kubernetes/certs/kubeletserver.crt", + "--tls-private-key-file": "/etc/kubernetes/certs/kubeletserver.key", + "--tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256", + "--max-pods": "110", + "--node-status-update-frequency": "10s", + "--image-gc-high-threshold": "85", + "--image-gc-low-threshold": "80", + "--event-qps": "0", + "--pod-max-pids": "-1", + "--enforce-node-allocatable": "pods", + "--streaming-connection-idle-timeout": "4h0m0s", + "--rotate-certificates": "true", + "--read-only-port": "10255", + "--protect-kernel-defaults": "true", + "--resolv-conf": "/etc/resolv.conf", + "--anonymous-auth": "false", + "--client-ca-file": "/etc/kubernetes/certs/ca.crt", + "--authentication-token-webhook": "true", + "--authorization-mode": "Webhook", + "--eviction-hard": "memory.available<750Mi,nodefs.available<10%,nodefs.inodesFree<5%", + "--feature-gates": "RotateKubeletServerCertificate=true,a=b,PodPriority=true,x=y", + "--system-reserved": "cpu=2,memory=1Gi", + "--kube-reserved": "cpu=100m,memory=1638Mi", + }, + }, + Distro: api.AKSUbuntu1604, + }, + }, + LinuxProfile: &api.LinuxProfile{ + AdminUsername: "azureuser", + }, + ServicePrincipalProfile: &api.ServicePrincipalProfile{ + ClientID: "ClientID", + Secret: "Secret", + }, + }, + } + cs.Properties.LinuxProfile.SSH.PublicKeys = []api.PublicKey{{ + KeyData: string("testsshkey"), + }} + + // AKS always pass in te customHyperKubeImage to aks-e, so we don't really rely on + // the default component version for "hyperkube", which is not set since 1.17 + if IsKubernetesVersionGe(k8sVersion, "1.17.0") { + cs.Properties.OrchestratorProfile.KubernetesConfig.CustomHyperkubeImage = fmt.Sprintf("k8s.gcr.io/hyperkube-amd64:v%v", k8sVersion) + } + + if containerServiceUpdator != nil { + containerServiceUpdator(cs) + } + + agentPool := cs.Properties.AgentPoolProfiles[0] + baker := InitializeTemplateGenerator() + + 
// customData + customData := baker.GetNodeBootstrappingPayload(cs, agentPool) + // Uncomment below line to generate test data in local if agentbaker is changed in generating customData + // backfillCustomData(folder, customData) + expectedCustomData, err := ioutil.ReadFile(fmt.Sprintf("./testdata/%s/CustomData", folder)) + if err != nil { + panic(err) + } + Expect(customData).To(Equal(string(expectedCustomData))) + + // CSE + cseCommand := baker.GetNodeBootstrappingCmd(cs, agentPool, + "tenantID", "subID", "resourceGroupName", "userAssignedID", true, true) + // Uncomment below line to generate test data in local if agentbaker is changed in generating customData + // ioutil.WriteFile(fmt.Sprintf("./testdata/%s/CSECommand", folder), []byte(cseCommand), 0644) + expectedCSECommand, err := ioutil.ReadFile(fmt.Sprintf("./testdata/%s/CSECommand", folder)) + if err != nil { + panic(err) + } + Expect(cseCommand).To(Equal(string(expectedCSECommand))) + + }, Entry("AKSUbuntu1604 with k8s version less than 1.18", "AKSUbuntu1604+K8S115", "1.15.7", nil), + Entry("AKSUbuntu1604 with k8s version 1.18", "AKSUbuntu1604+K8S118", "1.18.2", nil), + Entry("AKSUbuntu1604 with k8s version 1.17", "AKSUbuntu1604+K8S117", "1.17.7", nil), + Entry("AKSUbuntu1604 with Temp Disk", "AKSUbuntu1604+TempDisk", "1.15.7", func(cs *api.ContainerService) { + cs.Properties.OrchestratorProfile.KubernetesConfig = &api.KubernetesConfig{ + ContainerRuntimeConfig: map[string]string{ + common.ContainerDataDirKey: "/mnt/containers", + }, + } + }), + Entry("AKSUbuntu1604 with Temp Disk and containerd", "AKSUbuntu1604+TempDisk+Containerd", "1.15.7", func(cs *api.ContainerService) { + cs.Properties.OrchestratorProfile.KubernetesConfig = &api.KubernetesConfig{ + ContainerRuntimeConfig: map[string]string{ + common.ContainerDataDirKey: "/mnt/containers", + }, + } + cs.Properties.AgentPoolProfiles[0].KubernetesConfig = &api.KubernetesConfig{ + KubeletConfig: map[string]string{}, + ContainerRuntime: api.Containerd, + } + }), + Entry("AKSUbuntu1604 with RawUbuntu", "RawUbuntu", "1.15.7", func(cs *api.ContainerService) { + // cs.Properties.OrchestratorProfile.KubernetesConfig = nil + cs.Properties.AgentPoolProfiles[0].Distro = api.Ubuntu + }), + Entry("AKSUbuntu1604 with AKSCustomCloud", "AKSUbuntu1604+AKSCustomCloud", "1.15.7", func(cs *api.ContainerService) { + // cs.Properties.OrchestratorProfile.KubernetesConfig = nil + cs.Location = "usnat" + })) +}) + +func backfillCustomData(folder, customData string) { + if _, err := os.Stat(fmt.Sprintf("./testdata/%s", folder)); os.IsNotExist(err) { + e := os.MkdirAll(fmt.Sprintf("./testdata/%s", folder), 0755) + Expect(e).To(BeNil()) + } + ioutil.WriteFile(fmt.Sprintf("./testdata/%s/CustomData", folder), []byte(customData), 0644) + err := exec.Command("/bin/sh", "-c", fmt.Sprintf("./testdata/convert.sh testdata/%s", folder)).Run() + Expect(err).To(BeNil()) +} diff --git a/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/CSECommand b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/CSECommand new file mode 100644 index 00000000000..861efb3f379 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/CSECommand @@ -0,0 +1 @@ +echo $(date),$(hostname); retrycmd_if_failure() { r=$1; w=$2; t=$3; shift && shift && shift; for i in $(seq 1 $r); do timeout $t ${@}; [ $? 
-eq 0 ] && break || if [ $i -eq $r ]; then return 1; else sleep $w; fi; done }; ERR_OUTBOUND_CONN_FAIL=50; retrycmd_if_failure 50 1 3 nc -vz mcr.microsoft.com 443 2>&1 || exit $ERR_OUTBOUND_CONN_FAIL; for i in $(seq 1 1200); do grep -Fq "EOF" /opt/azure/containers/provision.sh && break; if [ $i -eq 1200 ]; then exit 100; else sleep 1; fi; done; ADMINUSER=azureuser CONTAINERD_VERSION= MOBY_VERSION= TENANT_ID=tenantID KUBERNETES_VERSION=1.15.7 HYPERKUBE_URL=hyperkube-amd64:v1.15.7 APISERVER_PUBLIC_KEY= SUBSCRIPTION_ID=subID RESOURCE_GROUP=resourceGroupName LOCATION=usnat VM_TYPE=vmss SUBNET=subnet1 NETWORK_SECURITY_GROUP=aks-agentpool-36873793-nsg VIRTUAL_NETWORK=aks-vnet-07752737 VIRTUAL_NETWORK_RESOURCE_GROUP=MC_rg ROUTE_TABLE=aks-agentpool-36873793-routetable PRIMARY_AVAILABILITY_SET= PRIMARY_SCALE_SET=aks-agent2-36873793-vmss SERVICE_PRINCIPAL_CLIENT_ID=ClientID SERVICE_PRINCIPAL_CLIENT_SECRET='Secret' KUBELET_PRIVATE_KEY= NETWORK_PLUGIN= NETWORK_POLICY= VNET_CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/azure-cni/v1.1.3/binaries/azure-vnet-cni-linux-amd64-v1.1.3.tgz CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/cni/cni-plugins-amd64-v0.7.6.tgz CLOUDPROVIDER_BACKOFF= CLOUDPROVIDER_BACKOFF_MODE= CLOUDPROVIDER_BACKOFF_RETRIES=0 CLOUDPROVIDER_BACKOFF_EXPONENT=0 CLOUDPROVIDER_BACKOFF_DURATION=0 CLOUDPROVIDER_BACKOFF_JITTER=0 CLOUDPROVIDER_RATELIMIT= CLOUDPROVIDER_RATELIMIT_QPS=0 CLOUDPROVIDER_RATELIMIT_QPS_WRITE=0 CLOUDPROVIDER_RATELIMIT_BUCKET=0 CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE=0 LOAD_BALANCER_DISABLE_OUTBOUND_SNAT= USE_MANAGED_IDENTITY_EXTENSION=false USE_INSTANCE_METADATA=false LOAD_BALANCER_SKU= EXCLUDE_MASTER_FROM_STANDARD_LB=true MAXIMUM_LOADBALANCER_RULE_COUNT=0 CONTAINER_RUNTIME= CONTAINERD_DOWNLOAD_URL_BASE=https://storage.googleapis.com/cri-containerd-release/ NETWORK_MODE= KUBE_BINARY_URL= USER_ASSIGNED_IDENTITY_ID=userAssignedID API_SERVER_NAME= IS_VHD=true GPU_NODE=false SGX_NODE=false AUDITD_ENABLED=false CONFIG_GPU_DRIVER_IF_NEEDED=true ENABLE_GPU_DEVICE_PLUGIN_IF_NEEDED=true /usr/bin/nohup /bin/bash -c "/bin/bash /opt/azure/containers/provision.sh >> /var/log/azure/cluster-provision.log 2>&1; systemctl --no-pager -l status kubelet 2>&1 | head -n 100" \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/CustomData b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/CustomData new file mode 100644 index 00000000000..48415a3fb4a --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/CustomData @@ -0,0 +1,160 @@ +[base64(concat('#cloud-config + +write_files: +- path: /opt/azure/containers/provision_source.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9xZbVPbSBL+7l/R69UtkFvZlh1YEkq5FdbgqDCSSi+wXMJOCWlsqxCSI41JsoH/fjWjV7+QNdm9Ind8MupnerqffqanZf/4Q/c6jLvXXjZrtZBlYfvSdtDZ0Blj21EsB58o2lh+Cdw2HBuuijVdc7CjnSHDdeT93HKijRG+UJzh28pykFveGmMVXyhjTXd/U0ZId+RfcoOFxkix0QbAYQ5QTAdruu0o43Hl9FVu0h1zzST1alseemWRcovtGGaRn8qN9qU+VGtYP4epxvAUWWv++70ls2pc6GNDqZf3pSX7KbrcgFnegmXIcJV5sGRuFKBfVODMOL7ky8aaXSfYL6pwZuOROXpk64OGh7XcipqcHtrYcnVd00eVbdCrbWteB0XSp+4xYprRDQefGK6uyoMiVe1stGHZoF42Rk4z00GpNUN3FE1HuQfTbUQ7KNId6tq665dSxYVpGSpW0fEGUH8dZJ6OsKKqheAH0DwOasVYbj0suVRNyzhGxdNCnIbrHDMKWAp6btpvcKiYGraRdY6sJkDaCFB1G48N49Q1C1h/I0z5t2uhdXBJsuIomzVx0GsgVsV4IDWMq4I56K9USV33/kvhfejajnGGbaRYw7dYNc4UTbfzEA8LyMh0sWpp58iym1o4fLluXo3kcL+BQefaEGFz7I40fcnRQQ3S9BMDW8YZHhqW5ZoOUuXDQv726LdH93nVW8c0dngl1U1LVbRxzeOrRjtzTVVxUG0qFDO0EdPhuWZrho7toaWZDj9KFlLUy0aH6zU2YQ3ANUeWoqIGohGG6VqjQppSr6iXfWmzY2ohVilZ6hUSGWo2VmxbG+nYMgwHmxeyVHbNho13eRNZZ5rNIpWlsmWaCm9YQ8O85CBZkpqeTXN8iU3Fti8Mix+LE20kS9I+LB2xqusZhYAl6aA4hedv1Xzzur9IZUtktmNXG6sYWZZhyRLrhvmy/FzYjjI8xSPkYMU6w45xinRZKrv5KkRHzoVhnRZBupbi8ETL5r4Kt91jHTnYtNCJ9psssf7eMmxZ2M2SlIKYQpdQv/tCTElEvIzAPUy9jzewc+tRf7Yr9H6G7u+7morH2imSd/0kJUm2d6+p8m7nxd6e0P0ZvD34AvM0jCnQZDGfk3TXe9e/Au/d4GrvCMinkMLDzl7LPXZ1x8WGjXXlDMnt/P92y3qLxvVT9l+7NTQsZNj10/z/dqvo4nJ3kaXdKPG9iA8HN4tr4tOolRcot7LnQeLfkLRFPs1ZsvwAnssvpcOO1D/o9PpLBmQ7TbfxXRiEXks/11RNKSvPzhRju9/pdQatlYdSRxp0JFEq19T3g+XqTP9Ly3srrm33hBUoj1g67PRedfqi1Gp5c3rrpTcXyjiMF5+UKYnp7h58aQEAfPRCiidJir05xVHi32T8cUpo+tm/DXA4wRMvjBYpASanfejvgzenInMIggQfvYg59ZhTuL+H93x5OIF376AtSG2QZWjPkihow9XVEdAZiTmC/fGyChvnJ44h0YqfRfynnh4duDh2ErYeWq0NyVV8MFtIMlmQjnJusoiQuSz0j4CGtyRZUFkYHEE2CycUfvpp5UO+SZJCCGEMwm5GPoAEQuF07wiCpIq5cAdC9eHLrw/M03VKvJuay5JPEEIQyYfKG6ySwInwZwmgT8RfUBLA+7bw6/s2W8j2yI6WoCmhizQGqSYxysgSgqcOQk1DZZ2E/GOQxPmKP9n2YRPlOE5wRj2a/b9w/3cR2mBrSiimXnrtRVHFEvVS/DWmcjhnapFGsvCyrlBb+NJY/VAS3n6EugZ2hT4vBZH+MeEQtts21DW8fSN9ZbEOeuAv0gjESWaPQeAfkyqWv8444UL2rqOt2sIkjMjco7MG40dw50Vh4NEwibGXTjNZ2F+qwrYV2Mi+UO4Iwso2/30FlyUYbC5BGdnSGn92mwTwz0+PmLctUXVTMS9PLAyHzr07EsjdZE673h+LlHT9KFkEYhiHVGTYrMMRHDtNyRzEkw+sWKWfhzYIHMFoLqjqPaVwldOdH5FxstMoZFm352s6HEkCEENod1l43aC9UjAu3jrmN28KOpq1qaaIesCYhRGBySIjKXTvvLQbhdfdYH4zZUPSTf3Im9NuFGY0azz3PX9GuMVL/Vl4Rwrjm25A7rrxIoqg/+YnaYlkHuXOhRfSMJ7yypRjaTJhkwvw8HZay/QMmkpjSbA+sJgHHl2XmpQXnaFyBE4WdL6gcpfezlm04pRQMTd1ksWTLqZHxjGeWD5oquhYU3R8Yhm6g3RVjpM4jClJPZ+Gd3XlGcMgin4ST8Ipm95ED0RxkqQ+4Q8DMqmwRcggTkD8DGGc0WYb/QF2S0CeE+cc7oESAsIaC2z65zpH0P59990FunrdebF3v/uOoKs07bzYE9p7TO91e/I9usnP5l62Gfw3nZtCDPtbzjkrtJTzzmNzdS2sguJnGHyeVV9F2uymUOc309evjTm7urLXr+V2cy2f8UUxTsRihZgSP7m9JXGQMYU+97BWnv66RzxRMBURj+e4cXT/qqjmi3T6HO8xzyopnvSWgnp+4WynjiKnb9FAEGZMktPUC0opLN1aDNfEbLy5GEAsAMX9tYUUHpHB9hLYtvyP31bV9xLZLPk4S6J8lGvcX83M2OLVi2wDNU+7zqrbaZOndeF9Dd7aUpVLely7xLjiCr1tVtsSJ40rbKPOss8ZJbc+jXBKMuql9Gn9BrI7P/ZuSflS+q1v61UYEHjkNonFlESJF2yzoIgbhCKU76UdNLnNaDL/nyOWBf1dsxqE2bav9N8VsUXc3ye3eStgqTyN179KJiNGFHOSvh9C7kiahUmMp9VbIyUZhbawy3/bmMDOP7L38Q60hV/bcA/FzyfncA8z4gUgxiDt5d90C1K79dCq1YtipgElDuylrrvWj0Hq9WAfBj0Q8nwslP9+ZzuK49qy8K/ldZBRjy4yEKR8GJ17U5KCGMGb4s08mRZflgiSmGM7UTJtXE7LG4AYE+it0Vx87yWBnyyiAOKEwjUBHjEJ2q2NlSioDSfww9d/jqiTIXF+UqStts/RAVx/rl08HkuzGmp+JHk5Gq2aE1JHE/GbNQ5pxuaYBZ/32dRfThQfWEidjKR3oU/aa5wt3wdQZitITOaPEZrM501C17rfVm4KbOGH5/7jWzQ2kWUj46T1nwAAAP//FTwo004jAAA= + +- path: /opt/azure/containers/provision.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/6xXe3PaOhb/35/ilDJ59NYYSG/abIfuUHAaNhSztsnd3d6Ox1jHoImRXEnO47b57juSeZiE3iad+o9MkI7O+Z3XT0fPnzlTypxpLOeW6/vR6WDoRn90w95ZFA4+ut4k7ByDJVGBfWPRFD6BnYLDc+XEfxUCnYQzFVOGQjq54FdUUs4aCV/kGSqEz29BzZFZYD5M5hxq3UxgTG5BxAwUB1kkCUoJeEMVZbNGo1Fbid9QBU0rpZZlTtYPSKzw8GX9YM6lYvECD1+CVLFQSSEVX8hE0Fy9e2fQLSwr5QIoUAb1A4lfoAVHx83m4Vsg3BgonZE/ciaSvBAJNuT8njcAM4E52KdfYP/5mTscu37geqf7T1C4twdTgfGl0ZjSDaw6BRu/GMAPrJqw1HfnyghhJnEtLTPEHForC4QztCQSsCnUnApsh9QeD9wq/3vCAes6pipKuYhSmmHpWOuH5ymTKs4yqWP17dvfe/5ISBWVPwkq4Syls1+JaaPRMp32243lhr1+NHZdP+q5ftipH5Qd8HV7fXA66HVDN7iDb5AUCmyy/2lfN2h7s/DZLLQ2Cy/NQv3goP515PXdaDDqu/+5+611eHhYMXvu/neX1bE/uOiGrt7+dVaX7GLo5RPUvQA6Haj3PN/1gsgLolH3owufq31QMklvHrMZZTMgmMZFpuCymGKiMphSBhlPYkU5K9nkfPLe7YXDjknFUswwy5rSrmLhiII5AqecK1vgl4IKJFvt57vvPS/03X9PBr7b7yhRoLXutnubaaw3tIkyvYXALllQNpEorCTDmE3y3qoaiLXyvlb/+mE8iXSM7mrwrAM1baW27f7y+IfxpC/oFQpp7Fyc9aOh9yEwlTjuhmedSuFdzYm9rP01Pa+drz84Cg+jTVBhopDAjGcEGdBFPEPIBa70lpHGm5wLpbUKVOI2WZCIplEa06wQeE/ifhgGWqWsCum8+SM3dIPowvWDgTeqIDLcr+8QoweKHNbtVaKTJSR9uYGdfMcc7Bmh08lwGA1GQdgdDqN7WVzneJ2jQRBdnPXvatABnZ/t9GwQTqQuz4uzPhAqleAwLRQYstkRcsYVpLxgpLaD67W4YZiRF0an3mTUr14au8Gb+lwV+aaxJu8no3BSaSx9D2mvdmrRLq5qsOLjMud9zOUmPKXTHyr18RbkJc3zsklzZARZQlGuji879BEYq+XIpG6mglDV10eXutZp9Qum6AJ1S5U7I1TXXFyOs2JG2Xr1vJhihqrLyPmSDyoYnv2IgAwGf9wrO1xgrFCr+RgzmqJUfSosqxR6iGtNCOdvZOVXbzSwrNWpJbrlr3/xQrC4RLg7TdXUPDqS+tuu2DYhrQRbr+3m6xO0XzWPEnt69Hvbjlsn7RZiu/kaEd6BI2+lMy2kc7XQf0nJQs78KioUzZyCTSkjmxlkOW+0juifv9zKn6wGDqrEEUlDc3626ouUWmJhqF3vxpoJc9XQ0W4Q5+QkR0E5ockTCi/OVTRDFeWFmCG0m3DUhFa7CXEeJ3Ns2xqVphNdEtZFdzjod8OBN4pc3+80re54EAWuf+H6UX8URL4b+gM36LSbKwQVAWO704EXjVzQq1hhRtll40UVznfVmYnZd4NO/WAHA0P96+6Td3pABiYzzi+LfFtMw7k7tIIw6NT/uYIbhKZNmtsxWm76rgnni1rr+E3j+KjRap80Wse1F/CwAO8FyrDd+Zsgqtjv/m/iuwbs0PPOJ+PotDsYPpx1H6Fpl46UbhisItrzRqN1WH9vVr17WqbK73uaW81mlcl/mLLq4WXOErCv/tqRMXj16kjPqI+Ii9FqImKK1/DM9kBzfx7YLwclWA1KL6Fc0FTPOEH98GrBgrJC4b45tn5o6ru4JueFIvyagS2gBXu1n6CuOFeLWFz+0R1SVtx0Z8gUFGzOM7K806uZfZpqp5DCyeh0zRvyVipckAaJaXa7VP9YDOsXbK1nHqpQvlQhpYzKOZKGLg6QKK5Q6BGGYaLvRUjmmFxCwgn+owb17Sz+zZMYGdn5IF5cEirAzne/R/QQoHiRzB/9urdyCXFxk15fa67eHFpL2rlsZHwGe5ZVDjL3XLCeu96p9f8AAAD//xsNyOSCEAAA + +- path: /opt/azure/containers/provision_installs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9waaXPaSPY7v+JFVm2uaQmcxJtNhuxikD2UMbg4MplNpVSN1IgeJLWm1XJMCP99q3ULhI9kNrs1qQqG7tev331JR4/0OfX1OQ6XjUa3a06M8ft+1zD7Q3N6edXWWSB0/CXiRLeYLzD1CQ91y0IBZzdrLST8mlpEo358dtS9MKb3PcqsFRHxyWHf7I6GZ/1zs9cftxWdCEu3fKr7RGi2Eu+f9ofppsQqN+fUT7Z6o1+Hg1GnN9kBsNln32XYDpVGdzScdvpDY9yrh87Is8uHLl5PaqFX0ZxwnwgSlqFnp7PhdGaOjYHRmRht9Ykbzk1OXIJDAogDCp82Gpx47Jpcsvn6yVPYNAAAcCCQQwQEEXcIoDV4bL5GxHeoT5Lvlksb2+xsNyf1DgwFT/Kw5RLsR0FxuueeUZeEORLuAeILUA9JSiKhfiiw6/ZIUDpHBF9bnm3ShbnA1I04MX1mhgKLEFrHTXgFx6/AirgLaBFOBrAUIgjf6HqArRV2SKh51OIsZAuhWcyTqlhQR4/mkS8iXd1UxbrNj6H8mDQnW7PJHN6BLrzgVpCvX4HcUAGqMR6blxPzajzqmT3jNGfWnPYvjdFseog5OJEstZpgBysHEP2OK68uzs1Or2eedfqDTJEe5qtfOwPqRzcdh/gClsy1s03TIcKMAhsLUsXauZqas6teZ2pUyM+O2DSU5xyO7ZqDvf5Enj4fd3rV4wvGYxQpc0B9wAG2luQYRYK6YWx4gmM/DBgXKFYszF02X0Qhabe0ltYCCyOLcEEX1MKChGCRYIks5nnMB8vhLAqQSwUBiy7CFKvFfF9wbK3Akp8unSMe+YJ6BMhc4LlLQiBiKRhzQd4EDhXguFEoCF+E0ltiuQkWAF3Enz4VKFyHgnhoSdyA8BAoi3cCziJBjoEGIRFAgxT973+AS+cB9lDw+Y8Iu1Ss44XsB5KXh+CxyBfgy0sTjgLqfIGQWVhAuA6lD4DkhMS3gGQPuVK1cPMlZfYLDd6CzWJ5A9AFPMq1lnobvGhCC06aTVBLyngLYkn89BjA7yziPnYt4QJCPkMBdggHFFXO5NBV/feHk2lnMKioHmBB4y8285NzdAEfP4KibjqzXn/aM41h53Rg9LYKtNsgeETg06cKUbfzgiObCvu+XCTQ30L/gpYC1/nVrMfpNeFF+PJWNuWAAlDPr2Zmz5hMpT9/X2Tzr6lNseZQsYzmGmXpArJlwuO6EzgrsoZ31St1HIjsYOBUvTSGG/ffG+NJLbefMRXmgnFTittl1io8GL1yuqXvSjKwbf8vCPlTJJkkif0cUYHSXBqKPWnXgGSc/ghWsbibnncQ10E4EHrIIm6RMF7X7O8nvppLDhrLC0nui9hd0xIj82Mk46Ef3aAlwTbhIVKfRD72ZKHzFBzLAg+vCNgrL3yYAR2k4qSZ2cdgkptHFGpZAaalpiJLCEFCF+uJfN/rw/f9Xr+D4pSKbl6fmCcvkbpJNrcaj3xADNIFYzItrCeJFAXswzgRXtCjvL0fVuK4+CSNUjLrICIpCGhAJMfphmXLWJsg2Srp4v10lIkEKmZy3FbUTSILsyfL9LEpae+PhtvnuxuT2dlZ/8NWeRjHTyvR/P7nKiF6cv5hN0QTa8lA6Sf71Hdgcv4BEu0kgnGZhV1IuYlX0u9t9YnDSQCywhn3T7MAkfjVM5QW51/BigSgBRwDskFpK09jHJas29UUE9CEL6X1Wmu+TCHkv8n5h5Q5czYetJXMNHO7bLY0xh2d+oK4KHRudNvCAWppx3rsQPHPLD3K9iiJaRPCrwmPL9ND58aMYc2bk5dmwrjZ0lrHptVqNZutYy3uhDKK3r5NKT35kZSefAelz0pUJsp+T3hImV8Sfwg+ExBGgSw1ia1Ua4HWLk4SYqtRMo6rTveic25M2kocmGSEksFJeWj63Pc2tM6DoprdAlXHKSS/7wAlGguwRlVfbfXJHIckjq9qVY1PS+dHRtyj1nfdjOyUO5sEfNu4u1QoJWZ1U71/mwTPFJde3t7eXwixuy09ZgN+fnMIXQz14Ksm0854mnRYRZDJu+Bx0lfkoSYrcvMm2BzPhpLKuMpVkkiq7Fa6kFmA7OsbpeKzdGWl5e/OxmNjODWLMJVgtgGh69T0v0IcupRevAPpsgJptLJB+UmRQatVWoF45UVp5XkC87TKXfX6hDd1czk6/S3PCTVMJq6ZUaqWwaV/YpcTbOfeQOyfIFzRIJARW3Kf56XE6YgbFg1JMRbJlxwi5O+rlZMvxTd2B/32Dq2FGrIeJYNMuhPlhdbUXtZwVEEaQ70oAkuqw3K5lDn6cTNuZI6b5VnNLlXP8uFNu0TQM0AIuy77jKQ44o58p0SKQQ/nyUajkMy3DGG+d/zCmZ2Xp7Jw3Rl4VIrRgh/Zpw36k+mdxV4rna5YwWH0h8ri77j3G6W1IuuwIFHDoSX9NnAAIZtg7jG+J6b9pupyYp5fnZsXxm/3H0IdFFOMPheQ4FEoiC0X9+Vzx7UPnDcVoW5IxGfGV1du5FB/L7Ruhsb019H4wrwazM77MtC0QYnT1eG42pHb3WG/HFuzUJ6u5uPL3Xkw/E2SlsWe7rBf0/rvHUqi9LBvTs//HY+y1Y38lZA8kYnv6OiZvoUjmIUE5jhcwjyirqA+HB2BYGlAAxksrCXmITyRVQ5EgdwUSwIL6mMXFF2pqlgKXGA+lwcTS1T2qdMTalLatgrsUbeTEMsYygrLpJLJ9xtF835oTM3/b/nUkfhAIdUM3mtm5UltrW4ObJmnMopanJbG86gCnaUPLemvsWefvNQE5przRdnVzaFR/Q5tJSOupypW1j0EfeA+vYK4Ypb19+1KvgZqP6yU7fMHumYStx7JMuo+prYXwkqRpxy+qh6WPtxKBgeYA7r5cr/rUHcfgbVkn31AY+CMiTfyow5GVttoDH9/9aq6Wwh8Lyr8aK//s0Rflz6q8i8ePt4qwT0wKcRcgju7/yUNF/rpe0UBSD3HXFCXBFgs23oUcj3uCeMnu9Rz9n2b3BArip92pO6tllFAPh3AVog8yjnjWpykie0QzSdCYpX/USlMoeum9ko7UcDdKWn7l+e1nk1uBMeW+GUdEL6K5kUf1h30zeloNGirSWcfs6XoS+YRfZlBo/zhq65uLmanxnhoTI1J0cIkR6NSv9f3sENAzdCDuvnltytjLA/HcanaJqVQd/R+uZ4VVZJZNBBH0F0SayXxfSZgYR9SfkFS7xIB2Lfj75ZwwaZc/l3DgjMPciYfhzCnPuZrWDDXJrzc6yQkQTzCRLIAQsQXfB0w6gtQFEDXEJP0Jv7cYxby5/6ALFBkKVk1nE1K508pjdsEnVLTRXnXKft6ekYBZQdbuoFu0VU9Nku49dgs4d4HGyci4qU2ttx4JmFiR4wPFluuLdgxgTTYVK6kngORLxsKQCwzmnpLXNBGySLV0URaotodjY3RxBxNzGHn0tg3SCszxYIuKb/szQWpnMa+pGthpegLvPmEpowLysD73B4g5ptM4wCxDzaMysD5Irm749sXycmdjiVJPw+6pKa
P+ca5UizOnSCZgx+26CJB3IGJes6esWYJ8vqBajoErtyG7hYhHgBXdqaF9ZfWLWf2mbaKB5h7duCo3DiUf5Iecz/THMxn6XOW/mXnPHb3tnp8sNU/aUJL5uhmKW/Jq0DdxXKgqo7T79WsNE3K3geaBVV6i2cui8i3BGU+pHC5zSRw46ho7JOYJldNwUxbCpHkE810BxBaMO5hAY83G21MAhZSwfh6u32z2WhT7Gy3j7OJJ7o2QKm1CfVr7bL2sYn+8en5gV1Uv2wq2X2Pi1xbPIZIJupSmKbFbNJW/7njy2q+B4/a0DzkulIZOWTJYfNpyK7ktorEpxyIBVmq8ijsH/34r0/bXWfe1mpTap0z1yX8EvvYybT/l9Kq5bLIjrvshFPkJaz+ZZVMbgLG4+eYB322Hu6wNdwxfjxuQlYG3X7lQ1DcQk0Rtmpe5JFRfbwo3qrIFxcPeI2iLjLae1fc9ookuyacU5ukQ9BuPE4vD4xki3jWHxhXnekv2Zuu0lSTT81aOJqtv26acbdl+kRalcQhd5LcJ1hkLZPhShlZYgUWFvDzz2CMzuDduzogGwucyOFNfCBujt+UHnUE7lpeK6lPr34DCyxLDGN01tg2juSf/wQAAP//M+NLdjUsAAA= + +- path: /opt/azure/containers/provision_configs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/7xae3OjRrb/X5+il+iOx7XBeB5JJt5oqjC0vcQSKIAmMzdOUS1oyR3zUJrGtnbs736rm4dAAiTV3Nr8oXHgnN95dJ9XN9/9Q5mTWJmj9G5gWjr0DFOHn0fD13dJymIUYfAMGCIhkH3w9jQnMdUJrFGcDsgC/PEHGFoOGI3AULNsaDme5QhC8Oef/wLsDscDAACY2sYn1YWeMR0NX5MVQCC9Sx4BZnfn4BksKV4BeZqAExJjBm5v/rgNzv7858npAIcp3gWodJQN8Az8jAE5OAEnQF68OR0syAC6mu5NIbS9mT0eSXeMrdILRRl+3aC8XLx99+Fcykm1sQFNt5/4p5+lwcBP4gVZZhSrQUTiWYrp69OvQj//Di0xkCGQ33Ct+G8EzoE8AT/z/4A0/KrqE8OcOdB+keos4fa7l5ocB/sUs7SUok4NB9qfoO2Vyt3AL95Udf89khTMfOU+m2MaY4ZTxceUpQpakRTTB0zP7vE6l8uSzL8TQjvRKg2jJADnP56fH0iePMaAJgm74D97eQSTph5si49ajGjhb9W+j25H7XZiQS02zJGLgJkfdK5CH2CrJQcw5IEp/0fYYjkTy/FmtvEiNYOyNJ2rd8F/DkRfkJoritg5xhV+SHDMulzRAdjtin0MO6u7h2tjm0ghx1i2wpgOv27y6UuXka3I3Sb2kX/rWvdhN1a62BQatF3jytA4+YGb3qesZ9NvA2654f37gxnaV7qba2cXH2NbsYs7bOsA7LZtH0PvLu61TSzwMZbt7uIOI1uRu03sJ283sItHMKWYgX8+iT+xf5d0FpwXCTyDOUrxj++BLAfYTwIMPh5U00rcZkXoBuwuMyVSR4LthjwgIzewdzPbHuz+BNrAFsuxH7Evp7T5obbAh/mhYxe1+eFw7P4Q3PUDJzzEES2AL4MBjtOMYnuqvT4FeXeXrlOGI5+FMEbzEKtx4DBEGaArf07iADw/A/xEGBhC2/acL44LJ5o79hxXtV3vSjXG+1DklCF2CE6lnpoFhOmFhnml4VEz0w1X96CpXo6h/iLx9p/RDDdLTpciiGMeZk3V/pMFQCsGQpIyIMskThkKQxyUs8NJjnnSKHhoxbwlZt4qo0sM3p6Dd+fgzdvzUv6rssKJn3rTfZPNcYiZI4qXhimrVuhmdgnH0D22/7vPAbdbwC00vk2OgBE5WeAkKxynaQiWOKYpAnKSMTDcoyp4e/7+Q4Ob4r+BHONHID/9cP4zkAO0TsFP787PgXyP1/sBW8VWNgE5zeZ/AUnRzFFRW/iUuDXu3HxId3x98HTQ1VV2AbW2W3uJd8pUD8f2zDa7HBvaESPbbtntBGutuvup+wa2HZbcnP+d2dD71bHMDiPQfzKKz/5Kk3hb9yZn+4zZQbOr5w5hV0fQsj7dSXvP8rd0GZWXDmoyWpdB/HAKQ4NcsqkZU3VcliMHajZ0R8OveygU5fZWub29vX35f8OTOJ6U4/mIgV9+AdC6KozaWYA8bCU/TLJAugCSyjfCNJuHxNfEs+9zAoZjFDND0Ay/utBUTdcz9JfyfZrNU5+SFSNJXFI5s0tHs42pa1hmnRahQBNxXxF2GdXGlJ+u7GHMvVExU5wmGfXxNU2yVc5qQ8ea2Rr0rm1rNq0ow8RH3IacaGxpKte+ev0QuesVzl9+mnjulymsuyDGzEQRrsw3azqk2M8oYWuhw4bKhO7vln3DNZ7ZhvtlS5+HBuQnw3Zn6tgrmBpU9q6NW+Reh800yRh2ebnfSLKtmQs9l/cKFd2KkgjRtfqASIjmJCRs7dS1m9rGRLW/eOon1Rirl8aYm+PUXFAAOD4KcSuno6lj2GARG3NKkwcSYHqJ/PtksZgkQcGnja2ZPrWtT4YObe9S1W6sqytvYumwF0C6AB28Lz1cNmaU4LSb2bOhaxvQ6QOBT6skxjHrQYGfp5YJTbcPRs9ouU27YPSZne/dHphfCWOY9oD8argutFshbMTwmESkzRRbdeHYmBjtNlScv02dPmbvt2m7LyuAy8y/x70KeJcz7Qbu1+N3Shjep4z3u2248BCV9sPlejURsxRPUIyWODACHDPC1vCJ4TgtF3rmQG+imuo11D1Dh6bLAwx+dqHp1BY6SzFV05Qs4w2OoecBM3Og7amOY1ybdYxans1SbPBOPfbxBDMUIIYq2YbpuKqpQW8CXVVXXfWlypoouEQhZ6LOfVYmT1X3LtUx57A952ZWyQhIyrONlbF5ksWBY6qukNHk0A2Hpx/PmrmX1szUPU5XSsRPfpgFeIJShukVTSKHoThANBhfCij4WRvPdO4ux4W2d2VbEz6lmLpq69748qXKSPnyfUJZWMtHNxPHq9bskzobu0XjW7BF6IlEWTSumW1nIdaSrIjsifrZmMwmHreoMsiejaGnWbNNaJfib/C6FH7/IZV2337CtNgFEm++oXVVdU7yU+1EcU9OBCMgPbzdOWXkQAGQCZCUvpylBBLobhwPw8ozzgFI5Slm94DXGEI006iGEPFDMaNrPwo8svAWiIQZxWKQ/AG8/QFESbCiyRyDOfVizBYkZJg2h9uJxZ14CTczuugj5RhIdSYJfMwtiZIgC3Eq81A4C5Q6zRnXsmmMZhrGVJTcNFd5y5byZWVTtbxlLZ+OZ9eGmS+pcF3LqkYPYKiZhndpmJ5u2Mqbczn3MhclxnLxWrPMK+NaUFSseYfPG/xtkh2QimdXR2tsaF/yowbJRyHxkxYtqx1zkn4nRaK2S3NKgiWWqv9nFMXpClFeOb9bnhyhFA
771ZL4sne9jJNY+BW8erWFUcbTCDR0+29bJ05C8v+UdE5iBc+Z2DhAZiBGDMhyRZ8fmORHRFoSM0RinrZiRiK8s880y3RVwxRZy3SNCcxtDRL/nm/6HTNzWF28HtQ04yJLofnbSpRuaTe8MBQ9PPwMtfIgyRjDkYiq/DgqKP5VcvFnfNImPj4LFPyEfS9liLJNkD0iwrxFQr0FCUXMn4M3YNgvrRn7/In3u+pq//a46dbMFcC8sPKokNE1yDUB9UvgulETnuW9q7F67RTHZPoRVvkhRtSLeDXxVjRZoaVo9rxFiJbpxtDNTf4/9tzk73VLl74H+aVY6gJKTJcbW3PjlADhKIlr6T2hgAASg+HrFP8N3giFTv8FgqSRToCcVjpWwGA3xgD4629wcnYCfmkhf/UKzClG921RI4QMCZC5Etwnbdh7PZAnmuLYs4r7EOMVeLMtNUhi3HvUWmythufL3ds8NW7uONNwLbtaPa6evW/PyVESE5bQM54D6P7o6RZy0EbpQDlUySJAjlfzYAX7l6TprcNO5DfZr+hfdo5KdXglGsxayOAFb0fLo+Mee9swDjKUMxYFJpf7gKgSknkpU/ybNyR7xNdQDpbMVS7KSlnm8oOiQplkxfJ2UPHLOlUdpJ+ldwc4pBv9m/ZBoUMTozo9bwnOqgbme2CM5jg0k6DW2I3VSzj2TEuHzgFOCDmAHHOEfkd0wB5kfYN3X4w2NNoboF3Q37QqNRWOuSb7NclojMJqJb5uUrk4MXZYQtESj1Z89koZb862KYRCE/Q0S/HozfX266K36nx/ldBHRAM3cdZpmCxHa5zmJC/gYzFVlM7+K9c12BT/LmcUHHLJcYxHbj6kvDGkSTgNUdxoCoc2vLQs14a/zQwb6hyVt4mmVY3nYhJhNOMN81YNpZhlNK73Cr3TWZ5X3DF4+1EJ8IMSZ2EI/DDjo75M4kWyFX8fHB7vpmFeV9uFj1MUIyay7gTFZIFTphPayL4T1TSuoOPqhj3ankajgicfz6L7gFAgr3LVanxc0CMluRxN5MuGiDz/CAF3SYSVYdUuKmdc2hahiLJhg1GpZeGmGhVJ7cZm2MSqXcRsJF9s/mwDOpC8Bs974p/4pNgGVo2RLaw7Fz8DWZYHaEWK444L8PBmUCx8ejGQy01wkWNjysiC+IhhGWXsLqGEreUAMXQBbqWhptbv8G+lQiJ9wPSiUbrya54BADGKsGAtD41+081bic/lDD+xXIH870KBQptdFlBMC9uvZBREJBYEXcIyysdAuRS0S3FP4uAC5HttwIUIxdrgatKEMoXSRODXnFe5rOaUXdcVjPd43cpwA7/cSgMJfOxcaflpc8qh5a5TM5akPgoxVYMgiavQ0cYzYYU6cy1xJ2B7qq435or6yRHnTZUyQ6AKVA7wKkzWEY7Z2RpFYU9h6pV4WHUqjr3S519yVxn6x+fha7Gxhz03XNUF5OnzUtqjSLsgB/sHSMqvxL5FWprNGzY1r/e+Bbm4XayBVzeM3wJLlxvA5t3XUajVrlU1Q0viGPss2dqwqiYObEyo8blDs6E4WVfHzmj4ekVJzBZA+norFRsjuJV4+PxPeit9D8qn+b1m801569p82rxrbb5DPiMPWCdUKLmGcbBKSMxmNMzpyi/lw2RJ4rOI+DRJkwVL4pDE+MxPolvp+9vqvjS/jaCdKJF4L+KrPKCKlFZFrila3ZVXlKXSJcySvzx7JHGQPKZnMWYFRvp3OKkkHKKEn1Bcx7n48P79uwJsicIQ93ikeL9jSPRNGii30osEpL4E0Ps6j1pOUkWEoG/GXv0rhkdwftr8xKj8SOgerwFN0cX7859/LL4aypvm+sdD7378If94KMnYzhl98a2LT3h9yuOAk56tcJR/SnQIA3+Wc5RfF41mjuK4I01VxqMHQlmGwnIOtXYezLafaObWE6klJG/gl9HwdeGjo8x6PD9tC3Fou8cBbsx+LBeoCbm/wjUBDy9uXXKOrmsUB+nH52FnthPJtEvabnY2UYQ/Pm+l5qMwuFN3FYK2exTKPV7vgNzAL/0Ym6ogPplJQuKvt4qC+Nomv1A4ZHU5jrwSQK2LW/ddlUk/Piv1epAq27lBaXz8kiqt7m7XVBxgfQetq8H/BQAA//+oI6qBejYAAA== + + + + + + + +- path: /etc/systemd/system/kubelet.service + permissions: "0644" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/6SUz07jPBTF93kKq2LxfQsnbalQEfIChsBUVAwiRSxKFTnOJbHq2JHvTWlnmHcf9Q9oSosGhJfX53fOkWXd8Z3VNAnOAZXXNWlnxVWTgQEKvjmb6+XkRlIZzzUSiqhBHxmnpIkybaPpRhoE4wT8TCuYBLeAJD0JaZ7kAoPYzrR3tgJLF9qAiIBUlMOjbAy98kmjFCDGc00JSWpQdHqHQTwHlSy9bjyIVV4msWSRqymSPxsPkXKWpLbg8cUqxHIPV01z7RmvWTSTPjI6e03+iFZZ/V4XrlhLP7IxO/ivco0l9swKDzV7aL1NemixZ/akGDf/M26AtdmEnTAqwbJ17ArnPNM236m5Ozhhj7q1r/3GppJT4FhKD7tuQfAGxCWJC1RkGH9iFijU9awXkqpTD+Q1YFf0/w0p5yFEV8m5ctaKztFhv/eJqErOU1zYNJNqalzxUX6VWgGiLCBVDkn02p+kssYjiX77ow9jQRdluPnFYaFSKj1g2RG99vHR1zy6ot857n7N43DzdNsufG0DGcnMADJOzMrlXzEaaa9U12+ltqnAa7UDvbMY2EPANodzsEszjuBn4LdurMuBG5mBQdE6+HV1dxYP41F6/eM8ToenZ/Ew+d3aAmaiy7YHzjQV8No0hbY81369Z5Y1vAUCjNaKtQD/Yg9e0i6Gp5fJvovb+HKQjOLbVZ894/vB6Hs6Oh1cj5IgGA8skjRmEtxLS5CfLUTVGNK8QfAhSV8A/QkAAP//lvMgPnQFAAA= + + + +- path: /etc/apt/apt.conf.d/99periodic + permissions: "0644" + owner: root + content: | + APT::Periodic::Update-Package-Lists "0"; + APT::Periodic::Download-Upgradeable-Packages "0"; + APT::Periodic::AutocleanInterval "0"; + APT::Periodic::Unattended-Upgrade "0"; + + + + + + + + +- path: /etc/systemd/system/docker.service.d/exec_start.conf + permissions: "0644" + owner: root + content: | + [Service] + ExecStart= + ExecStart=/usr/bin/dockerd -H fd:// --storage-driver=overlay2 
--bip= + ExecStartPost=/sbin/iptables -P FORWARD ACCEPT + #EOF + +- path: /etc/docker/daemon.json + permissions: "0644" + owner: root + content: | + { + "live-restore": true, + "log-driver": "json-file", + "log-opts": { + "max-size": "50m", + "max-file": "5" + } + } + + + + + + + + +- path: /etc/kubernetes/certs/ca.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + +- path: /etc/kubernetes/certs/client.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + + + +- path: /var/lib/kubelet/kubeconfig + permissions: "0644" + owner: root + content: | + apiVersion: v1 + kind: Config + clusters: + - name: localcluster + cluster: + certificate-authority: /etc/kubernetes/certs/ca.crt + server: https://:443 + users: + - name: client + user: + client-certificate: /etc/kubernetes/certs/client.crt + client-key: /etc/kubernetes/certs/client.key + contexts: + - context: + cluster: localcluster + user: client + name: localclustercontext + current-context: localclustercontext + #EOF + +- path: /etc/default/kubelet + permissions: "0644" + owner: root + content: | + KUBELET_FLAGS=--address=0.0.0.0 --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroups-per-qos=true --client-ca-file=/etc/kubernetes/certs/ca.crt --cluster-dns=10.0.0.10 --cluster-domain=cluster.local --enforce-node-allocatable=pods --event-qps=0 --eviction-hard=memory.available<750Mi,nodefs.available<10%,nodefs.inodesFree<5% --feature-gates=RotateKubeletServerCertificate=true,a=b,PodPriority=true,x=y --image-gc-high-threshold=85 --image-gc-low-threshold=80 --kube-reserved=cpu=100m,memory=1638Mi --max-pods=110 --node-status-update-frequency=10s --pod-manifest-path=/etc/kubernetes/manifests --pod-max-pids=-1 --protect-kernel-defaults=true --read-only-port=10255 --resolv-conf=/etc/resolv.conf --rotate-certificates=true --streaming-connection-idle-timeout=4h0m0s --system-reserved=cpu=2,memory=1Gi --tls-cert-file=/etc/kubernetes/certs/kubeletserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --tls-private-key-file=/etc/kubernetes/certs/kubeletserver.key + KUBELET_REGISTER_SCHEDULABLE=true + KUBELET_IMAGE=hyperkube-amd64:v1.15.7 + + + KUBELET_NODE_LABELS=kubernetes.azure.com/role=agent,node-role.kubernetes.io/agent=,kubernetes.io/role=agent,agentpool=agent2,storageprofile=managed,storagetier=Premium_LRS,kubernetes.azure.com/cluster=',variables('labelResourceGroup'),' + + #EOF + +- path: /opt/azure/containers/kubelet.sh + permissions: "0755" + owner: root + content: | + #!/bin/bash + + + + #EOF + +runcmd: +- set -x +- . /opt/azure/containers/provision_source.sh +- aptmarkWALinuxAgent hold +'))] \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line16.sh b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line16.sh new file mode 100644 index 00000000000..e51aeb3fea9 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line16.sh @@ -0,0 +1,160 @@ +#!/bin/bash +ERR_FILE_WATCH_TIMEOUT=6 +set -x +if [ -f /opt/azure/containers/provision.complete ]; then + echo "Already ran to success exiting..." 
+ exit 0 +fi + +echo $(date),$(hostname), startcustomscript>>/opt/m + +for i in $(seq 1 3600); do + if [ -s /opt/azure/containers/provision_source.sh ]; then + grep -Fq '#HELPERSEOF' /opt/azure/containers/provision_source.sh && break + fi + if [ $i -eq 3600 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi +done +sed -i "/#HELPERSEOF/d" /opt/azure/containers/provision_source.sh +source /opt/azure/containers/provision_source.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_installs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_installs.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_configs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_configs.sh + +set +x +ETCD_PEER_CERT=$(echo ${ETCD_PEER_CERTIFICATES} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +ETCD_PEER_KEY=$(echo ${ETCD_PEER_PRIVATE_KEYS} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +set -x + +if [[ $OS == $COREOS_OS_NAME ]]; then + echo "Changing default kubectl bin location" + KUBECTL=/opt/kubectl +fi + +if [ -f /var/run/reboot-required ]; then + REBOOTREQUIRED=true +else + REBOOTREQUIRED=false +fi + +configureAdminUser +cleanUpContainerd + + +if [[ "${GPU_NODE}" != "true" ]]; then + cleanUpGPUDrivers +fi + +VHD_LOGS_FILEPATH=/opt/azure/vhd-install.complete +if [ -f $VHD_LOGS_FILEPATH ]; then + echo "detected golden image pre-install" + export -f retrycmd_if_failure + export -f cleanUpContainerImages + export KUBERNETES_VERSION + echo "start to clean up container images" + bash -c cleanUpContainerImages & + FULL_INSTALL_REQUIRED=false +else + if [[ "${IS_VHD}" = true ]]; then + echo "Using VHD distro but file $VHD_LOGS_FILEPATH not found" + exit $ERR_VHD_FILE_NOT_FOUND + fi + FULL_INSTALL_REQUIRED=true +fi + +if [[ $OS == $UBUNTU_OS_NAME ]] && [ "$FULL_INSTALL_REQUIRED" = "true" ]; then + installDeps +else + echo "Golden image; skipping dependencies installation" +fi + +if [[ $OS == $UBUNTU_OS_NAME ]]; then + ensureAuditD +fi +installContainerRuntime + + +installNetworkPlugin + +installKubeletAndKubectl + +if [[ $OS != $COREOS_OS_NAME ]]; then + ensureRPC +fi + +createKubeManifestDir + +ensureContainerRuntime + +configureK8s + +configureCNI + + + +ensureKubelet +ensureJournal + +if $FULL_INSTALL_REQUIRED; then + if [[ $OS == $UBUNTU_OS_NAME ]]; then + + echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind + sed -i "13i\echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind\n" /etc/rc.local + fi +fi +rm -f /etc/apt/apt.conf.d/99periodic +if [[ $OS == $UBUNTU_OS_NAME ]]; then + apt_get_purge 20 30 120 apache2-utils & +fi + + +VALIDATION_ERR=0 +API_SERVER_DNS_RETRIES=20 +if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_DNS_RETRIES=200 +fi +RES=$(retrycmd_if_failure ${API_SERVER_DNS_RETRIES} 1 3 nslookup ${API_SERVER_NAME}) +STS=$? 
+if [[ $STS != 0 ]]; then + if [[ $RES == *"168.63.129.16"* ]]; then + VALIDATION_ERR=$ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL + else + VALIDATION_ERR=$ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL + fi +else + API_SERVER_CONN_RETRIES=50 + if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_CONN_RETRIES=100 + fi + retrycmd_if_failure ${API_SERVER_CONN_RETRIES} 1 3 nc -vz ${API_SERVER_NAME} 443 || VALIDATION_ERR=$ERR_K8S_API_SERVER_CONN_FAIL +fi + + + +if $REBOOTREQUIRED; then + echo 'reboot required, rebooting node in 1 minute' + /bin/bash -c "shutdown -r 1 &" + if [[ $OS == $UBUNTU_OS_NAME ]]; then + aptmarkWALinuxAgent unhold & + fi +else + if [[ $OS == $UBUNTU_OS_NAME ]]; then + /usr/lib/apt/apt.systemd.daily & + aptmarkWALinuxAgent unhold & + fi +fi + +echo "Custom script finished. API server connection check code:" $VALIDATION_ERR +echo $(date),$(hostname), endcustomscript>>/opt/m +mkdir -p /opt/azure/containers && touch /opt/azure/containers/provision.complete +ps auxfww > /opt/azure/provision-ps.log & + +exit $VALIDATION_ERR + +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line23.sh b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line23.sh new file mode 100644 index 00000000000..1f074afdfa1 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line23.sh @@ -0,0 +1,276 @@ +#!/bin/bash + +CC_SERVICE_IN_TMP=/opt/azure/containers/cc-proxy.service.in +CC_SOCKET_IN_TMP=/opt/azure/containers/cc-proxy.socket.in +CNI_CONFIG_DIR="/etc/cni/net.d" +CNI_BIN_DIR="/opt/cni/bin" +CNI_DOWNLOADS_DIR="/opt/cni/downloads" +CONTAINERD_DOWNLOADS_DIR="/opt/containerd/downloads" +K8S_DOWNLOADS_DIR="/opt/kubernetes/downloads" +UBUNTU_RELEASE=$(lsb_release -r -s) + +removeMoby() { + apt-get purge -y moby-engine moby-cli +} + +removeContainerd() { + apt-get purge -y moby-containerd +} + +cleanupContainerdDlFiles() { + rm -rf $CONTAINERD_DOWNLOADS_DIR +} + +installDeps() { + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/packages-microsoft-prod.deb > /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT + retrycmd_if_failure 60 5 10 dpkg -i /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_PKG_ADD_FAIL + aptmarkWALinuxAgent hold + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT + apt_get_dist_upgrade || exit $ERR_APT_DIST_UPGRADE_TIMEOUT + for apt_package in apache2-utils apt-transport-https blobfuse=1.1.1 ca-certificates ceph-common cgroup-lite cifs-utils conntrack cracklib-runtime ebtables ethtool fuse git glusterfs-client htop iftop init-system-helpers iotop iproute2 ipset iptables jq libpam-pwquality libpwquality-tools mount nfs-common pigz socat sysstat traceroute util-linux xz-utils zip; do + if ! apt_get_install 30 1 600 $apt_package; then + journalctl --no-pager -u $apt_package + exit $ERR_APT_INSTALL_TIMEOUT + fi + done + if [[ "${AUDITD_ENABLED}" == true ]]; then + if ! 
apt_get_install 30 1 600 auditd; then + journalctl --no-pager -u auditd + exit $ERR_APT_INSTALL_TIMEOUT + fi + fi +} + +installGPUDrivers() { + mkdir -p $GPU_DEST/tmp + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/gpgkey > $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-key add $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/ubuntu${UBUNTU_RELEASE}/nvidia-docker.list > $GPU_DEST/tmp/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 cat $GPU_DEST/tmp/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + apt_get_update + retrycmd_if_failure 30 5 3600 apt-get install -y linux-headers-$(uname -r) gcc make dkms || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + retrycmd_if_failure 30 5 60 curl -fLS https://us.download.nvidia.com/tesla/$GPU_DV/NVIDIA-Linux-x86_64-${GPU_DV}.run -o ${GPU_DEST}/nvidia-drivers-${GPU_DV} || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + tmpDir=$GPU_DEST/tmp + if ! ( + set -e -o pipefail + cd "${tmpDir}" + retrycmd_if_failure 30 5 3600 apt-get download nvidia-docker2="${NVIDIA_DOCKER_VERSION}+${NVIDIA_DOCKER_SUFFIX}" || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + ); then + exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + fi +} + +installSGXDrivers() { + echo "Installing SGX driver" + local VERSION + VERSION=$(grep DISTRIB_RELEASE /etc/*-release| cut -f 2 -d "=") + case $VERSION in + "18.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer18.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "16.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer16.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "*") + echo "Version $VERSION is not supported" + exit 1 + ;; + esac + + local PACKAGES="make gcc dkms" + wait_for_apt_locks + retrycmd_if_failure 30 5 3600 apt-get -y install $PACKAGES || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + + local SGX_DRIVER + SGX_DRIVER=$(basename $SGX_DRIVER_URL) + local OE_DIR=/opt/azure/containers/oe + mkdir -p ${OE_DIR} + + retrycmd_if_failure 120 5 25 curl -fsSL ${SGX_DRIVER_URL} -o ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + chmod a+x ${OE_DIR}/${SGX_DRIVER} + ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_START_FAIL +} + +installContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + installMoby + fi + +} + +installMoby() { + CURRENT_VERSION=$(dockerd --version | grep "Docker version" | cut -d "," -f 1 | cut -d " " -f 3 | cut -d "+" -f 1) + if [[ "$CURRENT_VERSION" == "${MOBY_VERSION}" ]]; then + echo "dockerd $MOBY_VERSION is already installed, skipping Moby download" + else + removeMoby + getMobyPkg + MOBY_CLI=${MOBY_VERSION} + if [[ "${MOBY_CLI}" == "3.0.4" ]]; then + MOBY_CLI="3.0.3" + fi + apt_get_install 20 30 120 moby-engine=${MOBY_VERSION}* moby-cli=${MOBY_CLI}* --allow-downgrades || exit $ERR_MOBY_INSTALL_TIMEOUT + fi +} + + + +getMobyPkg() { + retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/prod.list > /tmp/microsoft-prod.list || exit $ERR_MOBY_APT_LIST_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft-prod.list /etc/apt/sources.list.d/ || exit $ERR_MOBY_APT_LIST_TIMEOUT + 
retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /tmp/microsoft.gpg || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft.gpg /etc/apt/trusted.gpg.d/ || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT +} + +installNetworkPlugin() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + installAzureCNI + fi + installCNI + rm -rf $CNI_DOWNLOADS_DIR & +} + +downloadCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadAzureCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${VNET_CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadContainerd() { + CONTAINERD_DOWNLOAD_URL="${CONTAINERD_DOWNLOAD_URL_BASE}cri-containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz" + mkdir -p $CONTAINERD_DOWNLOADS_DIR + CONTAINERD_TGZ_TMP=${CONTAINERD_DOWNLOAD_URL##*/} + retrycmd_get_tarball 120 5 "$CONTAINERD_DOWNLOADS_DIR/${CONTAINERD_TGZ_TMP}" ${CONTAINERD_DOWNLOAD_URL} || exit $ERR_CONTAINERD_DOWNLOAD_TIMEOUT +} + +installCNI() { + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadCNI + fi + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR + chown -R root:root $CNI_BIN_DIR + chmod -R 755 $CNI_BIN_DIR +} + +installAzureCNI() { + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadAzureCNI + fi + mkdir -p $CNI_CONFIG_DIR + chown -R root:root $CNI_CONFIG_DIR + chmod 755 $CNI_CONFIG_DIR + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR +} + +installImg() { + img_filepath=/usr/local/bin/img + retrycmd_get_executable 120 5 $img_filepath "https://acs-mirror.azureedge.net/img/img-linux-amd64-v0.5.6" ls || exit $ERR_IMG_DOWNLOAD_TIMEOUT +} + +extractHyperkube() { + CLI_TOOL=$1 + path="/home/hyperkube-downloads/${KUBERNETES_VERSION}" + pullContainerImage $CLI_TOOL ${HYPERKUBE_URL} + if [[ "$CLI_TOOL" == "docker" ]]; then + mkdir -p "$path" + # Check if we can extract kubelet and kubectl directly from hyperkube's binary folder + if docker run --rm --entrypoint "" -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /usr/local/bin/{kubelet,kubectl} $path"; then + mv "$path/kubelet" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/kubectl" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + return + else + docker run --rm -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /hyperkube $path" + fi + else + img unpack -o "$path" ${HYPERKUBE_URL} + fi + + if [[ $OS == $COREOS_OS_NAME ]]; then + cp "$path/hyperkube" "/opt/kubelet" + mv "$path/hyperkube" "/opt/kubectl" + chmod a+x /opt/kubelet /opt/kubectl + else + cp "$path/hyperkube" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/hyperkube" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + fi +} + +installKubeletAndKubectl() { + if [[ ! 
-f "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" ]]; then + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + extractHyperkube "docker" + else + installImg + extractHyperkube "img" + fi + fi + mv "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" "/usr/local/bin/kubelet" + mv "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" "/usr/local/bin/kubectl" + chmod a+x /usr/local/bin/kubelet /usr/local/bin/kubectl + rm -rf /usr/local/bin/kubelet-* /usr/local/bin/kubectl-* /home/hyperkube-downloads & +} + +pullContainerImage() { + CLI_TOOL=$1 + DOCKER_IMAGE_URL=$2 + retrycmd_if_failure 60 1 1200 $CLI_TOOL pull $DOCKER_IMAGE_URL || exit $ERR_CONTAINER_IMG_PULL_TIMEOUT +} + +cleanUpContainerImages() { + function cleanUpHyperkubeImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'hyperkube') + local exit_code=$? + if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + function cleanUpControllerManagerImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'cloud-controller-manager') + local exit_code=$? + if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + export -f cleanUpHyperkubeImagesRun + export -f cleanUpControllerManagerImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpHyperkubeImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpControllerManagerImagesRun +} + +cleanUpGPUDrivers() { + rm -Rf $GPU_DEST + rm -f /etc/apt/sources.list.d/nvidia-docker.list +} + +cleanUpContainerd() { + rm -Rf $CONTAINERD_DOWNLOADS_DIR +} + +overrideNetworkConfig() { + CONFIG_FILEPATH="/etc/cloud/cloud.cfg.d/80_azure_net_config.cfg" + touch ${CONFIG_FILEPATH} + cat << EOF >> ${CONFIG_FILEPATH} +datasource: + Azure: + apply_network_config: false +EOF +} +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line30.sh b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line30.sh new file mode 100644 index 00000000000..ce857cb431e --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line30.sh @@ -0,0 +1,337 @@ +#!/bin/bash +NODE_INDEX=$(hostname | tail -c 2) +NODE_NAME=$(hostname) +if [[ $OS == $COREOS_OS_NAME ]]; then + PRIVATE_IP=$(ip a show eth0 | grep -Po 'inet \K[\d.]+') +else + PRIVATE_IP=$(hostname -I | cut -d' ' -f1) +fi +ETCD_PEER_URL="https://${PRIVATE_IP}:2380" +ETCD_CLIENT_URL="https://${PRIVATE_IP}:2379" + +configureAdminUser(){ + chage -E -1 -I -1 -m 0 -M 99999 "${ADMINUSER}" + chage -l "${ADMINUSER}" +} + +configureSecrets(){ + APISERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/apiserver.key" + touch "${APISERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${APISERVER_PRIVATE_KEY_PATH}" + chown root:root "${APISERVER_PRIVATE_KEY_PATH}" + + CA_PRIVATE_KEY_PATH="/etc/kubernetes/certs/ca.key" + touch "${CA_PRIVATE_KEY_PATH}" + chmod 0600 "${CA_PRIVATE_KEY_PATH}" + chown root:root "${CA_PRIVATE_KEY_PATH}" + + ETCD_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdserver.key" + touch "${ETCD_SERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_SERVER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_SERVER_PRIVATE_KEY_PATH}" + fi + + 
ETCD_CLIENT_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdclient.key" + touch "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chown root:root "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + + ETCD_PEER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.key" + touch "${ETCD_PEER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_PEER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_PEER_PRIVATE_KEY_PATH}" + fi + + ETCD_SERVER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdserver.crt" + touch "${ETCD_SERVER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_SERVER_CERTIFICATE_PATH}" + chown root:root "${ETCD_SERVER_CERTIFICATE_PATH}" + + ETCD_CLIENT_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdclient.crt" + touch "${ETCD_CLIENT_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_CLIENT_CERTIFICATE_PATH}" + chown root:root "${ETCD_CLIENT_CERTIFICATE_PATH}" + + ETCD_PEER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.crt" + touch "${ETCD_PEER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_PEER_CERTIFICATE_PATH}" + chown root:root "${ETCD_PEER_CERTIFICATE_PATH}" + + set +x + echo "${APISERVER_PRIVATE_KEY}" | base64 --decode > "${APISERVER_PRIVATE_KEY_PATH}" + echo "${CA_PRIVATE_KEY}" | base64 --decode > "${CA_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_PRIVATE_KEY}" | base64 --decode > "${ETCD_SERVER_PRIVATE_KEY_PATH}" + echo "${ETCD_CLIENT_PRIVATE_KEY}" | base64 --decode > "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + echo "${ETCD_PEER_KEY}" | base64 --decode > "${ETCD_PEER_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_CERTIFICATE}" | base64 --decode > "${ETCD_SERVER_CERTIFICATE_PATH}" + echo "${ETCD_CLIENT_CERTIFICATE}" | base64 --decode > "${ETCD_CLIENT_CERTIFICATE_PATH}" + echo "${ETCD_PEER_CERT}" | base64 --decode > "${ETCD_PEER_CERTIFICATE_PATH}" +} + +ensureRPC() { + systemctlEnableAndStart rpcbind || exit $ERR_SYSTEMCTL_START_FAIL + systemctlEnableAndStart rpc-statd || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureAuditD() { + if [[ "${AUDITD_ENABLED}" == true ]]; then + systemctlEnableAndStart auditd || exit $ERR_SYSTEMCTL_START_FAIL + else + if apt list --installed | grep 'auditd'; then + apt_get_purge 20 30 120 auditd & + fi + fi +} + +configureKubeletServerCert() { + KUBELET_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/kubeletserver.key" + KUBELET_SERVER_CERT_PATH="/etc/kubernetes/certs/kubeletserver.crt" + + openssl genrsa -out $KUBELET_SERVER_PRIVATE_KEY_PATH 2048 + openssl req -new -x509 -days 7300 -key $KUBELET_SERVER_PRIVATE_KEY_PATH -out $KUBELET_SERVER_CERT_PATH -subj "/CN=${NODE_NAME}" +} + +configureK8s() { + KUBELET_PRIVATE_KEY_PATH="/etc/kubernetes/certs/client.key" + touch "${KUBELET_PRIVATE_KEY_PATH}" + chmod 0600 "${KUBELET_PRIVATE_KEY_PATH}" + chown root:root "${KUBELET_PRIVATE_KEY_PATH}" + + APISERVER_PUBLIC_KEY_PATH="/etc/kubernetes/certs/apiserver.crt" + touch "${APISERVER_PUBLIC_KEY_PATH}" + chmod 0644 "${APISERVER_PUBLIC_KEY_PATH}" + chown root:root "${APISERVER_PUBLIC_KEY_PATH}" + + AZURE_JSON_PATH="/etc/kubernetes/azure.json" + touch "${AZURE_JSON_PATH}" + chmod 0600 "${AZURE_JSON_PATH}" + chown root:root "${AZURE_JSON_PATH}" + + set +x + echo "${KUBELET_PRIVATE_KEY}" | base64 --decode > "${KUBELET_PRIVATE_KEY_PATH}" + echo "${APISERVER_PUBLIC_KEY}" | base64 --decode > "${APISERVER_PUBLIC_KEY_PATH}" + + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\\/\\\\} + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\"/\\\"} + cat << EOF > "${AZURE_JSON_PATH}" +{ + "cloud": "AzurePublicCloud", + "tenantId": 
"${TENANT_ID}", + "subscriptionId": "${SUBSCRIPTION_ID}", + "aadClientId": "${SERVICE_PRINCIPAL_CLIENT_ID}", + "aadClientSecret": "${SERVICE_PRINCIPAL_CLIENT_SECRET}", + "resourceGroup": "${RESOURCE_GROUP}", + "location": "${LOCATION}", + "vmType": "${VM_TYPE}", + "subnetName": "${SUBNET}", + "securityGroupName": "${NETWORK_SECURITY_GROUP}", + "vnetName": "${VIRTUAL_NETWORK}", + "vnetResourceGroup": "${VIRTUAL_NETWORK_RESOURCE_GROUP}", + "routeTableName": "${ROUTE_TABLE}", + "primaryAvailabilitySetName": "${PRIMARY_AVAILABILITY_SET}", + "primaryScaleSetName": "${PRIMARY_SCALE_SET}", + "cloudProviderBackoffMode": "${CLOUDPROVIDER_BACKOFF_MODE}", + "cloudProviderBackoff": ${CLOUDPROVIDER_BACKOFF}, + "cloudProviderBackoffRetries": ${CLOUDPROVIDER_BACKOFF_RETRIES}, + "cloudProviderBackoffExponent": ${CLOUDPROVIDER_BACKOFF_EXPONENT}, + "cloudProviderBackoffDuration": ${CLOUDPROVIDER_BACKOFF_DURATION}, + "cloudProviderBackoffJitter": ${CLOUDPROVIDER_BACKOFF_JITTER}, + "cloudProviderRateLimit": ${CLOUDPROVIDER_RATELIMIT}, + "cloudProviderRateLimitQPS": ${CLOUDPROVIDER_RATELIMIT_QPS}, + "cloudProviderRateLimitBucket": ${CLOUDPROVIDER_RATELIMIT_BUCKET}, + "cloudProviderRateLimitQPSWrite": ${CLOUDPROVIDER_RATELIMIT_QPS_WRITE}, + "cloudProviderRateLimitBucketWrite": ${CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE}, + "useManagedIdentityExtension": ${USE_MANAGED_IDENTITY_EXTENSION}, + "userAssignedIdentityID": "${USER_ASSIGNED_IDENTITY_ID}", + "useInstanceMetadata": ${USE_INSTANCE_METADATA}, + "loadBalancerSku": "${LOAD_BALANCER_SKU}", + "disableOutboundSNAT": ${LOAD_BALANCER_DISABLE_OUTBOUND_SNAT}, + "excludeMasterFromStandardLB": ${EXCLUDE_MASTER_FROM_STANDARD_LB}, + "providerVaultName": "${KMS_PROVIDER_VAULT_NAME}", + "maximumLoadBalancerRuleCount": ${MAXIMUM_LOADBALANCER_RULE_COUNT}, + "providerKeyName": "k8s", + "providerKeyVersion": "" +} +EOF + set -x + if [[ "${CLOUDPROVIDER_BACKOFF_MODE}" = "v2" ]]; then + sed -i "/cloudProviderBackoffExponent/d" /etc/kubernetes/azure.json + sed -i "/cloudProviderBackoffJitter/d" /etc/kubernetes/azure.json + fi + + configureKubeletServerCert +} + +configureCNI() { + + retrycmd_if_failure 120 5 25 modprobe br_netfilter || exit $ERR_MODPROBE_FAIL + echo -n "br_netfilter" > /etc/modules-load.d/br_netfilter.conf + configureCNIIPTables + +} + +configureCNIIPTables() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + mv $CNI_BIN_DIR/10-azure.conflist $CNI_CONFIG_DIR/ + chmod 600 $CNI_CONFIG_DIR/10-azure.conflist + if [[ "${NETWORK_POLICY}" == "calico" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + elif [[ "${NETWORK_POLICY}" == "" || "${NETWORK_POLICY}" == "none" ]] && [[ "${NETWORK_MODE}" == "transparent" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + fi + /sbin/ebtables -t nat --list + fi +} + +ensureContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + ensureDocker + fi + +} + + + +ensureDocker() { + DOCKER_SERVICE_EXEC_START_FILE=/etc/systemd/system/docker.service.d/exec_start.conf + wait_for_file 1200 1 $DOCKER_SERVICE_EXEC_START_FILE || exit $ERR_FILE_WATCH_TIMEOUT + usermod -aG docker ${ADMINUSER} + DOCKER_MOUNT_FLAGS_SYSTEMD_FILE=/etc/systemd/system/docker.service.d/clear_mount_propagation_flags.conf + if [[ $OS != $COREOS_OS_NAME ]]; then + wait_for_file 1200 1 $DOCKER_MOUNT_FLAGS_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + fi + DOCKER_JSON_FILE=/etc/docker/daemon.json + for i in $(seq 1 1200); do + if [ -s $DOCKER_JSON_FILE ]; then + jq '.' 
< $DOCKER_JSON_FILE && break + fi + if [ $i -eq 1200 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi + done + systemctlEnableAndStart docker || exit $ERR_DOCKER_START_FAIL + + DOCKER_MONITOR_SYSTEMD_TIMER_FILE=/etc/systemd/system/docker-monitor.timer + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_TIMER_FILE || exit $ERR_FILE_WATCH_TIMEOUT + DOCKER_MONITOR_SYSTEMD_FILE=/etc/systemd/system/docker-monitor.service + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart docker-monitor.timer || exit $ERR_SYSTEMCTL_START_FAIL +} + + + + + +ensureKubelet() { + KUBELET_DEFAULT_FILE=/etc/default/kubelet + wait_for_file 1200 1 $KUBELET_DEFAULT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBECONFIG_FILE=/var/lib/kubelet/kubeconfig + wait_for_file 1200 1 $KUBECONFIG_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBELET_RUNTIME_CONFIG_SCRIPT_FILE=/opt/azure/containers/kubelet.sh + wait_for_file 1200 1 $KUBELET_RUNTIME_CONFIG_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart kubelet || exit $ERR_KUBELET_START_FAIL + + + +} + +ensureLabelNodes() { + LABEL_NODES_SCRIPT_FILE=/opt/azure/containers/label-nodes.sh + wait_for_file 1200 1 $LABEL_NODES_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + LABEL_NODES_SYSTEMD_FILE=/etc/systemd/system/label-nodes.service + wait_for_file 1200 1 $LABEL_NODES_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart label-nodes || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureJournal() { + { + echo "Storage=persistent" + echo "SystemMaxUse=1G" + echo "RuntimeMaxUse=1G" + echo "ForwardToSyslog=yes" + } >> /etc/systemd/journald.conf + systemctlEnableAndStart systemd-journald || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureK8sControlPlane() { + if $REBOOTREQUIRED || [ "$NO_OUTBOUND" = "true" ]; then + return + fi + retrycmd_if_failure 120 5 25 $KUBECTL 2>/dev/null cluster-info || exit $ERR_K8S_RUNNING_TIMEOUT +} + +createKubeManifestDir() { + KUBEMANIFESTDIR=/etc/kubernetes/manifests + mkdir -p $KUBEMANIFESTDIR +} + +writeKubeConfig() { + KUBECONFIGDIR=/home/$ADMINUSER/.kube + KUBECONFIGFILE=$KUBECONFIGDIR/config + mkdir -p $KUBECONFIGDIR + touch $KUBECONFIGFILE + chown $ADMINUSER:$ADMINUSER $KUBECONFIGDIR + chown $ADMINUSER:$ADMINUSER $KUBECONFIGFILE + chmod 700 $KUBECONFIGDIR + chmod 600 $KUBECONFIGFILE + set +x + echo " +--- +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: \"$CA_CERTIFICATE\" + server: $KUBECONFIG_SERVER + name: \"$MASTER_FQDN\" +contexts: +- context: + cluster: \"$MASTER_FQDN\" + user: \"$MASTER_FQDN-admin\" + name: \"$MASTER_FQDN\" +current-context: \"$MASTER_FQDN\" +kind: Config +users: +- name: \"$MASTER_FQDN-admin\" + user: + client-certificate-data: \"$KUBECONFIG_CERTIFICATE\" + client-key-data: \"$KUBECONFIG_KEY\" +" > $KUBECONFIGFILE + set -x +} + +configClusterAutoscalerAddon() { + CLUSTER_AUTOSCALER_ADDON_FILE=/etc/kubernetes/addons/cluster-autoscaler-deployment.yaml + wait_for_file 1200 1 $CLUSTER_AUTOSCALER_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_SECRET | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SUBSCRIPTION_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $TENANT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $RESOURCE_GROUP | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE +} + +configACIConnectorAddon() { + ACI_CONNECTOR_CREDENTIALS=$(printf 
"{\"clientId\": \"%s\", \"clientSecret\": \"%s\", \"tenantId\": \"%s\", \"subscriptionId\": \"%s\", \"activeDirectoryEndpointUrl\": \"https://login.microsoftonline.com\",\"resourceManagerEndpointUrl\": \"https://management.azure.com/\", \"activeDirectoryGraphResourceId\": \"https://graph.windows.net/\", \"sqlManagementEndpointUrl\": \"https://management.core.windows.net:8443/\", \"galleryEndpointUrl\": \"https://gallery.azure.com/\", \"managementEndpointUrl\": \"https://management.core.windows.net/\"}" "$SERVICE_PRINCIPAL_CLIENT_ID" "$SERVICE_PRINCIPAL_CLIENT_SECRET" "$TENANT_ID" "$SUBSCRIPTION_ID" | base64 -w 0) + + openssl req -newkey rsa:4096 -new -nodes -x509 -days 3650 -keyout /etc/kubernetes/certs/aci-connector-key.pem -out /etc/kubernetes/certs/aci-connector-cert.pem -subj "/C=US/ST=CA/L=virtualkubelet/O=virtualkubelet/OU=virtualkubelet/CN=virtualkubelet" + ACI_CONNECTOR_KEY=$(base64 /etc/kubernetes/certs/aci-connector-key.pem -w0) + ACI_CONNECTOR_CERT=$(base64 /etc/kubernetes/certs/aci-connector-cert.pem -w0) + + ACI_CONNECTOR_ADDON_FILE=/etc/kubernetes/addons/aci-connector-deployment.yaml + wait_for_file 1200 1 $ACI_CONNECTOR_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$ACI_CONNECTOR_CREDENTIALS|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$RESOURCE_GROUP|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_CERT|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_KEY|g" $ACI_CONNECTOR_ADDON_FILE +} + +configAzurePolicyAddon() { + AZURE_POLICY_ADDON_FILE=/etc/kubernetes/addons/azure-policy-deployment.yaml + sed -i "s||/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP|g" $AZURE_POLICY_ADDON_FILE +} + + +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line43.sh b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line43.sh new file mode 100644 index 00000000000..e708f006a14 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line43.sh @@ -0,0 +1,38 @@ +[Unit] +Description=Kubelet +ConditionPathExists=/usr/local/bin/kubelet + + +[Service] +Restart=always +EnvironmentFile=/etc/default/kubelet +SuccessExitStatus=143 +ExecStartPre=/bin/bash /opt/azure/containers/kubelet.sh +ExecStartPre=/bin/mkdir -p /var/lib/kubelet +ExecStartPre=/bin/mkdir -p /var/lib/cni +ExecStartPre=/bin/bash -c "if [ $(mount | grep \"/var/lib/kubelet\" | wc -l) -le 0 ] ; then /bin/mount --bind /var/lib/kubelet /var/lib/kubelet ; fi" +ExecStartPre=/bin/mount --make-shared /var/lib/kubelet + + +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_retries2=8 +ExecStartPre=/sbin/sysctl -w net.core.somaxconn=16384 +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_max_syn_backlog=16384 +ExecStartPre=/sbin/sysctl -w net.core.message_cost=40 +ExecStartPre=/sbin/sysctl -w net.core.message_burst=80 + +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh1=4096 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh2=8192 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh3=16384 + +ExecStartPre=-/sbin/ebtables -t nat --list +ExecStartPre=-/sbin/iptables -t nat --numeric --list +ExecStart=/usr/local/bin/kubelet \ + --enable-server \ + --node-labels="${KUBELET_NODE_LABELS}" \ + --v=2 \ + --volume-plugin-dir=/etc/kubernetes/volumeplugins \ + $KUBELET_FLAGS \ + $KUBELET_REGISTER_NODE $KUBELET_REGISTER_WITH_TAINTS + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line9.sh b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line9.sh new file mode 100644 index 00000000000..08cbc16e86d 
--- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+AKSCustomCloud/line9.sh @@ -0,0 +1,305 @@ +#!/bin/bash + +ERR_SYSTEMCTL_START_FAIL=4 +ERR_CLOUD_INIT_TIMEOUT=5 +ERR_FILE_WATCH_TIMEOUT=6 +ERR_HOLD_WALINUXAGENT=7 +ERR_RELEASE_HOLD_WALINUXAGENT=8 +ERR_APT_INSTALL_TIMEOUT=9 +ERR_NTP_INSTALL_TIMEOUT=10 +ERR_NTP_START_TIMEOUT=11 +ERR_STOP_SYSTEMD_TIMESYNCD_TIMEOUT=12 +ERR_DOCKER_INSTALL_TIMEOUT=20 +ERR_DOCKER_DOWNLOAD_TIMEOUT=21 +ERR_DOCKER_KEY_DOWNLOAD_TIMEOUT=22 +ERR_DOCKER_APT_KEY_TIMEOUT=23 +ERR_DOCKER_START_FAIL=24 +ERR_MOBY_APT_LIST_TIMEOUT=25 +ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT=26 +ERR_MOBY_INSTALL_TIMEOUT=27 +ERR_K8S_RUNNING_TIMEOUT=30 +ERR_K8S_DOWNLOAD_TIMEOUT=31 +ERR_KUBECTL_NOT_FOUND=32 +ERR_IMG_DOWNLOAD_TIMEOUT=33 +ERR_KUBELET_START_FAIL=34 +ERR_CONTAINER_IMG_PULL_TIMEOUT=35 +ERR_CNI_DOWNLOAD_TIMEOUT=41 +ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT=42 +ERR_MS_PROD_DEB_PKG_ADD_FAIL=43 + +ERR_SYSTEMD_INSTALL_FAIL=48 +ERR_MODPROBE_FAIL=49 +ERR_OUTBOUND_CONN_FAIL=50 +ERR_K8S_API_SERVER_CONN_FAIL=51 +ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL=52 +ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL=53 +ERR_KATA_KEY_DOWNLOAD_TIMEOUT=60 +ERR_KATA_APT_KEY_TIMEOUT=61 +ERR_KATA_INSTALL_TIMEOUT=62 +ERR_CONTAINERD_DOWNLOAD_TIMEOUT=70 +ERR_CUSTOM_SEARCH_DOMAINS_FAIL=80 +ERR_GPU_DRIVERS_START_FAIL=84 +ERR_GPU_DRIVERS_INSTALL_TIMEOUT=85 +ERR_GPU_DEVICE_PLUGIN_START_FAIL=86 +ERR_GPU_INFO_ROM_CORRUPTED=87 +ERR_SGX_DRIVERS_INSTALL_TIMEOUT=90 +ERR_SGX_DRIVERS_START_FAIL=91 +ERR_APT_DAILY_TIMEOUT=98 +ERR_APT_UPDATE_TIMEOUT=99 +ERR_CSE_PROVISION_SCRIPT_NOT_READY_TIMEOUT=100 +ERR_APT_DIST_UPGRADE_TIMEOUT=101 +ERR_APT_PURGE_FAIL=102 +ERR_SYSCTL_RELOAD=103 +ERR_CIS_ASSIGN_ROOT_PW=111 +ERR_CIS_ASSIGN_FILE_PERMISSION=112 +ERR_PACKER_COPY_FILE=113 +ERR_CIS_APPLY_PASSWORD_CONFIG=115 +ERR_SYSTEMD_DOCKER_STOP_FAIL=116 + +ERR_VHD_FILE_NOT_FOUND=124 +ERR_VHD_BUILD_ERROR=125 + + +ERR_AZURE_STACK_GET_ARM_TOKEN=120 +ERR_AZURE_STACK_GET_NETWORK_CONFIGURATION=121 +ERR_AZURE_STACK_GET_SUBNET_PREFIX=122 + +OS=$(sort -r /etc/*-release | gawk 'match($0, /^(ID_LIKE=(coreos)|ID=(.*))$/, a) { print toupper(a[2] a[3]); exit }') +UBUNTU_OS_NAME="UBUNTU" +RHEL_OS_NAME="RHEL" +COREOS_OS_NAME="COREOS" +KUBECTL=/usr/local/bin/kubectl +DOCKER=/usr/bin/docker +export GPU_DV=418.126.02 +export GPU_DEST=/usr/local/nvidia +NVIDIA_DOCKER_VERSION=2.0.3 +DOCKER_VERSION=1.13.1-1 +NVIDIA_CONTAINER_RUNTIME_VERSION=2.0.0 +NVIDIA_DOCKER_SUFFIX=docker18.09.2-1 + +aptmarkWALinuxAgent() { + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-mark $1 walinuxagent || \ + if [[ "$1" == "hold" ]]; then + exit $ERR_HOLD_WALINUXAGENT + elif [[ "$1" == "unhold" ]]; then + exit $ERR_RELEASE_HOLD_WALINUXAGENT + fi +} + +retrycmd_if_failure() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + echo Executed \"$@\" $i times; + return 1 + else + sleep $wait_sleep + fi + done + echo Executed \"$@\" $i times; +} +retrycmd_if_failure_no_stats() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +retrycmd_get_tarball() { + tar_retries=$1; wait_sleep=$2; tarball=$3; url=$4 + echo "${tar_retries} retries" + for i in $(seq 1 $tar_retries); do + tar -tzf $tarball && break || \ + if [ $i -eq $tar_retries ]; then + return 1 + else + timeout 60 curl -fsSL $url -o $tarball + sleep $wait_sleep + fi + done +} 
+retrycmd_get_executable() { + retries=$1; wait_sleep=$2; filepath=$3; url=$4; validation_args=$5 + echo "${retries} retries" + for i in $(seq 1 $retries); do + $filepath $validation_args && break || \ + if [ $i -eq $retries ]; then + return 1 + else + timeout 30 curl -fsSL $url -o $filepath + chmod +x $filepath + sleep $wait_sleep + fi + done +} +wait_for_file() { + retries=$1; wait_sleep=$2; filepath=$3 + paved=/opt/azure/cloud-init-files.paved + grep -Fq "${filepath}" $paved && return 0 + for i in $(seq 1 $retries); do + grep -Fq '#EOF' $filepath && break + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + sed -i "/#EOF/d" $filepath + echo $filepath >> $paved +} +wait_for_apt_locks() { + while fuser /var/lib/dpkg/lock /var/lib/apt/lists/lock /var/cache/apt/archives/lock >/dev/null 2>&1; do + echo 'Waiting for release of apt locks' + sleep 3 + done +} +apt_get_update() { + retries=10 + apt_update_output=/tmp/apt-get-update.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + ! (apt-get update 2>&1 | tee $apt_update_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_update_output && break || \ + cat $apt_update_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get update $i times + wait_for_apt_locks +} +apt_get_install() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get install -o Dpkg::Options::="--force-confold" --no-install-recommends -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + apt_get_update + fi + done + echo Executed apt-get install --no-install-recommends -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_purge() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get purge -o Dpkg::Options::="--force-confold" -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + echo Executed apt-get purge -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_dist_upgrade() { + retries=10 + apt_dist_upgrade_output=/tmp/apt-get-dist-upgrade.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + apt-mark showhold + ! 
(apt-get dist-upgrade -y 2>&1 | tee $apt_dist_upgrade_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_dist_upgrade_output && break || \ + cat $apt_dist_upgrade_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get dist-upgrade $i times + wait_for_apt_locks +} +systemctl_restart() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl restart $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_stop() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl stop $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_disable() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl disable $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +sysctl_reload() { + retries=$1; wait_sleep=$2; timeout=$3 + for i in $(seq 1 $retries); do + timeout $timeout sysctl --system && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +version_gte() { + test "$(printf '%s\n' "$@" | sort -rV | head -n 1)" == "$1" +} + +systemctlEnableAndStart() { + systemctl_restart 100 5 30 $1 + RESTART_STATUS=$? + systemctl status $1 --no-pager -l > /var/log/azure/$1-status.log + if [ $RESTART_STATUS -ne 0 ]; then + echo "$1 could not be started" + return 1 + fi + if ! retrycmd_if_failure 120 5 25 systemctl enable $1; then + echo "$1 could not be enabled by systemctl" + return 1 + fi +} + +systemctlDisableAndStop() { + if [ systemctl list-units --full --all | grep -q "$1.service" ]; then + systemctl_stop 20 5 25 $1 || echo "$1 could not be stopped" + systemctl_disable 20 5 25 $1 || echo "$1 could not be disabled" + fi +} +#HELPERSEOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S115/CSECommand b/pkg/agent/testdata/AKSUbuntu1604+K8S115/CSECommand new file mode 100644 index 00000000000..f650d780f22 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S115/CSECommand @@ -0,0 +1 @@ +echo $(date),$(hostname); retrycmd_if_failure() { r=$1; w=$2; t=$3; shift && shift && shift; for i in $(seq 1 $r); do timeout $t ${@}; [ $? 
-eq 0 ] && break || if [ $i -eq $r ]; then return 1; else sleep $w; fi; done }; ERR_OUTBOUND_CONN_FAIL=50; retrycmd_if_failure 50 1 3 nc -vz mcr.microsoft.com 443 2>&1 || exit $ERR_OUTBOUND_CONN_FAIL; for i in $(seq 1 1200); do grep -Fq "EOF" /opt/azure/containers/provision.sh && break; if [ $i -eq 1200 ]; then exit 100; else sleep 1; fi; done; ADMINUSER=azureuser CONTAINERD_VERSION= MOBY_VERSION= TENANT_ID=tenantID KUBERNETES_VERSION=1.15.7 HYPERKUBE_URL=hyperkube-amd64:v1.15.7 APISERVER_PUBLIC_KEY= SUBSCRIPTION_ID=subID RESOURCE_GROUP=resourceGroupName LOCATION=southcentralus VM_TYPE=vmss SUBNET=subnet1 NETWORK_SECURITY_GROUP=aks-agentpool-36873793-nsg VIRTUAL_NETWORK=aks-vnet-07752737 VIRTUAL_NETWORK_RESOURCE_GROUP=MC_rg ROUTE_TABLE=aks-agentpool-36873793-routetable PRIMARY_AVAILABILITY_SET= PRIMARY_SCALE_SET=aks-agent2-36873793-vmss SERVICE_PRINCIPAL_CLIENT_ID=ClientID SERVICE_PRINCIPAL_CLIENT_SECRET='Secret' KUBELET_PRIVATE_KEY= NETWORK_PLUGIN= NETWORK_POLICY= VNET_CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/azure-cni/v1.1.3/binaries/azure-vnet-cni-linux-amd64-v1.1.3.tgz CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/cni/cni-plugins-amd64-v0.7.6.tgz CLOUDPROVIDER_BACKOFF= CLOUDPROVIDER_BACKOFF_MODE= CLOUDPROVIDER_BACKOFF_RETRIES=0 CLOUDPROVIDER_BACKOFF_EXPONENT=0 CLOUDPROVIDER_BACKOFF_DURATION=0 CLOUDPROVIDER_BACKOFF_JITTER=0 CLOUDPROVIDER_RATELIMIT= CLOUDPROVIDER_RATELIMIT_QPS=0 CLOUDPROVIDER_RATELIMIT_QPS_WRITE=0 CLOUDPROVIDER_RATELIMIT_BUCKET=0 CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE=0 LOAD_BALANCER_DISABLE_OUTBOUND_SNAT= USE_MANAGED_IDENTITY_EXTENSION=false USE_INSTANCE_METADATA=false LOAD_BALANCER_SKU= EXCLUDE_MASTER_FROM_STANDARD_LB=true MAXIMUM_LOADBALANCER_RULE_COUNT=0 CONTAINER_RUNTIME= CONTAINERD_DOWNLOAD_URL_BASE=https://storage.googleapis.com/cri-containerd-release/ NETWORK_MODE= KUBE_BINARY_URL= USER_ASSIGNED_IDENTITY_ID=userAssignedID API_SERVER_NAME= IS_VHD=true GPU_NODE=false SGX_NODE=false AUDITD_ENABLED=false CONFIG_GPU_DRIVER_IF_NEEDED=true ENABLE_GPU_DEVICE_PLUGIN_IF_NEEDED=true /usr/bin/nohup /bin/bash -c "/bin/bash /opt/azure/containers/provision.sh >> /var/log/azure/cluster-provision.log 2>&1; systemctl --no-pager -l status kubelet 2>&1 | head -n 100" \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S115/CustomData b/pkg/agent/testdata/AKSUbuntu1604+K8S115/CustomData new file mode 100644 index 00000000000..48415a3fb4a --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S115/CustomData @@ -0,0 +1,160 @@ +[base64(concat('#cloud-config + +write_files: +- path: /opt/azure/containers/provision_source.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9xZbVPbSBL+7l/R69UtkFvZlh1YEkq5FdbgqDCSSi+wXMJOCWlsqxCSI41JsoH/fjWjV7+QNdm9Ind8MupnerqffqanZf/4Q/c6jLvXXjZrtZBlYfvSdtDZ0Blj21EsB58o2lh+Cdw2HBuuijVdc7CjnSHDdeT93HKijRG+UJzh28pykFveGmMVXyhjTXd/U0ZId+RfcoOFxkix0QbAYQ5QTAdruu0o43Hl9FVu0h1zzST1alseemWRcovtGGaRn8qN9qU+VGtYP4epxvAUWWv++70ls2pc6GNDqZf3pSX7KbrcgFnegmXIcJV5sGRuFKBfVODMOL7ky8aaXSfYL6pwZuOROXpk64OGh7XcipqcHtrYcnVd00eVbdCrbWteB0XSp+4xYprRDQefGK6uyoMiVe1stGHZoF42Rk4z00GpNUN3FE1HuQfTbUQ7KNId6tq665dSxYVpGSpW0fEGUH8dZJ6OsKKqheAH0DwOasVYbj0suVRNyzhGxdNCnIbrHDMKWAp6btpvcKiYGraRdY6sJkDaCFB1G48N49Q1C1h/I0z5t2uhdXBJsuIomzVx0GsgVsV4IDWMq4I56K9USV33/kvhfejajnGGbaRYw7dYNc4UTbfzEA8LyMh0sWpp58iym1o4fLluXo3kcL+BQefaEGFz7I40fcnRQQ3S9BMDW8YZHhqW5ZoOUuXDQv726LdH93nVW8c0dngl1U1LVbRxzeOrRjtzTVVxUG0qFDO0EdPhuWZrho7toaWZDj9KFlLUy0aH6zU2YQ3ANUeWoqIGohGG6VqjQppSr6iXfWmzY2ohVilZ6hUSGWo2VmxbG+nYMgwHmxeyVHbNho13eRNZZ5rNIpWlsmWaCm9YQ8O85CBZkpqeTXN8iU3Fti8Mix+LE20kS9I+LB2xqusZhYAl6aA4hedv1Xzzur9IZUtktmNXG6sYWZZhyRLrhvmy/FzYjjI8xSPkYMU6w45xinRZKrv5KkRHzoVhnRZBupbi8ETL5r4Kt91jHTnYtNCJ9psssf7eMmxZ2M2SlIKYQpdQv/tCTElEvIzAPUy9jzewc+tRf7Yr9H6G7u+7morH2imSd/0kJUm2d6+p8m7nxd6e0P0ZvD34AvM0jCnQZDGfk3TXe9e/Au/d4GrvCMinkMLDzl7LPXZ1x8WGjXXlDMnt/P92y3qLxvVT9l+7NTQsZNj10/z/dqvo4nJ3kaXdKPG9iA8HN4tr4tOolRcot7LnQeLfkLRFPs1ZsvwAnssvpcOO1D/o9PpLBmQ7TbfxXRiEXks/11RNKSvPzhRju9/pdQatlYdSRxp0JFEq19T3g+XqTP9Ly3srrm33hBUoj1g67PRedfqi1Gp5c3rrpTcXyjiMF5+UKYnp7h58aQEAfPRCiidJir05xVHi32T8cUpo+tm/DXA4wRMvjBYpASanfejvgzenInMIggQfvYg59ZhTuL+H93x5OIF376AtSG2QZWjPkihow9XVEdAZiTmC/fGyChvnJ44h0YqfRfynnh4duDh2ErYeWq0NyVV8MFtIMlmQjnJusoiQuSz0j4CGtyRZUFkYHEE2CycUfvpp5UO+SZJCCGEMwm5GPoAEQuF07wiCpIq5cAdC9eHLrw/M03VKvJuay5JPEEIQyYfKG6ySwInwZwmgT8RfUBLA+7bw6/s2W8j2yI6WoCmhizQGqSYxysgSgqcOQk1DZZ2E/GOQxPmKP9n2YRPlOE5wRj2a/b9w/3cR2mBrSiimXnrtRVHFEvVS/DWmcjhnapFGsvCyrlBb+NJY/VAS3n6EugZ2hT4vBZH+MeEQtts21DW8fSN9ZbEOeuAv0gjESWaPQeAfkyqWv8444UL2rqOt2sIkjMjco7MG40dw50Vh4NEwibGXTjNZ2F+qwrYV2Mi+UO4Iwso2/30FlyUYbC5BGdnSGn92mwTwz0+PmLctUXVTMS9PLAyHzr07EsjdZE673h+LlHT9KFkEYhiHVGTYrMMRHDtNyRzEkw+sWKWfhzYIHMFoLqjqPaVwldOdH5FxstMoZFm352s6HEkCEENod1l43aC9UjAu3jrmN28KOpq1qaaIesCYhRGBySIjKXTvvLQbhdfdYH4zZUPSTf3Im9NuFGY0azz3PX9GuMVL/Vl4Rwrjm25A7rrxIoqg/+YnaYlkHuXOhRfSMJ7yypRjaTJhkwvw8HZay/QMmkpjSbA+sJgHHl2XmpQXnaFyBE4WdL6gcpfezlm04pRQMTd1ksWTLqZHxjGeWD5oquhYU3R8Yhm6g3RVjpM4jClJPZ+Gd3XlGcMgin4ST8Ipm95ED0RxkqQ+4Q8DMqmwRcggTkD8DGGc0WYb/QF2S0CeE+cc7oESAsIaC2z65zpH0P59990FunrdebF3v/uOoKs07bzYE9p7TO91e/I9usnP5l62Gfw3nZtCDPtbzjkrtJTzzmNzdS2sguJnGHyeVV9F2uymUOc309evjTm7urLXr+V2cy2f8UUxTsRihZgSP7m9JXGQMYU+97BWnv66RzxRMBURj+e4cXT/qqjmi3T6HO8xzyopnvSWgnp+4WynjiKnb9FAEGZMktPUC0opLN1aDNfEbLy5GEAsAMX9tYUUHpHB9hLYtvyP31bV9xLZLPk4S6J8lGvcX83M2OLVi2wDNU+7zqrbaZOndeF9Dd7aUpVLely7xLjiCr1tVtsSJ40rbKPOss8ZJbc+jXBKMuql9Gn9BrI7P/ZuSflS+q1v61UYEHjkNonFlESJF2yzoIgbhCKU76UdNLnNaDL/nyOWBf1dsxqE2bav9N8VsUXc3ye3eStgqTyN179KJiNGFHOSvh9C7kiahUmMp9VbIyUZhbawy3/bmMDOP7L38Q60hV/bcA/FzyfncA8z4gUgxiDt5d90C1K79dCq1YtipgElDuylrrvWj0Hq9WAfBj0Q8nwslP9+ZzuK49qy8K/ldZBRjy4yEKR8GJ17U5KCGMGb4s08mRZflgiSmGM7UTJtXE7LG4AYE+it0Vx87yWBnyyiAOKEwjUBHjEJ2q2NlSioDSfww9d/jqiTIXF+UqStts/RAVx/rl08HkuzGmp+JHk5Gq2aE1JHE/GbNQ5pxuaYBZ/32dRfThQfWEidjKR3oU/aa5wt3wdQZitITOaPEZrM501C17rfVm4KbOGH5/7jWzQ2kWUj46T1nwAAAP//FTwo004jAAA= + +- path: /opt/azure/containers/provision.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/6xXe3PaOhb/35/ilDJ59NYYSG/abIfuUHAaNhSztsnd3d6Ox1jHoImRXEnO47b57juSeZiE3iad+o9MkI7O+Z3XT0fPnzlTypxpLOeW6/vR6WDoRn90w95ZFA4+ut4k7ByDJVGBfWPRFD6BnYLDc+XEfxUCnYQzFVOGQjq54FdUUs4aCV/kGSqEz29BzZFZYD5M5hxq3UxgTG5BxAwUB1kkCUoJeEMVZbNGo1Fbid9QBU0rpZZlTtYPSKzw8GX9YM6lYvECD1+CVLFQSSEVX8hE0Fy9e2fQLSwr5QIoUAb1A4lfoAVHx83m4Vsg3BgonZE/ciaSvBAJNuT8njcAM4E52KdfYP/5mTscu37geqf7T1C4twdTgfGl0ZjSDaw6BRu/GMAPrJqw1HfnyghhJnEtLTPEHForC4QztCQSsCnUnApsh9QeD9wq/3vCAes6pipKuYhSmmHpWOuH5ymTKs4yqWP17dvfe/5ISBWVPwkq4Syls1+JaaPRMp32243lhr1+NHZdP+q5ftipH5Qd8HV7fXA66HVDN7iDb5AUCmyy/2lfN2h7s/DZLLQ2Cy/NQv3goP515PXdaDDqu/+5+611eHhYMXvu/neX1bE/uOiGrt7+dVaX7GLo5RPUvQA6Haj3PN/1gsgLolH3owufq31QMklvHrMZZTMgmMZFpuCymGKiMphSBhlPYkU5K9nkfPLe7YXDjknFUswwy5rSrmLhiII5AqecK1vgl4IKJFvt57vvPS/03X9PBr7b7yhRoLXutnubaaw3tIkyvYXALllQNpEorCTDmE3y3qoaiLXyvlb/+mE8iXSM7mrwrAM1baW27f7y+IfxpC/oFQpp7Fyc9aOh9yEwlTjuhmedSuFdzYm9rP01Pa+drz84Cg+jTVBhopDAjGcEGdBFPEPIBa70lpHGm5wLpbUKVOI2WZCIplEa06wQeE/ifhgGWqWsCum8+SM3dIPowvWDgTeqIDLcr+8QoweKHNbtVaKTJSR9uYGdfMcc7Bmh08lwGA1GQdgdDqN7WVzneJ2jQRBdnPXvatABnZ/t9GwQTqQuz4uzPhAqleAwLRQYstkRcsYVpLxgpLaD67W4YZiRF0an3mTUr14au8Gb+lwV+aaxJu8no3BSaSx9D2mvdmrRLq5qsOLjMud9zOUmPKXTHyr18RbkJc3zsklzZARZQlGuji879BEYq+XIpG6mglDV10eXutZp9Qum6AJ1S5U7I1TXXFyOs2JG2Xr1vJhihqrLyPmSDyoYnv2IgAwGf9wrO1xgrFCr+RgzmqJUfSosqxR6iGtNCOdvZOVXbzSwrNWpJbrlr3/xQrC4RLg7TdXUPDqS+tuu2DYhrQRbr+3m6xO0XzWPEnt69Hvbjlsn7RZiu/kaEd6BI2+lMy2kc7XQf0nJQs78KioUzZyCTSkjmxlkOW+0juifv9zKn6wGDqrEEUlDc3626ouUWmJhqF3vxpoJc9XQ0W4Q5+QkR0E5ockTCi/OVTRDFeWFmCG0m3DUhFa7CXEeJ3Ns2xqVphNdEtZFdzjod8OBN4pc3+80re54EAWuf+H6UX8URL4b+gM36LSbKwQVAWO704EXjVzQq1hhRtll40UVznfVmYnZd4NO/WAHA0P96+6Td3pABiYzzi+LfFtMw7k7tIIw6NT/uYIbhKZNmtsxWm76rgnni1rr+E3j+KjRap80Wse1F/CwAO8FyrDd+Zsgqtjv/m/iuwbs0PPOJ+PotDsYPpx1H6Fpl46UbhisItrzRqN1WH9vVr17WqbK73uaW81mlcl/mLLq4WXOErCv/tqRMXj16kjPqI+Ii9FqImKK1/DM9kBzfx7YLwclWA1KL6Fc0FTPOEH98GrBgrJC4b45tn5o6ru4JueFIvyagS2gBXu1n6CuOFeLWFz+0R1SVtx0Z8gUFGzOM7K806uZfZpqp5DCyeh0zRvyVipckAaJaXa7VP9YDOsXbK1nHqpQvlQhpYzKOZKGLg6QKK5Q6BGGYaLvRUjmmFxCwgn+owb17Sz+zZMYGdn5IF5cEirAzne/R/QQoHiRzB/9urdyCXFxk15fa67eHFpL2rlsZHwGe5ZVDjL3XLCeu96p9f8AAAD//xsNyOSCEAAA + +- path: /opt/azure/containers/provision_installs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9waaXPaSPY7v+JFVm2uaQmcxJtNhuxikD2UMbg4MplNpVSN1IgeJLWm1XJMCP99q3ULhI9kNrs1qQqG7tev331JR4/0OfX1OQ6XjUa3a06M8ft+1zD7Q3N6edXWWSB0/CXiRLeYLzD1CQ91y0IBZzdrLST8mlpEo358dtS9MKb3PcqsFRHxyWHf7I6GZ/1zs9cftxWdCEu3fKr7RGi2Eu+f9ofppsQqN+fUT7Z6o1+Hg1GnN9kBsNln32XYDpVGdzScdvpDY9yrh87Is8uHLl5PaqFX0ZxwnwgSlqFnp7PhdGaOjYHRmRht9Ykbzk1OXIJDAogDCp82Gpx47Jpcsvn6yVPYNAAAcCCQQwQEEXcIoDV4bL5GxHeoT5Lvlksb2+xsNyf1DgwFT/Kw5RLsR0FxuueeUZeEORLuAeILUA9JSiKhfiiw6/ZIUDpHBF9bnm3ShbnA1I04MX1mhgKLEFrHTXgFx6/AirgLaBFOBrAUIgjf6HqArRV2SKh51OIsZAuhWcyTqlhQR4/mkS8iXd1UxbrNj6H8mDQnW7PJHN6BLrzgVpCvX4HcUAGqMR6blxPzajzqmT3jNGfWnPYvjdFseog5OJEstZpgBysHEP2OK68uzs1Or2eedfqDTJEe5qtfOwPqRzcdh/gClsy1s03TIcKMAhsLUsXauZqas6teZ2pUyM+O2DSU5xyO7ZqDvf5Enj4fd3rV4wvGYxQpc0B9wAG2luQYRYK6YWx4gmM/DBgXKFYszF02X0Qhabe0ltYCCyOLcEEX1MKChGCRYIks5nnMB8vhLAqQSwUBiy7CFKvFfF9wbK3Akp8unSMe+YJ6BMhc4LlLQiBiKRhzQd4EDhXguFEoCF+E0ltiuQkWAF3Enz4VKFyHgnhoSdyA8BAoi3cCziJBjoEGIRFAgxT973+AS+cB9lDw+Y8Iu1Ss44XsB5KXh+CxyBfgy0sTjgLqfIGQWVhAuA6lD4DkhMS3gGQPuVK1cPMlZfYLDd6CzWJ5A9AFPMq1lnobvGhCC06aTVBLyngLYkn89BjA7yziPnYt4QJCPkMBdggHFFXO5NBV/feHk2lnMKioHmBB4y8285NzdAEfP4KibjqzXn/aM41h53Rg9LYKtNsgeETg06cKUbfzgiObCvu+XCTQ30L/gpYC1/nVrMfpNeFF+PJWNuWAAlDPr2Zmz5hMpT9/X2Tzr6lNseZQsYzmGmXpArJlwuO6EzgrsoZ31St1HIjsYOBUvTSGG/ffG+NJLbefMRXmgnFTittl1io8GL1yuqXvSjKwbf8vCPlTJJkkif0cUYHSXBqKPWnXgGSc/ghWsbibnncQ10E4EHrIIm6RMF7X7O8nvppLDhrLC0nui9hd0xIj82Mk46Ef3aAlwTbhIVKfRD72ZKHzFBzLAg+vCNgrL3yYAR2k4qSZ2cdgkptHFGpZAaalpiJLCEFCF+uJfN/rw/f9Xr+D4pSKbl6fmCcvkbpJNrcaj3xADNIFYzItrCeJFAXswzgRXtCjvL0fVuK4+CSNUjLrICIpCGhAJMfphmXLWJsg2Srp4v10lIkEKmZy3FbUTSILsyfL9LEpae+PhtvnuxuT2dlZ/8NWeRjHTyvR/P7nKiF6cv5hN0QTa8lA6Sf71Hdgcv4BEu0kgnGZhV1IuYlX0u9t9YnDSQCywhn3T7MAkfjVM5QW51/BigSgBRwDskFpK09jHJas29UUE9CEL6X1Wmu+TCHkv8n5h5Q5czYetJXMNHO7bLY0xh2d+oK4KHRudNvCAWppx3rsQPHPLD3K9iiJaRPCrwmPL9ND58aMYc2bk5dmwrjZ0lrHptVqNZutYy3uhDKK3r5NKT35kZSefAelz0pUJsp+T3hImV8Sfwg+ExBGgSw1ia1Ua4HWLk4SYqtRMo6rTveic25M2kocmGSEksFJeWj63Pc2tM6DoprdAlXHKSS/7wAlGguwRlVfbfXJHIckjq9qVY1PS+dHRtyj1nfdjOyUO5sEfNu4u1QoJWZ1U71/mwTPFJde3t7eXwixuy09ZgN+fnMIXQz14Ksm0854mnRYRZDJu+Bx0lfkoSYrcvMm2BzPhpLKuMpVkkiq7Fa6kFmA7OsbpeKzdGWl5e/OxmNjODWLMJVgtgGh69T0v0IcupRevAPpsgJptLJB+UmRQatVWoF45UVp5XkC87TKXfX6hDd1czk6/S3PCTVMJq6ZUaqWwaV/YpcTbOfeQOyfIFzRIJARW3Kf56XE6YgbFg1JMRbJlxwi5O+rlZMvxTd2B/32Dq2FGrIeJYNMuhPlhdbUXtZwVEEaQ70oAkuqw3K5lDn6cTNuZI6b5VnNLlXP8uFNu0TQM0AIuy77jKQ44o58p0SKQQ/nyUajkMy3DGG+d/zCmZ2Xp7Jw3Rl4VIrRgh/Zpw36k+mdxV4rna5YwWH0h8ri77j3G6W1IuuwIFHDoSX9NnAAIZtg7jG+J6b9pupyYp5fnZsXxm/3H0IdFFOMPheQ4FEoiC0X9+Vzx7UPnDcVoW5IxGfGV1du5FB/L7Ruhsb019H4wrwazM77MtC0QYnT1eG42pHb3WG/HFuzUJ6u5uPL3Xkw/E2SlsWe7rBf0/rvHUqi9LBvTs//HY+y1Y38lZA8kYnv6OiZvoUjmIUE5jhcwjyirqA+HB2BYGlAAxksrCXmITyRVQ5EgdwUSwIL6mMXFF2pqlgKXGA+lwcTS1T2qdMTalLatgrsUbeTEMsYygrLpJLJ9xtF835oTM3/b/nUkfhAIdUM3mtm5UltrW4ObJmnMopanJbG86gCnaUPLemvsWefvNQE5przRdnVzaFR/Q5tJSOupypW1j0EfeA+vYK4Ypb19+1KvgZqP6yU7fMHumYStx7JMuo+prYXwkqRpxy+qh6WPtxKBgeYA7r5cr/rUHcfgbVkn31AY+CMiTfyow5GVttoDH9/9aq6Wwh8Lyr8aK//s0Rflz6q8i8ePt4qwT0wKcRcgju7/yUNF/rpe0UBSD3HXFCXBFgs23oUcj3uCeMnu9Rz9n2b3BArip92pO6tllFAPh3AVog8yjnjWpykie0QzSdCYpX/USlMoeum9ko7UcDdKWn7l+e1nk1uBMeW+GUdEL6K5kUf1h30zeloNGirSWcfs6XoS+YRfZlBo/zhq65uLmanxnhoTI1J0cIkR6NSv9f3sENAzdCDuvnltytjLA/HcanaJqVQd/R+uZ4VVZJZNBBH0F0SayXxfSZgYR9SfkFS7xIB2Lfj75ZwwaZc/l3DgjMPciYfhzCnPuZrWDDXJrzc6yQkQTzCRLIAQsQXfB0w6gtQFEDXEJP0Jv7cYxby5/6ALFBkKVk1nE1K508pjdsEnVLTRXnXKft6ekYBZQdbuoFu0VU9Nku49dgs4d4HGyci4qU2ttx4JmFiR4wPFluuLdgxgTTYVK6kngORLxsKQCwzmnpLXNBGySLV0URaotodjY3RxBxNzGHn0tg3SCszxYIuKb/szQWpnMa+pGthpegLvPmEpowLysD73B4g5ptM4wCxDzaMysD5Irm749sXycmdjiVJPw+6pKa
P+ca5UizOnSCZgx+26CJB3IGJes6esWYJ8vqBajoErtyG7hYhHgBXdqaF9ZfWLWf2mbaKB5h7duCo3DiUf5Iecz/THMxn6XOW/mXnPHb3tnp8sNU/aUJL5uhmKW/Jq0DdxXKgqo7T79WsNE3K3geaBVV6i2cui8i3BGU+pHC5zSRw46ho7JOYJldNwUxbCpHkE810BxBaMO5hAY83G21MAhZSwfh6u32z2WhT7Gy3j7OJJ7o2QKm1CfVr7bL2sYn+8en5gV1Uv2wq2X2Pi1xbPIZIJupSmKbFbNJW/7njy2q+B4/a0DzkulIZOWTJYfNpyK7ktorEpxyIBVmq8ijsH/34r0/bXWfe1mpTap0z1yX8EvvYybT/l9Kq5bLIjrvshFPkJaz+ZZVMbgLG4+eYB322Hu6wNdwxfjxuQlYG3X7lQ1DcQk0Rtmpe5JFRfbwo3qrIFxcPeI2iLjLae1fc9ookuyacU5ukQ9BuPE4vD4xki3jWHxhXnekv2Zuu0lSTT81aOJqtv26acbdl+kRalcQhd5LcJ1hkLZPhShlZYgUWFvDzz2CMzuDduzogGwucyOFNfCBujt+UHnUE7lpeK6lPr34DCyxLDGN01tg2juSf/wQAAP//M+NLdjUsAAA= + +- path: /opt/azure/containers/provision_configs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/7xae3OjRrb/X5+il+iOx7XBeB5JJt5oqjC0vcQSKIAmMzdOUS1oyR3zUJrGtnbs736rm4dAAiTV3Nr8oXHgnN95dJ9XN9/9Q5mTWJmj9G5gWjr0DFOHn0fD13dJymIUYfAMGCIhkH3w9jQnMdUJrFGcDsgC/PEHGFoOGI3AULNsaDme5QhC8Oef/wLsDscDAACY2sYn1YWeMR0NX5MVQCC9Sx4BZnfn4BksKV4BeZqAExJjBm5v/rgNzv7858npAIcp3gWodJQN8Az8jAE5OAEnQF68OR0syAC6mu5NIbS9mT0eSXeMrdILRRl+3aC8XLx99+Fcykm1sQFNt5/4p5+lwcBP4gVZZhSrQUTiWYrp69OvQj//Di0xkCGQ33Ct+G8EzoE8AT/z/4A0/KrqE8OcOdB+keos4fa7l5ocB/sUs7SUok4NB9qfoO2Vyt3AL95Udf89khTMfOU+m2MaY4ZTxceUpQpakRTTB0zP7vE6l8uSzL8TQjvRKg2jJADnP56fH0iePMaAJgm74D97eQSTph5si49ajGjhb9W+j25H7XZiQS02zJGLgJkfdK5CH2CrJQcw5IEp/0fYYjkTy/FmtvEiNYOyNJ2rd8F/DkRfkJoritg5xhV+SHDMulzRAdjtin0MO6u7h2tjm0ghx1i2wpgOv27y6UuXka3I3Sb2kX/rWvdhN1a62BQatF3jytA4+YGb3qesZ9NvA2654f37gxnaV7qba2cXH2NbsYs7bOsA7LZtH0PvLu61TSzwMZbt7uIOI1uRu03sJ283sItHMKWYgX8+iT+xf5d0FpwXCTyDOUrxj++BLAfYTwIMPh5U00rcZkXoBuwuMyVSR4LthjwgIzewdzPbHuz+BNrAFsuxH7Evp7T5obbAh/mhYxe1+eFw7P4Q3PUDJzzEES2AL4MBjtOMYnuqvT4FeXeXrlOGI5+FMEbzEKtx4DBEGaArf07iADw/A/xEGBhC2/acL44LJ5o79hxXtV3vSjXG+1DklCF2CE6lnpoFhOmFhnml4VEz0w1X96CpXo6h/iLx9p/RDDdLTpciiGMeZk3V/pMFQCsGQpIyIMskThkKQxyUs8NJjnnSKHhoxbwlZt4qo0sM3p6Dd+fgzdvzUv6rssKJn3rTfZPNcYiZI4qXhimrVuhmdgnH0D22/7vPAbdbwC00vk2OgBE5WeAkKxynaQiWOKYpAnKSMTDcoyp4e/7+Q4Ob4r+BHONHID/9cP4zkAO0TsFP787PgXyP1/sBW8VWNgE5zeZ/AUnRzFFRW/iUuDXu3HxId3x98HTQ1VV2AbW2W3uJd8pUD8f2zDa7HBvaESPbbtntBGutuvup+wa2HZbcnP+d2dD71bHMDiPQfzKKz/5Kk3hb9yZn+4zZQbOr5w5hV0fQsj7dSXvP8rd0GZWXDmoyWpdB/HAKQ4NcsqkZU3VcliMHajZ0R8OveygU5fZWub29vX35f8OTOJ6U4/mIgV9+AdC6KozaWYA8bCU/TLJAugCSyjfCNJuHxNfEs+9zAoZjFDND0Ay/utBUTdcz9JfyfZrNU5+SFSNJXFI5s0tHs42pa1hmnRahQBNxXxF2GdXGlJ+u7GHMvVExU5wmGfXxNU2yVc5qQ8ea2Rr0rm1rNq0ow8RH3IacaGxpKte+ev0QuesVzl9+mnjulymsuyDGzEQRrsw3azqk2M8oYWuhw4bKhO7vln3DNZ7ZhvtlS5+HBuQnw3Zn6tgrmBpU9q6NW+Reh800yRh2ebnfSLKtmQs9l/cKFd2KkgjRtfqASIjmJCRs7dS1m9rGRLW/eOon1Rirl8aYm+PUXFAAOD4KcSuno6lj2GARG3NKkwcSYHqJ/PtksZgkQcGnja2ZPrWtT4YObe9S1W6sqytvYumwF0C6AB28Lz1cNmaU4LSb2bOhaxvQ6QOBT6skxjHrQYGfp5YJTbcPRs9ouU27YPSZne/dHphfCWOY9oD8argutFshbMTwmESkzRRbdeHYmBjtNlScv02dPmbvt2m7LyuAy8y/x70KeJcz7Qbu1+N3Shjep4z3u2248BCV9sPlejURsxRPUIyWODACHDPC1vCJ4TgtF3rmQG+imuo11D1Dh6bLAwx+dqHp1BY6SzFV05Qs4w2OoecBM3Og7amOY1ybdYxans1SbPBOPfbxBDMUIIYq2YbpuKqpQW8CXVVXXfWlypoouEQhZ6LOfVYmT1X3LtUx57A952ZWyQhIyrONlbF5ksWBY6qukNHk0A2Hpx/PmrmX1szUPU5XSsRPfpgFeIJShukVTSKHoThANBhfCij4WRvPdO4ux4W2d2VbEz6lmLpq69748qXKSPnyfUJZWMtHNxPHq9bskzobu0XjW7BF6IlEWTSumW1nIdaSrIjsifrZmMwmHreoMsiejaGnWbNNaJfib/C6FH7/IZV2337CtNgFEm++oXVVdU7yU+1EcU9OBCMgPbzdOWXkQAGQCZCUvpylBBLobhwPw8ozzgFI5Slm94DXGEI006iGEPFDMaNrPwo8svAWiIQZxWKQ/AG8/QFESbCiyRyDOfVizBYkZJg2h9uJxZ14CTczuugj5RhIdSYJfMwtiZIgC3Eq81A4C5Q6zRnXsmmMZhrGVJTcNFd5y5byZWVTtbxlLZ+OZ9eGmS+pcF3LqkYPYKiZhndpmJ5u2Mqbczn3MhclxnLxWrPMK+NaUFSseYfPG/xtkh2QimdXR2tsaF/yowbJRyHxkxYtqx1zkn4nRaK2S3NKgiWWqv9nFMXpClFeOb9bnhyhFA
771ZL4sne9jJNY+BW8erWFUcbTCDR0+29bJ05C8v+UdE5iBc+Z2DhAZiBGDMhyRZ8fmORHRFoSM0RinrZiRiK8s880y3RVwxRZy3SNCcxtDRL/nm/6HTNzWF28HtQ04yJLofnbSpRuaTe8MBQ9PPwMtfIgyRjDkYiq/DgqKP5VcvFnfNImPj4LFPyEfS9liLJNkD0iwrxFQr0FCUXMn4M3YNgvrRn7/In3u+pq//a46dbMFcC8sPKokNE1yDUB9UvgulETnuW9q7F67RTHZPoRVvkhRtSLeDXxVjRZoaVo9rxFiJbpxtDNTf4/9tzk73VLl74H+aVY6gJKTJcbW3PjlADhKIlr6T2hgAASg+HrFP8N3giFTv8FgqSRToCcVjpWwGA3xgD4629wcnYCfmkhf/UKzClG921RI4QMCZC5Etwnbdh7PZAnmuLYs4r7EOMVeLMtNUhi3HvUWmythufL3ds8NW7uONNwLbtaPa6evW/PyVESE5bQM54D6P7o6RZy0EbpQDlUySJAjlfzYAX7l6TprcNO5DfZr+hfdo5KdXglGsxayOAFb0fLo+Mee9swDjKUMxYFJpf7gKgSknkpU/ybNyR7xNdQDpbMVS7KSlnm8oOiQplkxfJ2UPHLOlUdpJ+ldwc4pBv9m/ZBoUMTozo9bwnOqgbme2CM5jg0k6DW2I3VSzj2TEuHzgFOCDmAHHOEfkd0wB5kfYN3X4w2NNoboF3Q37QqNRWOuSb7NclojMJqJb5uUrk4MXZYQtESj1Z89koZb862KYRCE/Q0S/HozfX266K36nx/ldBHRAM3cdZpmCxHa5zmJC/gYzFVlM7+K9c12BT/LmcUHHLJcYxHbj6kvDGkSTgNUdxoCoc2vLQs14a/zQwb6hyVt4mmVY3nYhJhNOMN81YNpZhlNK73Cr3TWZ5X3DF4+1EJ8IMSZ2EI/DDjo75M4kWyFX8fHB7vpmFeV9uFj1MUIyay7gTFZIFTphPayL4T1TSuoOPqhj3ankajgicfz6L7gFAgr3LVanxc0CMluRxN5MuGiDz/CAF3SYSVYdUuKmdc2hahiLJhg1GpZeGmGhVJ7cZm2MSqXcRsJF9s/mwDOpC8Bs974p/4pNgGVo2RLaw7Fz8DWZYHaEWK444L8PBmUCx8ejGQy01wkWNjysiC+IhhGWXsLqGEreUAMXQBbqWhptbv8G+lQiJ9wPSiUbrya54BADGKsGAtD41+081bic/lDD+xXIH870KBQptdFlBMC9uvZBREJBYEXcIyysdAuRS0S3FP4uAC5HttwIUIxdrgatKEMoXSRODXnFe5rOaUXdcVjPd43cpwA7/cSgMJfOxcaflpc8qh5a5TM5akPgoxVYMgiavQ0cYzYYU6cy1xJ2B7qq435or6yRHnTZUyQ6AKVA7wKkzWEY7Z2RpFYU9h6pV4WHUqjr3S519yVxn6x+fha7Gxhz03XNUF5OnzUtqjSLsgB/sHSMqvxL5FWprNGzY1r/e+Bbm4XayBVzeM3wJLlxvA5t3XUajVrlU1Q0viGPss2dqwqiYObEyo8blDs6E4WVfHzmj4ekVJzBZA+norFRsjuJV4+PxPeit9D8qn+b1m801569p82rxrbb5DPiMPWCdUKLmGcbBKSMxmNMzpyi/lw2RJ4rOI+DRJkwVL4pDE+MxPolvp+9vqvjS/jaCdKJF4L+KrPKCKlFZFrila3ZVXlKXSJcySvzx7JHGQPKZnMWYFRvp3OKkkHKKEn1Bcx7n48P79uwJsicIQ93ikeL9jSPRNGii30osEpL4E0Ps6j1pOUkWEoG/GXv0rhkdwftr8xKj8SOgerwFN0cX7859/LL4aypvm+sdD7378If94KMnYzhl98a2LT3h9yuOAk56tcJR/SnQIA3+Wc5RfF41mjuK4I01VxqMHQlmGwnIOtXYezLafaObWE6klJG/gl9HwdeGjo8x6PD9tC3Fou8cBbsx+LBeoCbm/wjUBDy9uXXKOrmsUB+nH52FnthPJtEvabnY2UYQ/Pm+l5qMwuFN3FYK2exTKPV7vgNzAL/0Ym6ogPplJQuKvt4qC+Nomv1A4ZHU5jrwSQK2LW/ddlUk/Piv1epAq27lBaXz8kiqt7m7XVBxgfQetq8H/BQAA//+oI6qBejYAAA== + + + + + + + +- path: /etc/systemd/system/kubelet.service + permissions: "0644" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/6SUz07jPBTF93kKq2LxfQsnbalQEfIChsBUVAwiRSxKFTnOJbHq2JHvTWlnmHcf9Q9oSosGhJfX53fOkWXd8Z3VNAnOAZXXNWlnxVWTgQEKvjmb6+XkRlIZzzUSiqhBHxmnpIkybaPpRhoE4wT8TCuYBLeAJD0JaZ7kAoPYzrR3tgJLF9qAiIBUlMOjbAy98kmjFCDGc00JSWpQdHqHQTwHlSy9bjyIVV4msWSRqymSPxsPkXKWpLbg8cUqxHIPV01z7RmvWTSTPjI6e03+iFZZ/V4XrlhLP7IxO/ivco0l9swKDzV7aL1NemixZ/akGDf/M26AtdmEnTAqwbJ17ArnPNM236m5Ozhhj7q1r/3GppJT4FhKD7tuQfAGxCWJC1RkGH9iFijU9awXkqpTD+Q1YFf0/w0p5yFEV8m5ctaKztFhv/eJqErOU1zYNJNqalzxUX6VWgGiLCBVDkn02p+kssYjiX77ow9jQRdluPnFYaFSKj1g2RG99vHR1zy6ot857n7N43DzdNsufG0DGcnMADJOzMrlXzEaaa9U12+ltqnAa7UDvbMY2EPANodzsEszjuBn4LdurMuBG5mBQdE6+HV1dxYP41F6/eM8ToenZ/Ew+d3aAmaiy7YHzjQV8No0hbY81369Z5Y1vAUCjNaKtQD/Yg9e0i6Gp5fJvovb+HKQjOLbVZ894/vB6Hs6Oh1cj5IgGA8skjRmEtxLS5CfLUTVGNK8QfAhSV8A/QkAAP//lvMgPnQFAAA= + + + +- path: /etc/apt/apt.conf.d/99periodic + permissions: "0644" + owner: root + content: | + APT::Periodic::Update-Package-Lists "0"; + APT::Periodic::Download-Upgradeable-Packages "0"; + APT::Periodic::AutocleanInterval "0"; + APT::Periodic::Unattended-Upgrade "0"; + + + + + + + + +- path: /etc/systemd/system/docker.service.d/exec_start.conf + permissions: "0644" + owner: root + content: | + [Service] + ExecStart= + ExecStart=/usr/bin/dockerd -H fd:// --storage-driver=overlay2 
--bip= + ExecStartPost=/sbin/iptables -P FORWARD ACCEPT + #EOF + +- path: /etc/docker/daemon.json + permissions: "0644" + owner: root + content: | + { + "live-restore": true, + "log-driver": "json-file", + "log-opts": { + "max-size": "50m", + "max-file": "5" + } + } + + + + + + + + +- path: /etc/kubernetes/certs/ca.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + +- path: /etc/kubernetes/certs/client.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + + + +- path: /var/lib/kubelet/kubeconfig + permissions: "0644" + owner: root + content: | + apiVersion: v1 + kind: Config + clusters: + - name: localcluster + cluster: + certificate-authority: /etc/kubernetes/certs/ca.crt + server: https://:443 + users: + - name: client + user: + client-certificate: /etc/kubernetes/certs/client.crt + client-key: /etc/kubernetes/certs/client.key + contexts: + - context: + cluster: localcluster + user: client + name: localclustercontext + current-context: localclustercontext + #EOF + +- path: /etc/default/kubelet + permissions: "0644" + owner: root + content: | + KUBELET_FLAGS=--address=0.0.0.0 --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroups-per-qos=true --client-ca-file=/etc/kubernetes/certs/ca.crt --cluster-dns=10.0.0.10 --cluster-domain=cluster.local --enforce-node-allocatable=pods --event-qps=0 --eviction-hard=memory.available<750Mi,nodefs.available<10%,nodefs.inodesFree<5% --feature-gates=RotateKubeletServerCertificate=true,a=b,PodPriority=true,x=y --image-gc-high-threshold=85 --image-gc-low-threshold=80 --kube-reserved=cpu=100m,memory=1638Mi --max-pods=110 --node-status-update-frequency=10s --pod-manifest-path=/etc/kubernetes/manifests --pod-max-pids=-1 --protect-kernel-defaults=true --read-only-port=10255 --resolv-conf=/etc/resolv.conf --rotate-certificates=true --streaming-connection-idle-timeout=4h0m0s --system-reserved=cpu=2,memory=1Gi --tls-cert-file=/etc/kubernetes/certs/kubeletserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --tls-private-key-file=/etc/kubernetes/certs/kubeletserver.key + KUBELET_REGISTER_SCHEDULABLE=true + KUBELET_IMAGE=hyperkube-amd64:v1.15.7 + + + KUBELET_NODE_LABELS=kubernetes.azure.com/role=agent,node-role.kubernetes.io/agent=,kubernetes.io/role=agent,agentpool=agent2,storageprofile=managed,storagetier=Premium_LRS,kubernetes.azure.com/cluster=',variables('labelResourceGroup'),' + + #EOF + +- path: /opt/azure/containers/kubelet.sh + permissions: "0755" + owner: root + content: | + #!/bin/bash + + + + #EOF + +runcmd: +- set -x +- . /opt/azure/containers/provision_source.sh +- aptmarkWALinuxAgent hold +'))] \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S115/line16.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S115/line16.sh new file mode 100644 index 00000000000..e51aeb3fea9 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S115/line16.sh @@ -0,0 +1,160 @@ +#!/bin/bash +ERR_FILE_WATCH_TIMEOUT=6 +set -x +if [ -f /opt/azure/containers/provision.complete ]; then + echo "Already ran to success exiting..." 
+ exit 0 +fi + +echo $(date),$(hostname), startcustomscript>>/opt/m + +for i in $(seq 1 3600); do + if [ -s /opt/azure/containers/provision_source.sh ]; then + grep -Fq '#HELPERSEOF' /opt/azure/containers/provision_source.sh && break + fi + if [ $i -eq 3600 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi +done +sed -i "/#HELPERSEOF/d" /opt/azure/containers/provision_source.sh +source /opt/azure/containers/provision_source.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_installs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_installs.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_configs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_configs.sh + +set +x +ETCD_PEER_CERT=$(echo ${ETCD_PEER_CERTIFICATES} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +ETCD_PEER_KEY=$(echo ${ETCD_PEER_PRIVATE_KEYS} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +set -x + +if [[ $OS == $COREOS_OS_NAME ]]; then + echo "Changing default kubectl bin location" + KUBECTL=/opt/kubectl +fi + +if [ -f /var/run/reboot-required ]; then + REBOOTREQUIRED=true +else + REBOOTREQUIRED=false +fi + +configureAdminUser +cleanUpContainerd + + +if [[ "${GPU_NODE}" != "true" ]]; then + cleanUpGPUDrivers +fi + +VHD_LOGS_FILEPATH=/opt/azure/vhd-install.complete +if [ -f $VHD_LOGS_FILEPATH ]; then + echo "detected golden image pre-install" + export -f retrycmd_if_failure + export -f cleanUpContainerImages + export KUBERNETES_VERSION + echo "start to clean up container images" + bash -c cleanUpContainerImages & + FULL_INSTALL_REQUIRED=false +else + if [[ "${IS_VHD}" = true ]]; then + echo "Using VHD distro but file $VHD_LOGS_FILEPATH not found" + exit $ERR_VHD_FILE_NOT_FOUND + fi + FULL_INSTALL_REQUIRED=true +fi + +if [[ $OS == $UBUNTU_OS_NAME ]] && [ "$FULL_INSTALL_REQUIRED" = "true" ]; then + installDeps +else + echo "Golden image; skipping dependencies installation" +fi + +if [[ $OS == $UBUNTU_OS_NAME ]]; then + ensureAuditD +fi +installContainerRuntime + + +installNetworkPlugin + +installKubeletAndKubectl + +if [[ $OS != $COREOS_OS_NAME ]]; then + ensureRPC +fi + +createKubeManifestDir + +ensureContainerRuntime + +configureK8s + +configureCNI + + + +ensureKubelet +ensureJournal + +if $FULL_INSTALL_REQUIRED; then + if [[ $OS == $UBUNTU_OS_NAME ]]; then + + echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind + sed -i "13i\echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind\n" /etc/rc.local + fi +fi +rm -f /etc/apt/apt.conf.d/99periodic +if [[ $OS == $UBUNTU_OS_NAME ]]; then + apt_get_purge 20 30 120 apache2-utils & +fi + + +VALIDATION_ERR=0 +API_SERVER_DNS_RETRIES=20 +if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_DNS_RETRIES=200 +fi +RES=$(retrycmd_if_failure ${API_SERVER_DNS_RETRIES} 1 3 nslookup ${API_SERVER_NAME}) +STS=$? 
+if [[ $STS != 0 ]]; then + if [[ $RES == *"168.63.129.16"* ]]; then + VALIDATION_ERR=$ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL + else + VALIDATION_ERR=$ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL + fi +else + API_SERVER_CONN_RETRIES=50 + if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_CONN_RETRIES=100 + fi + retrycmd_if_failure ${API_SERVER_CONN_RETRIES} 1 3 nc -vz ${API_SERVER_NAME} 443 || VALIDATION_ERR=$ERR_K8S_API_SERVER_CONN_FAIL +fi + + + +if $REBOOTREQUIRED; then + echo 'reboot required, rebooting node in 1 minute' + /bin/bash -c "shutdown -r 1 &" + if [[ $OS == $UBUNTU_OS_NAME ]]; then + aptmarkWALinuxAgent unhold & + fi +else + if [[ $OS == $UBUNTU_OS_NAME ]]; then + /usr/lib/apt/apt.systemd.daily & + aptmarkWALinuxAgent unhold & + fi +fi + +echo "Custom script finished. API server connection check code:" $VALIDATION_ERR +echo $(date),$(hostname), endcustomscript>>/opt/m +mkdir -p /opt/azure/containers && touch /opt/azure/containers/provision.complete +ps auxfww > /opt/azure/provision-ps.log & + +exit $VALIDATION_ERR + +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S115/line23.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S115/line23.sh new file mode 100644 index 00000000000..1f074afdfa1 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S115/line23.sh @@ -0,0 +1,276 @@ +#!/bin/bash + +CC_SERVICE_IN_TMP=/opt/azure/containers/cc-proxy.service.in +CC_SOCKET_IN_TMP=/opt/azure/containers/cc-proxy.socket.in +CNI_CONFIG_DIR="/etc/cni/net.d" +CNI_BIN_DIR="/opt/cni/bin" +CNI_DOWNLOADS_DIR="/opt/cni/downloads" +CONTAINERD_DOWNLOADS_DIR="/opt/containerd/downloads" +K8S_DOWNLOADS_DIR="/opt/kubernetes/downloads" +UBUNTU_RELEASE=$(lsb_release -r -s) + +removeMoby() { + apt-get purge -y moby-engine moby-cli +} + +removeContainerd() { + apt-get purge -y moby-containerd +} + +cleanupContainerdDlFiles() { + rm -rf $CONTAINERD_DOWNLOADS_DIR +} + +installDeps() { + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/packages-microsoft-prod.deb > /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT + retrycmd_if_failure 60 5 10 dpkg -i /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_PKG_ADD_FAIL + aptmarkWALinuxAgent hold + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT + apt_get_dist_upgrade || exit $ERR_APT_DIST_UPGRADE_TIMEOUT + for apt_package in apache2-utils apt-transport-https blobfuse=1.1.1 ca-certificates ceph-common cgroup-lite cifs-utils conntrack cracklib-runtime ebtables ethtool fuse git glusterfs-client htop iftop init-system-helpers iotop iproute2 ipset iptables jq libpam-pwquality libpwquality-tools mount nfs-common pigz socat sysstat traceroute util-linux xz-utils zip; do + if ! apt_get_install 30 1 600 $apt_package; then + journalctl --no-pager -u $apt_package + exit $ERR_APT_INSTALL_TIMEOUT + fi + done + if [[ "${AUDITD_ENABLED}" == true ]]; then + if ! 
apt_get_install 30 1 600 auditd; then + journalctl --no-pager -u auditd + exit $ERR_APT_INSTALL_TIMEOUT + fi + fi +} + +installGPUDrivers() { + mkdir -p $GPU_DEST/tmp + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/gpgkey > $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-key add $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/ubuntu${UBUNTU_RELEASE}/nvidia-docker.list > $GPU_DEST/tmp/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 cat $GPU_DEST/tmp/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + apt_get_update + retrycmd_if_failure 30 5 3600 apt-get install -y linux-headers-$(uname -r) gcc make dkms || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + retrycmd_if_failure 30 5 60 curl -fLS https://us.download.nvidia.com/tesla/$GPU_DV/NVIDIA-Linux-x86_64-${GPU_DV}.run -o ${GPU_DEST}/nvidia-drivers-${GPU_DV} || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + tmpDir=$GPU_DEST/tmp + if ! ( + set -e -o pipefail + cd "${tmpDir}" + retrycmd_if_failure 30 5 3600 apt-get download nvidia-docker2="${NVIDIA_DOCKER_VERSION}+${NVIDIA_DOCKER_SUFFIX}" || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + ); then + exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + fi +} + +installSGXDrivers() { + echo "Installing SGX driver" + local VERSION + VERSION=$(grep DISTRIB_RELEASE /etc/*-release| cut -f 2 -d "=") + case $VERSION in + "18.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer18.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "16.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer16.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "*") + echo "Version $VERSION is not supported" + exit 1 + ;; + esac + + local PACKAGES="make gcc dkms" + wait_for_apt_locks + retrycmd_if_failure 30 5 3600 apt-get -y install $PACKAGES || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + + local SGX_DRIVER + SGX_DRIVER=$(basename $SGX_DRIVER_URL) + local OE_DIR=/opt/azure/containers/oe + mkdir -p ${OE_DIR} + + retrycmd_if_failure 120 5 25 curl -fsSL ${SGX_DRIVER_URL} -o ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + chmod a+x ${OE_DIR}/${SGX_DRIVER} + ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_START_FAIL +} + +installContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + installMoby + fi + +} + +installMoby() { + CURRENT_VERSION=$(dockerd --version | grep "Docker version" | cut -d "," -f 1 | cut -d " " -f 3 | cut -d "+" -f 1) + if [[ "$CURRENT_VERSION" == "${MOBY_VERSION}" ]]; then + echo "dockerd $MOBY_VERSION is already installed, skipping Moby download" + else + removeMoby + getMobyPkg + MOBY_CLI=${MOBY_VERSION} + if [[ "${MOBY_CLI}" == "3.0.4" ]]; then + MOBY_CLI="3.0.3" + fi + apt_get_install 20 30 120 moby-engine=${MOBY_VERSION}* moby-cli=${MOBY_CLI}* --allow-downgrades || exit $ERR_MOBY_INSTALL_TIMEOUT + fi +} + + + +getMobyPkg() { + retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/prod.list > /tmp/microsoft-prod.list || exit $ERR_MOBY_APT_LIST_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft-prod.list /etc/apt/sources.list.d/ || exit $ERR_MOBY_APT_LIST_TIMEOUT + 
retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /tmp/microsoft.gpg || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft.gpg /etc/apt/trusted.gpg.d/ || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT +} + +installNetworkPlugin() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + installAzureCNI + fi + installCNI + rm -rf $CNI_DOWNLOADS_DIR & +} + +downloadCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadAzureCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${VNET_CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadContainerd() { + CONTAINERD_DOWNLOAD_URL="${CONTAINERD_DOWNLOAD_URL_BASE}cri-containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz" + mkdir -p $CONTAINERD_DOWNLOADS_DIR + CONTAINERD_TGZ_TMP=${CONTAINERD_DOWNLOAD_URL##*/} + retrycmd_get_tarball 120 5 "$CONTAINERD_DOWNLOADS_DIR/${CONTAINERD_TGZ_TMP}" ${CONTAINERD_DOWNLOAD_URL} || exit $ERR_CONTAINERD_DOWNLOAD_TIMEOUT +} + +installCNI() { + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadCNI + fi + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR + chown -R root:root $CNI_BIN_DIR + chmod -R 755 $CNI_BIN_DIR +} + +installAzureCNI() { + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadAzureCNI + fi + mkdir -p $CNI_CONFIG_DIR + chown -R root:root $CNI_CONFIG_DIR + chmod 755 $CNI_CONFIG_DIR + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR +} + +installImg() { + img_filepath=/usr/local/bin/img + retrycmd_get_executable 120 5 $img_filepath "https://acs-mirror.azureedge.net/img/img-linux-amd64-v0.5.6" ls || exit $ERR_IMG_DOWNLOAD_TIMEOUT +} + +extractHyperkube() { + CLI_TOOL=$1 + path="/home/hyperkube-downloads/${KUBERNETES_VERSION}" + pullContainerImage $CLI_TOOL ${HYPERKUBE_URL} + if [[ "$CLI_TOOL" == "docker" ]]; then + mkdir -p "$path" + # Check if we can extract kubelet and kubectl directly from hyperkube's binary folder + if docker run --rm --entrypoint "" -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /usr/local/bin/{kubelet,kubectl} $path"; then + mv "$path/kubelet" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/kubectl" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + return + else + docker run --rm -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /hyperkube $path" + fi + else + img unpack -o "$path" ${HYPERKUBE_URL} + fi + + if [[ $OS == $COREOS_OS_NAME ]]; then + cp "$path/hyperkube" "/opt/kubelet" + mv "$path/hyperkube" "/opt/kubectl" + chmod a+x /opt/kubelet /opt/kubectl + else + cp "$path/hyperkube" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/hyperkube" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + fi +} + +installKubeletAndKubectl() { + if [[ ! 
-f "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" ]]; then + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + extractHyperkube "docker" + else + installImg + extractHyperkube "img" + fi + fi + mv "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" "/usr/local/bin/kubelet" + mv "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" "/usr/local/bin/kubectl" + chmod a+x /usr/local/bin/kubelet /usr/local/bin/kubectl + rm -rf /usr/local/bin/kubelet-* /usr/local/bin/kubectl-* /home/hyperkube-downloads & +} + +pullContainerImage() { + CLI_TOOL=$1 + DOCKER_IMAGE_URL=$2 + retrycmd_if_failure 60 1 1200 $CLI_TOOL pull $DOCKER_IMAGE_URL || exit $ERR_CONTAINER_IMG_PULL_TIMEOUT +} + +cleanUpContainerImages() { + function cleanUpHyperkubeImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'hyperkube') + local exit_code=$? + if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + function cleanUpControllerManagerImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'cloud-controller-manager') + local exit_code=$? + if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + export -f cleanUpHyperkubeImagesRun + export -f cleanUpControllerManagerImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpHyperkubeImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpControllerManagerImagesRun +} + +cleanUpGPUDrivers() { + rm -Rf $GPU_DEST + rm -f /etc/apt/sources.list.d/nvidia-docker.list +} + +cleanUpContainerd() { + rm -Rf $CONTAINERD_DOWNLOADS_DIR +} + +overrideNetworkConfig() { + CONFIG_FILEPATH="/etc/cloud/cloud.cfg.d/80_azure_net_config.cfg" + touch ${CONFIG_FILEPATH} + cat << EOF >> ${CONFIG_FILEPATH} +datasource: + Azure: + apply_network_config: false +EOF +} +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S115/line30.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S115/line30.sh new file mode 100644 index 00000000000..ce857cb431e --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S115/line30.sh @@ -0,0 +1,337 @@ +#!/bin/bash +NODE_INDEX=$(hostname | tail -c 2) +NODE_NAME=$(hostname) +if [[ $OS == $COREOS_OS_NAME ]]; then + PRIVATE_IP=$(ip a show eth0 | grep -Po 'inet \K[\d.]+') +else + PRIVATE_IP=$(hostname -I | cut -d' ' -f1) +fi +ETCD_PEER_URL="https://${PRIVATE_IP}:2380" +ETCD_CLIENT_URL="https://${PRIVATE_IP}:2379" + +configureAdminUser(){ + chage -E -1 -I -1 -m 0 -M 99999 "${ADMINUSER}" + chage -l "${ADMINUSER}" +} + +configureSecrets(){ + APISERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/apiserver.key" + touch "${APISERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${APISERVER_PRIVATE_KEY_PATH}" + chown root:root "${APISERVER_PRIVATE_KEY_PATH}" + + CA_PRIVATE_KEY_PATH="/etc/kubernetes/certs/ca.key" + touch "${CA_PRIVATE_KEY_PATH}" + chmod 0600 "${CA_PRIVATE_KEY_PATH}" + chown root:root "${CA_PRIVATE_KEY_PATH}" + + ETCD_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdserver.key" + touch "${ETCD_SERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_SERVER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_SERVER_PRIVATE_KEY_PATH}" + fi + + ETCD_CLIENT_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdclient.key" + touch 
"${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chown root:root "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + + ETCD_PEER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.key" + touch "${ETCD_PEER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_PEER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_PEER_PRIVATE_KEY_PATH}" + fi + + ETCD_SERVER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdserver.crt" + touch "${ETCD_SERVER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_SERVER_CERTIFICATE_PATH}" + chown root:root "${ETCD_SERVER_CERTIFICATE_PATH}" + + ETCD_CLIENT_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdclient.crt" + touch "${ETCD_CLIENT_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_CLIENT_CERTIFICATE_PATH}" + chown root:root "${ETCD_CLIENT_CERTIFICATE_PATH}" + + ETCD_PEER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.crt" + touch "${ETCD_PEER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_PEER_CERTIFICATE_PATH}" + chown root:root "${ETCD_PEER_CERTIFICATE_PATH}" + + set +x + echo "${APISERVER_PRIVATE_KEY}" | base64 --decode > "${APISERVER_PRIVATE_KEY_PATH}" + echo "${CA_PRIVATE_KEY}" | base64 --decode > "${CA_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_PRIVATE_KEY}" | base64 --decode > "${ETCD_SERVER_PRIVATE_KEY_PATH}" + echo "${ETCD_CLIENT_PRIVATE_KEY}" | base64 --decode > "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + echo "${ETCD_PEER_KEY}" | base64 --decode > "${ETCD_PEER_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_CERTIFICATE}" | base64 --decode > "${ETCD_SERVER_CERTIFICATE_PATH}" + echo "${ETCD_CLIENT_CERTIFICATE}" | base64 --decode > "${ETCD_CLIENT_CERTIFICATE_PATH}" + echo "${ETCD_PEER_CERT}" | base64 --decode > "${ETCD_PEER_CERTIFICATE_PATH}" +} + +ensureRPC() { + systemctlEnableAndStart rpcbind || exit $ERR_SYSTEMCTL_START_FAIL + systemctlEnableAndStart rpc-statd || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureAuditD() { + if [[ "${AUDITD_ENABLED}" == true ]]; then + systemctlEnableAndStart auditd || exit $ERR_SYSTEMCTL_START_FAIL + else + if apt list --installed | grep 'auditd'; then + apt_get_purge 20 30 120 auditd & + fi + fi +} + +configureKubeletServerCert() { + KUBELET_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/kubeletserver.key" + KUBELET_SERVER_CERT_PATH="/etc/kubernetes/certs/kubeletserver.crt" + + openssl genrsa -out $KUBELET_SERVER_PRIVATE_KEY_PATH 2048 + openssl req -new -x509 -days 7300 -key $KUBELET_SERVER_PRIVATE_KEY_PATH -out $KUBELET_SERVER_CERT_PATH -subj "/CN=${NODE_NAME}" +} + +configureK8s() { + KUBELET_PRIVATE_KEY_PATH="/etc/kubernetes/certs/client.key" + touch "${KUBELET_PRIVATE_KEY_PATH}" + chmod 0600 "${KUBELET_PRIVATE_KEY_PATH}" + chown root:root "${KUBELET_PRIVATE_KEY_PATH}" + + APISERVER_PUBLIC_KEY_PATH="/etc/kubernetes/certs/apiserver.crt" + touch "${APISERVER_PUBLIC_KEY_PATH}" + chmod 0644 "${APISERVER_PUBLIC_KEY_PATH}" + chown root:root "${APISERVER_PUBLIC_KEY_PATH}" + + AZURE_JSON_PATH="/etc/kubernetes/azure.json" + touch "${AZURE_JSON_PATH}" + chmod 0600 "${AZURE_JSON_PATH}" + chown root:root "${AZURE_JSON_PATH}" + + set +x + echo "${KUBELET_PRIVATE_KEY}" | base64 --decode > "${KUBELET_PRIVATE_KEY_PATH}" + echo "${APISERVER_PUBLIC_KEY}" | base64 --decode > "${APISERVER_PUBLIC_KEY_PATH}" + + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\\/\\\\} + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\"/\\\"} + cat << EOF > "${AZURE_JSON_PATH}" +{ + "cloud": "AzurePublicCloud", + "tenantId": "${TENANT_ID}", + "subscriptionId": "${SUBSCRIPTION_ID}", + "aadClientId": 
"${SERVICE_PRINCIPAL_CLIENT_ID}", + "aadClientSecret": "${SERVICE_PRINCIPAL_CLIENT_SECRET}", + "resourceGroup": "${RESOURCE_GROUP}", + "location": "${LOCATION}", + "vmType": "${VM_TYPE}", + "subnetName": "${SUBNET}", + "securityGroupName": "${NETWORK_SECURITY_GROUP}", + "vnetName": "${VIRTUAL_NETWORK}", + "vnetResourceGroup": "${VIRTUAL_NETWORK_RESOURCE_GROUP}", + "routeTableName": "${ROUTE_TABLE}", + "primaryAvailabilitySetName": "${PRIMARY_AVAILABILITY_SET}", + "primaryScaleSetName": "${PRIMARY_SCALE_SET}", + "cloudProviderBackoffMode": "${CLOUDPROVIDER_BACKOFF_MODE}", + "cloudProviderBackoff": ${CLOUDPROVIDER_BACKOFF}, + "cloudProviderBackoffRetries": ${CLOUDPROVIDER_BACKOFF_RETRIES}, + "cloudProviderBackoffExponent": ${CLOUDPROVIDER_BACKOFF_EXPONENT}, + "cloudProviderBackoffDuration": ${CLOUDPROVIDER_BACKOFF_DURATION}, + "cloudProviderBackoffJitter": ${CLOUDPROVIDER_BACKOFF_JITTER}, + "cloudProviderRateLimit": ${CLOUDPROVIDER_RATELIMIT}, + "cloudProviderRateLimitQPS": ${CLOUDPROVIDER_RATELIMIT_QPS}, + "cloudProviderRateLimitBucket": ${CLOUDPROVIDER_RATELIMIT_BUCKET}, + "cloudProviderRateLimitQPSWrite": ${CLOUDPROVIDER_RATELIMIT_QPS_WRITE}, + "cloudProviderRateLimitBucketWrite": ${CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE}, + "useManagedIdentityExtension": ${USE_MANAGED_IDENTITY_EXTENSION}, + "userAssignedIdentityID": "${USER_ASSIGNED_IDENTITY_ID}", + "useInstanceMetadata": ${USE_INSTANCE_METADATA}, + "loadBalancerSku": "${LOAD_BALANCER_SKU}", + "disableOutboundSNAT": ${LOAD_BALANCER_DISABLE_OUTBOUND_SNAT}, + "excludeMasterFromStandardLB": ${EXCLUDE_MASTER_FROM_STANDARD_LB}, + "providerVaultName": "${KMS_PROVIDER_VAULT_NAME}", + "maximumLoadBalancerRuleCount": ${MAXIMUM_LOADBALANCER_RULE_COUNT}, + "providerKeyName": "k8s", + "providerKeyVersion": "" +} +EOF + set -x + if [[ "${CLOUDPROVIDER_BACKOFF_MODE}" = "v2" ]]; then + sed -i "/cloudProviderBackoffExponent/d" /etc/kubernetes/azure.json + sed -i "/cloudProviderBackoffJitter/d" /etc/kubernetes/azure.json + fi + + configureKubeletServerCert +} + +configureCNI() { + + retrycmd_if_failure 120 5 25 modprobe br_netfilter || exit $ERR_MODPROBE_FAIL + echo -n "br_netfilter" > /etc/modules-load.d/br_netfilter.conf + configureCNIIPTables + +} + +configureCNIIPTables() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + mv $CNI_BIN_DIR/10-azure.conflist $CNI_CONFIG_DIR/ + chmod 600 $CNI_CONFIG_DIR/10-azure.conflist + if [[ "${NETWORK_POLICY}" == "calico" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + elif [[ "${NETWORK_POLICY}" == "" || "${NETWORK_POLICY}" == "none" ]] && [[ "${NETWORK_MODE}" == "transparent" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + fi + /sbin/ebtables -t nat --list + fi +} + +ensureContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + ensureDocker + fi + +} + + + +ensureDocker() { + DOCKER_SERVICE_EXEC_START_FILE=/etc/systemd/system/docker.service.d/exec_start.conf + wait_for_file 1200 1 $DOCKER_SERVICE_EXEC_START_FILE || exit $ERR_FILE_WATCH_TIMEOUT + usermod -aG docker ${ADMINUSER} + DOCKER_MOUNT_FLAGS_SYSTEMD_FILE=/etc/systemd/system/docker.service.d/clear_mount_propagation_flags.conf + if [[ $OS != $COREOS_OS_NAME ]]; then + wait_for_file 1200 1 $DOCKER_MOUNT_FLAGS_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + fi + DOCKER_JSON_FILE=/etc/docker/daemon.json + for i in $(seq 1 1200); do + if [ -s $DOCKER_JSON_FILE ]; then + jq '.' 
< $DOCKER_JSON_FILE && break + fi + if [ $i -eq 1200 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi + done + systemctlEnableAndStart docker || exit $ERR_DOCKER_START_FAIL + + DOCKER_MONITOR_SYSTEMD_TIMER_FILE=/etc/systemd/system/docker-monitor.timer + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_TIMER_FILE || exit $ERR_FILE_WATCH_TIMEOUT + DOCKER_MONITOR_SYSTEMD_FILE=/etc/systemd/system/docker-monitor.service + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart docker-monitor.timer || exit $ERR_SYSTEMCTL_START_FAIL +} + + + + + +ensureKubelet() { + KUBELET_DEFAULT_FILE=/etc/default/kubelet + wait_for_file 1200 1 $KUBELET_DEFAULT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBECONFIG_FILE=/var/lib/kubelet/kubeconfig + wait_for_file 1200 1 $KUBECONFIG_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBELET_RUNTIME_CONFIG_SCRIPT_FILE=/opt/azure/containers/kubelet.sh + wait_for_file 1200 1 $KUBELET_RUNTIME_CONFIG_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart kubelet || exit $ERR_KUBELET_START_FAIL + + + +} + +ensureLabelNodes() { + LABEL_NODES_SCRIPT_FILE=/opt/azure/containers/label-nodes.sh + wait_for_file 1200 1 $LABEL_NODES_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + LABEL_NODES_SYSTEMD_FILE=/etc/systemd/system/label-nodes.service + wait_for_file 1200 1 $LABEL_NODES_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart label-nodes || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureJournal() { + { + echo "Storage=persistent" + echo "SystemMaxUse=1G" + echo "RuntimeMaxUse=1G" + echo "ForwardToSyslog=yes" + } >> /etc/systemd/journald.conf + systemctlEnableAndStart systemd-journald || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureK8sControlPlane() { + if $REBOOTREQUIRED || [ "$NO_OUTBOUND" = "true" ]; then + return + fi + retrycmd_if_failure 120 5 25 $KUBECTL 2>/dev/null cluster-info || exit $ERR_K8S_RUNNING_TIMEOUT +} + +createKubeManifestDir() { + KUBEMANIFESTDIR=/etc/kubernetes/manifests + mkdir -p $KUBEMANIFESTDIR +} + +writeKubeConfig() { + KUBECONFIGDIR=/home/$ADMINUSER/.kube + KUBECONFIGFILE=$KUBECONFIGDIR/config + mkdir -p $KUBECONFIGDIR + touch $KUBECONFIGFILE + chown $ADMINUSER:$ADMINUSER $KUBECONFIGDIR + chown $ADMINUSER:$ADMINUSER $KUBECONFIGFILE + chmod 700 $KUBECONFIGDIR + chmod 600 $KUBECONFIGFILE + set +x + echo " +--- +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: \"$CA_CERTIFICATE\" + server: $KUBECONFIG_SERVER + name: \"$MASTER_FQDN\" +contexts: +- context: + cluster: \"$MASTER_FQDN\" + user: \"$MASTER_FQDN-admin\" + name: \"$MASTER_FQDN\" +current-context: \"$MASTER_FQDN\" +kind: Config +users: +- name: \"$MASTER_FQDN-admin\" + user: + client-certificate-data: \"$KUBECONFIG_CERTIFICATE\" + client-key-data: \"$KUBECONFIG_KEY\" +" > $KUBECONFIGFILE + set -x +} + +configClusterAutoscalerAddon() { + CLUSTER_AUTOSCALER_ADDON_FILE=/etc/kubernetes/addons/cluster-autoscaler-deployment.yaml + wait_for_file 1200 1 $CLUSTER_AUTOSCALER_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_SECRET | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SUBSCRIPTION_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $TENANT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $RESOURCE_GROUP | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE +} + +configACIConnectorAddon() { + ACI_CONNECTOR_CREDENTIALS=$(printf 
"{\"clientId\": \"%s\", \"clientSecret\": \"%s\", \"tenantId\": \"%s\", \"subscriptionId\": \"%s\", \"activeDirectoryEndpointUrl\": \"https://login.microsoftonline.com\",\"resourceManagerEndpointUrl\": \"https://management.azure.com/\", \"activeDirectoryGraphResourceId\": \"https://graph.windows.net/\", \"sqlManagementEndpointUrl\": \"https://management.core.windows.net:8443/\", \"galleryEndpointUrl\": \"https://gallery.azure.com/\", \"managementEndpointUrl\": \"https://management.core.windows.net/\"}" "$SERVICE_PRINCIPAL_CLIENT_ID" "$SERVICE_PRINCIPAL_CLIENT_SECRET" "$TENANT_ID" "$SUBSCRIPTION_ID" | base64 -w 0) + + openssl req -newkey rsa:4096 -new -nodes -x509 -days 3650 -keyout /etc/kubernetes/certs/aci-connector-key.pem -out /etc/kubernetes/certs/aci-connector-cert.pem -subj "/C=US/ST=CA/L=virtualkubelet/O=virtualkubelet/OU=virtualkubelet/CN=virtualkubelet" + ACI_CONNECTOR_KEY=$(base64 /etc/kubernetes/certs/aci-connector-key.pem -w0) + ACI_CONNECTOR_CERT=$(base64 /etc/kubernetes/certs/aci-connector-cert.pem -w0) + + ACI_CONNECTOR_ADDON_FILE=/etc/kubernetes/addons/aci-connector-deployment.yaml + wait_for_file 1200 1 $ACI_CONNECTOR_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$ACI_CONNECTOR_CREDENTIALS|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$RESOURCE_GROUP|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_CERT|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_KEY|g" $ACI_CONNECTOR_ADDON_FILE +} + +configAzurePolicyAddon() { + AZURE_POLICY_ADDON_FILE=/etc/kubernetes/addons/azure-policy-deployment.yaml + sed -i "s||/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP|g" $AZURE_POLICY_ADDON_FILE +} + + +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S115/line43.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S115/line43.sh new file mode 100644 index 00000000000..e708f006a14 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S115/line43.sh @@ -0,0 +1,38 @@ +[Unit] +Description=Kubelet +ConditionPathExists=/usr/local/bin/kubelet + + +[Service] +Restart=always +EnvironmentFile=/etc/default/kubelet +SuccessExitStatus=143 +ExecStartPre=/bin/bash /opt/azure/containers/kubelet.sh +ExecStartPre=/bin/mkdir -p /var/lib/kubelet +ExecStartPre=/bin/mkdir -p /var/lib/cni +ExecStartPre=/bin/bash -c "if [ $(mount | grep \"/var/lib/kubelet\" | wc -l) -le 0 ] ; then /bin/mount --bind /var/lib/kubelet /var/lib/kubelet ; fi" +ExecStartPre=/bin/mount --make-shared /var/lib/kubelet + + +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_retries2=8 +ExecStartPre=/sbin/sysctl -w net.core.somaxconn=16384 +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_max_syn_backlog=16384 +ExecStartPre=/sbin/sysctl -w net.core.message_cost=40 +ExecStartPre=/sbin/sysctl -w net.core.message_burst=80 + +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh1=4096 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh2=8192 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh3=16384 + +ExecStartPre=-/sbin/ebtables -t nat --list +ExecStartPre=-/sbin/iptables -t nat --numeric --list +ExecStart=/usr/local/bin/kubelet \ + --enable-server \ + --node-labels="${KUBELET_NODE_LABELS}" \ + --v=2 \ + --volume-plugin-dir=/etc/kubernetes/volumeplugins \ + $KUBELET_FLAGS \ + $KUBELET_REGISTER_NODE $KUBELET_REGISTER_WITH_TAINTS + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S115/line9.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S115/line9.sh new file mode 100644 index 00000000000..08cbc16e86d --- /dev/null +++ 
b/pkg/agent/testdata/AKSUbuntu1604+K8S115/line9.sh @@ -0,0 +1,305 @@ +#!/bin/bash + +ERR_SYSTEMCTL_START_FAIL=4 +ERR_CLOUD_INIT_TIMEOUT=5 +ERR_FILE_WATCH_TIMEOUT=6 +ERR_HOLD_WALINUXAGENT=7 +ERR_RELEASE_HOLD_WALINUXAGENT=8 +ERR_APT_INSTALL_TIMEOUT=9 +ERR_NTP_INSTALL_TIMEOUT=10 +ERR_NTP_START_TIMEOUT=11 +ERR_STOP_SYSTEMD_TIMESYNCD_TIMEOUT=12 +ERR_DOCKER_INSTALL_TIMEOUT=20 +ERR_DOCKER_DOWNLOAD_TIMEOUT=21 +ERR_DOCKER_KEY_DOWNLOAD_TIMEOUT=22 +ERR_DOCKER_APT_KEY_TIMEOUT=23 +ERR_DOCKER_START_FAIL=24 +ERR_MOBY_APT_LIST_TIMEOUT=25 +ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT=26 +ERR_MOBY_INSTALL_TIMEOUT=27 +ERR_K8S_RUNNING_TIMEOUT=30 +ERR_K8S_DOWNLOAD_TIMEOUT=31 +ERR_KUBECTL_NOT_FOUND=32 +ERR_IMG_DOWNLOAD_TIMEOUT=33 +ERR_KUBELET_START_FAIL=34 +ERR_CONTAINER_IMG_PULL_TIMEOUT=35 +ERR_CNI_DOWNLOAD_TIMEOUT=41 +ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT=42 +ERR_MS_PROD_DEB_PKG_ADD_FAIL=43 + +ERR_SYSTEMD_INSTALL_FAIL=48 +ERR_MODPROBE_FAIL=49 +ERR_OUTBOUND_CONN_FAIL=50 +ERR_K8S_API_SERVER_CONN_FAIL=51 +ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL=52 +ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL=53 +ERR_KATA_KEY_DOWNLOAD_TIMEOUT=60 +ERR_KATA_APT_KEY_TIMEOUT=61 +ERR_KATA_INSTALL_TIMEOUT=62 +ERR_CONTAINERD_DOWNLOAD_TIMEOUT=70 +ERR_CUSTOM_SEARCH_DOMAINS_FAIL=80 +ERR_GPU_DRIVERS_START_FAIL=84 +ERR_GPU_DRIVERS_INSTALL_TIMEOUT=85 +ERR_GPU_DEVICE_PLUGIN_START_FAIL=86 +ERR_GPU_INFO_ROM_CORRUPTED=87 +ERR_SGX_DRIVERS_INSTALL_TIMEOUT=90 +ERR_SGX_DRIVERS_START_FAIL=91 +ERR_APT_DAILY_TIMEOUT=98 +ERR_APT_UPDATE_TIMEOUT=99 +ERR_CSE_PROVISION_SCRIPT_NOT_READY_TIMEOUT=100 +ERR_APT_DIST_UPGRADE_TIMEOUT=101 +ERR_APT_PURGE_FAIL=102 +ERR_SYSCTL_RELOAD=103 +ERR_CIS_ASSIGN_ROOT_PW=111 +ERR_CIS_ASSIGN_FILE_PERMISSION=112 +ERR_PACKER_COPY_FILE=113 +ERR_CIS_APPLY_PASSWORD_CONFIG=115 +ERR_SYSTEMD_DOCKER_STOP_FAIL=116 + +ERR_VHD_FILE_NOT_FOUND=124 +ERR_VHD_BUILD_ERROR=125 + + +ERR_AZURE_STACK_GET_ARM_TOKEN=120 +ERR_AZURE_STACK_GET_NETWORK_CONFIGURATION=121 +ERR_AZURE_STACK_GET_SUBNET_PREFIX=122 + +OS=$(sort -r /etc/*-release | gawk 'match($0, /^(ID_LIKE=(coreos)|ID=(.*))$/, a) { print toupper(a[2] a[3]); exit }') +UBUNTU_OS_NAME="UBUNTU" +RHEL_OS_NAME="RHEL" +COREOS_OS_NAME="COREOS" +KUBECTL=/usr/local/bin/kubectl +DOCKER=/usr/bin/docker +export GPU_DV=418.126.02 +export GPU_DEST=/usr/local/nvidia +NVIDIA_DOCKER_VERSION=2.0.3 +DOCKER_VERSION=1.13.1-1 +NVIDIA_CONTAINER_RUNTIME_VERSION=2.0.0 +NVIDIA_DOCKER_SUFFIX=docker18.09.2-1 + +aptmarkWALinuxAgent() { + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-mark $1 walinuxagent || \ + if [[ "$1" == "hold" ]]; then + exit $ERR_HOLD_WALINUXAGENT + elif [[ "$1" == "unhold" ]]; then + exit $ERR_RELEASE_HOLD_WALINUXAGENT + fi +} + +retrycmd_if_failure() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + echo Executed \"$@\" $i times; + return 1 + else + sleep $wait_sleep + fi + done + echo Executed \"$@\" $i times; +} +retrycmd_if_failure_no_stats() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +retrycmd_get_tarball() { + tar_retries=$1; wait_sleep=$2; tarball=$3; url=$4 + echo "${tar_retries} retries" + for i in $(seq 1 $tar_retries); do + tar -tzf $tarball && break || \ + if [ $i -eq $tar_retries ]; then + return 1 + else + timeout 60 curl -fsSL $url -o $tarball + sleep $wait_sleep + fi + done +} +retrycmd_get_executable() { + 
retries=$1; wait_sleep=$2; filepath=$3; url=$4; validation_args=$5 + echo "${retries} retries" + for i in $(seq 1 $retries); do + $filepath $validation_args && break || \ + if [ $i -eq $retries ]; then + return 1 + else + timeout 30 curl -fsSL $url -o $filepath + chmod +x $filepath + sleep $wait_sleep + fi + done +} +wait_for_file() { + retries=$1; wait_sleep=$2; filepath=$3 + paved=/opt/azure/cloud-init-files.paved + grep -Fq "${filepath}" $paved && return 0 + for i in $(seq 1 $retries); do + grep -Fq '#EOF' $filepath && break + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + sed -i "/#EOF/d" $filepath + echo $filepath >> $paved +} +wait_for_apt_locks() { + while fuser /var/lib/dpkg/lock /var/lib/apt/lists/lock /var/cache/apt/archives/lock >/dev/null 2>&1; do + echo 'Waiting for release of apt locks' + sleep 3 + done +} +apt_get_update() { + retries=10 + apt_update_output=/tmp/apt-get-update.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + ! (apt-get update 2>&1 | tee $apt_update_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_update_output && break || \ + cat $apt_update_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get update $i times + wait_for_apt_locks +} +apt_get_install() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get install -o Dpkg::Options::="--force-confold" --no-install-recommends -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + apt_get_update + fi + done + echo Executed apt-get install --no-install-recommends -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_purge() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get purge -o Dpkg::Options::="--force-confold" -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + echo Executed apt-get purge -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_dist_upgrade() { + retries=10 + apt_dist_upgrade_output=/tmp/apt-get-dist-upgrade.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + apt-mark showhold + ! 
(apt-get dist-upgrade -y 2>&1 | tee $apt_dist_upgrade_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_dist_upgrade_output && break || \ + cat $apt_dist_upgrade_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get dist-upgrade $i times + wait_for_apt_locks +} +systemctl_restart() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl restart $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_stop() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl stop $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_disable() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl disable $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +sysctl_reload() { + retries=$1; wait_sleep=$2; timeout=$3 + for i in $(seq 1 $retries); do + timeout $timeout sysctl --system && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +version_gte() { + test "$(printf '%s\n' "$@" | sort -rV | head -n 1)" == "$1" +} + +systemctlEnableAndStart() { + systemctl_restart 100 5 30 $1 + RESTART_STATUS=$? + systemctl status $1 --no-pager -l > /var/log/azure/$1-status.log + if [ $RESTART_STATUS -ne 0 ]; then + echo "$1 could not be started" + return 1 + fi + if ! retrycmd_if_failure 120 5 25 systemctl enable $1; then + echo "$1 could not be enabled by systemctl" + return 1 + fi +} + +systemctlDisableAndStop() { + if [ systemctl list-units --full --all | grep -q "$1.service" ]; then + systemctl_stop 20 5 25 $1 || echo "$1 could not be stopped" + systemctl_disable 20 5 25 $1 || echo "$1 could not be disabled" + fi +} +#HELPERSEOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S117/CSECommand b/pkg/agent/testdata/AKSUbuntu1604+K8S117/CSECommand new file mode 100644 index 00000000000..231d98c7ae9 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S117/CSECommand @@ -0,0 +1 @@ +echo $(date),$(hostname); retrycmd_if_failure() { r=$1; w=$2; t=$3; shift && shift && shift; for i in $(seq 1 $r); do timeout $t ${@}; [ $? 
-eq 0 ] && break || if [ $i -eq $r ]; then return 1; else sleep $w; fi; done }; ERR_OUTBOUND_CONN_FAIL=50; retrycmd_if_failure 50 1 3 nc -vz mcr.microsoft.com 443 2>&1 || exit $ERR_OUTBOUND_CONN_FAIL; for i in $(seq 1 1200); do grep -Fq "EOF" /opt/azure/containers/provision.sh && break; if [ $i -eq 1200 ]; then exit 100; else sleep 1; fi; done; ADMINUSER=azureuser CONTAINERD_VERSION= MOBY_VERSION= TENANT_ID=tenantID KUBERNETES_VERSION=1.17.7 HYPERKUBE_URL=k8s.gcr.io/hyperkube-amd64:v1.17.7 APISERVER_PUBLIC_KEY= SUBSCRIPTION_ID=subID RESOURCE_GROUP=resourceGroupName LOCATION=southcentralus VM_TYPE=vmss SUBNET=subnet1 NETWORK_SECURITY_GROUP=aks-agentpool-36873793-nsg VIRTUAL_NETWORK=aks-vnet-07752737 VIRTUAL_NETWORK_RESOURCE_GROUP=MC_rg ROUTE_TABLE=aks-agentpool-36873793-routetable PRIMARY_AVAILABILITY_SET= PRIMARY_SCALE_SET=aks-agent2-36873793-vmss SERVICE_PRINCIPAL_CLIENT_ID=ClientID SERVICE_PRINCIPAL_CLIENT_SECRET='Secret' KUBELET_PRIVATE_KEY= NETWORK_PLUGIN= NETWORK_POLICY= VNET_CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/azure-cni/v1.1.3/binaries/azure-vnet-cni-linux-amd64-v1.1.3.tgz CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/cni/cni-plugins-amd64-v0.7.6.tgz CLOUDPROVIDER_BACKOFF= CLOUDPROVIDER_BACKOFF_MODE= CLOUDPROVIDER_BACKOFF_RETRIES=0 CLOUDPROVIDER_BACKOFF_EXPONENT=0 CLOUDPROVIDER_BACKOFF_DURATION=0 CLOUDPROVIDER_BACKOFF_JITTER=0 CLOUDPROVIDER_RATELIMIT= CLOUDPROVIDER_RATELIMIT_QPS=0 CLOUDPROVIDER_RATELIMIT_QPS_WRITE=0 CLOUDPROVIDER_RATELIMIT_BUCKET=0 CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE=0 LOAD_BALANCER_DISABLE_OUTBOUND_SNAT= USE_MANAGED_IDENTITY_EXTENSION=false USE_INSTANCE_METADATA=false LOAD_BALANCER_SKU= EXCLUDE_MASTER_FROM_STANDARD_LB=true MAXIMUM_LOADBALANCER_RULE_COUNT=0 CONTAINER_RUNTIME= CONTAINERD_DOWNLOAD_URL_BASE=https://storage.googleapis.com/cri-containerd-release/ NETWORK_MODE= KUBE_BINARY_URL= USER_ASSIGNED_IDENTITY_ID=userAssignedID API_SERVER_NAME= IS_VHD=true GPU_NODE=false SGX_NODE=false AUDITD_ENABLED=false CONFIG_GPU_DRIVER_IF_NEEDED=true ENABLE_GPU_DEVICE_PLUGIN_IF_NEEDED=true /usr/bin/nohup /bin/bash -c "/bin/bash /opt/azure/containers/provision.sh >> /var/log/azure/cluster-provision.log 2>&1; systemctl --no-pager -l status kubelet 2>&1 | head -n 100" \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S117/CustomData b/pkg/agent/testdata/AKSUbuntu1604+K8S117/CustomData new file mode 100644 index 00000000000..6be3c4ee4f7 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S117/CustomData @@ -0,0 +1,158 @@ +[base64(concat('#cloud-config + +write_files: +- path: /opt/azure/containers/provision_source.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9xZbVPbSBL+7l/R69UtkFvZlh1YEkq5FdbgqDCSSi+wXMJOCWlsqxCSI41JsoH/fjWjV7+QNdm9Ind8MupnerqffqanZf/4Q/c6jLvXXjZrtZBlYfvSdtDZ0Blj21EsB58o2lh+Cdw2HBuuijVdc7CjnSHDdeT93HKijRG+UJzh28pykFveGmMVXyhjTXd/U0ZId+RfcoOFxkix0QbAYQ5QTAdruu0o43Hl9FVu0h1zzST1alseemWRcovtGGaRn8qN9qU+VGtYP4epxvAUWWv++70ls2pc6GNDqZf3pSX7KbrcgFnegmXIcJV5sGRuFKBfVODMOL7ky8aaXSfYL6pwZuOROXpk64OGh7XcipqcHtrYcnVd00eVbdCrbWteB0XSp+4xYprRDQefGK6uyoMiVe1stGHZoF42Rk4z00GpNUN3FE1HuQfTbUQ7KNId6tq665dSxYVpGSpW0fEGUH8dZJ6OsKKqheAH0DwOasVYbj0suVRNyzhGxdNCnIbrHDMKWAp6btpvcKiYGraRdY6sJkDaCFB1G48N49Q1C1h/I0z5t2uhdXBJsuIomzVx0GsgVsV4IDWMq4I56K9USV33/kvhfejajnGGbaRYw7dYNc4UTbfzEA8LyMh0sWpp58iym1o4fLluXo3kcL+BQefaEGFz7I40fcnRQQ3S9BMDW8YZHhqW5ZoOUuXDQv726LdH93nVW8c0dngl1U1LVbRxzeOrRjtzTVVxUG0qFDO0EdPhuWZrho7toaWZDj9KFlLUy0aH6zU2YQ3ANUeWoqIGohGG6VqjQppSr6iXfWmzY2ohVilZ6hUSGWo2VmxbG+nYMgwHmxeyVHbNho13eRNZZ5rNIpWlsmWaCm9YQ8O85CBZkpqeTXN8iU3Fti8Mix+LE20kS9I+LB2xqusZhYAl6aA4hedv1Xzzur9IZUtktmNXG6sYWZZhyRLrhvmy/FzYjjI8xSPkYMU6w45xinRZKrv5KkRHzoVhnRZBupbi8ETL5r4Kt91jHTnYtNCJ9psssf7eMmxZ2M2SlIKYQpdQv/tCTElEvIzAPUy9jzewc+tRf7Yr9H6G7u+7morH2imSd/0kJUm2d6+p8m7nxd6e0P0ZvD34AvM0jCnQZDGfk3TXe9e/Au/d4GrvCMinkMLDzl7LPXZ1x8WGjXXlDMnt/P92y3qLxvVT9l+7NTQsZNj10/z/dqvo4nJ3kaXdKPG9iA8HN4tr4tOolRcot7LnQeLfkLRFPs1ZsvwAnssvpcOO1D/o9PpLBmQ7TbfxXRiEXks/11RNKSvPzhRju9/pdQatlYdSRxp0JFEq19T3g+XqTP9Ly3srrm33hBUoj1g67PRedfqi1Gp5c3rrpTcXyjiMF5+UKYnp7h58aQEAfPRCiidJir05xVHi32T8cUpo+tm/DXA4wRMvjBYpASanfejvgzenInMIggQfvYg59ZhTuL+H93x5OIF376AtSG2QZWjPkihow9XVEdAZiTmC/fGyChvnJ44h0YqfRfynnh4duDh2ErYeWq0NyVV8MFtIMlmQjnJusoiQuSz0j4CGtyRZUFkYHEE2CycUfvpp5UO+SZJCCGEMwm5GPoAEQuF07wiCpIq5cAdC9eHLrw/M03VKvJuay5JPEEIQyYfKG6ySwInwZwmgT8RfUBLA+7bw6/s2W8j2yI6WoCmhizQGqSYxysgSgqcOQk1DZZ2E/GOQxPmKP9n2YRPlOE5wRj2a/b9w/3cR2mBrSiimXnrtRVHFEvVS/DWmcjhnapFGsvCyrlBb+NJY/VAS3n6EugZ2hT4vBZH+MeEQtts21DW8fSN9ZbEOeuAv0gjESWaPQeAfkyqWv8444UL2rqOt2sIkjMjco7MG40dw50Vh4NEwibGXTjNZ2F+qwrYV2Mi+UO4Iwso2/30FlyUYbC5BGdnSGn92mwTwz0+PmLctUXVTMS9PLAyHzr07EsjdZE673h+LlHT9KFkEYhiHVGTYrMMRHDtNyRzEkw+sWKWfhzYIHMFoLqjqPaVwldOdH5FxstMoZFm352s6HEkCEENod1l43aC9UjAu3jrmN28KOpq1qaaIesCYhRGBySIjKXTvvLQbhdfdYH4zZUPSTf3Im9NuFGY0azz3PX9GuMVL/Vl4Rwrjm25A7rrxIoqg/+YnaYlkHuXOhRfSMJ7yypRjaTJhkwvw8HZay/QMmkpjSbA+sJgHHl2XmpQXnaFyBE4WdL6gcpfezlm04pRQMTd1ksWTLqZHxjGeWD5oquhYU3R8Yhm6g3RVjpM4jClJPZ+Gd3XlGcMgin4ST8Ipm95ED0RxkqQ+4Q8DMqmwRcggTkD8DGGc0WYb/QF2S0CeE+cc7oESAsIaC2z65zpH0P59990FunrdebF3v/uOoKs07bzYE9p7TO91e/I9usnP5l62Gfw3nZtCDPtbzjkrtJTzzmNzdS2sguJnGHyeVV9F2uymUOc309evjTm7urLXr+V2cy2f8UUxTsRihZgSP7m9JXGQMYU+97BWnv66RzxRMBURj+e4cXT/qqjmi3T6HO8xzyopnvSWgnp+4WynjiKnb9FAEGZMktPUC0opLN1aDNfEbLy5GEAsAMX9tYUUHpHB9hLYtvyP31bV9xLZLPk4S6J8lGvcX83M2OLVi2wDNU+7zqrbaZOndeF9Dd7aUpVLely7xLjiCr1tVtsSJ40rbKPOss8ZJbc+jXBKMuql9Gn9BrI7P/ZuSflS+q1v61UYEHjkNonFlESJF2yzoIgbhCKU76UdNLnNaDL/nyOWBf1dsxqE2bav9N8VsUXc3ye3eStgqTyN179KJiNGFHOSvh9C7kiahUmMp9VbIyUZhbawy3/bmMDOP7L38Q60hV/bcA/FzyfncA8z4gUgxiDt5d90C1K79dCq1YtipgElDuylrrvWj0Hq9WAfBj0Q8nwslP9+ZzuK49qy8K/ldZBRjy4yEKR8GJ17U5KCGMGb4s08mRZflgiSmGM7UTJtXE7LG4AYE+it0Vx87yWBnyyiAOKEwjUBHjEJ2q2NlSioDSfww9d/jqiTIXF+UqStts/RAVx/rl08HkuzGmp+JHk5Gq2aE1JHE/GbNQ5pxuaYBZ/32dRfThQfWEidjKR3oU/aa5wt3wdQZitITOaPEZrM501C17rfVm4KbOGH5/7jWzQ2kWUj46T1nwAAAP//FTwo004jAAA= + +- path: /opt/azure/containers/provision.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/6xXe3PaOhb/35/ilDJ59NYYSG/abIfuUHAaNhSztsnd3d6Ox1jHoImRXEnO47b57juSeZiE3iad+o9MkI7O+Z3XT0fPnzlTypxpLOeW6/vR6WDoRn90w95ZFA4+ut4k7ByDJVGBfWPRFD6BnYLDc+XEfxUCnYQzFVOGQjq54FdUUs4aCV/kGSqEz29BzZFZYD5M5hxq3UxgTG5BxAwUB1kkCUoJeEMVZbNGo1Fbid9QBU0rpZZlTtYPSKzw8GX9YM6lYvECD1+CVLFQSSEVX8hE0Fy9e2fQLSwr5QIoUAb1A4lfoAVHx83m4Vsg3BgonZE/ciaSvBAJNuT8njcAM4E52KdfYP/5mTscu37geqf7T1C4twdTgfGl0ZjSDaw6BRu/GMAPrJqw1HfnyghhJnEtLTPEHForC4QztCQSsCnUnApsh9QeD9wq/3vCAes6pipKuYhSmmHpWOuH5ymTKs4yqWP17dvfe/5ISBWVPwkq4Syls1+JaaPRMp32243lhr1+NHZdP+q5ftipH5Qd8HV7fXA66HVDN7iDb5AUCmyy/2lfN2h7s/DZLLQ2Cy/NQv3goP515PXdaDDqu/+5+611eHhYMXvu/neX1bE/uOiGrt7+dVaX7GLo5RPUvQA6Haj3PN/1gsgLolH3owufq31QMklvHrMZZTMgmMZFpuCymGKiMphSBhlPYkU5K9nkfPLe7YXDjknFUswwy5rSrmLhiII5AqecK1vgl4IKJFvt57vvPS/03X9PBr7b7yhRoLXutnubaaw3tIkyvYXALllQNpEorCTDmE3y3qoaiLXyvlb/+mE8iXSM7mrwrAM1baW27f7y+IfxpC/oFQpp7Fyc9aOh9yEwlTjuhmedSuFdzYm9rP01Pa+drz84Cg+jTVBhopDAjGcEGdBFPEPIBa70lpHGm5wLpbUKVOI2WZCIplEa06wQeE/ifhgGWqWsCum8+SM3dIPowvWDgTeqIDLcr+8QoweKHNbtVaKTJSR9uYGdfMcc7Bmh08lwGA1GQdgdDqN7WVzneJ2jQRBdnPXvatABnZ/t9GwQTqQuz4uzPhAqleAwLRQYstkRcsYVpLxgpLaD67W4YZiRF0an3mTUr14au8Gb+lwV+aaxJu8no3BSaSx9D2mvdmrRLq5qsOLjMud9zOUmPKXTHyr18RbkJc3zsklzZARZQlGuji879BEYq+XIpG6mglDV10eXutZp9Qum6AJ1S5U7I1TXXFyOs2JG2Xr1vJhihqrLyPmSDyoYnv2IgAwGf9wrO1xgrFCr+RgzmqJUfSosqxR6iGtNCOdvZOVXbzSwrNWpJbrlr3/xQrC4RLg7TdXUPDqS+tuu2DYhrQRbr+3m6xO0XzWPEnt69Hvbjlsn7RZiu/kaEd6BI2+lMy2kc7XQf0nJQs78KioUzZyCTSkjmxlkOW+0juifv9zKn6wGDqrEEUlDc3626ouUWmJhqF3vxpoJc9XQ0W4Q5+QkR0E5ockTCi/OVTRDFeWFmCG0m3DUhFa7CXEeJ3Ns2xqVphNdEtZFdzjod8OBN4pc3+80re54EAWuf+H6UX8URL4b+gM36LSbKwQVAWO704EXjVzQq1hhRtll40UVznfVmYnZd4NO/WAHA0P96+6Td3pABiYzzi+LfFtMw7k7tIIw6NT/uYIbhKZNmtsxWm76rgnni1rr+E3j+KjRap80Wse1F/CwAO8FyrDd+Zsgqtjv/m/iuwbs0PPOJ+PotDsYPpx1H6Fpl46UbhisItrzRqN1WH9vVr17WqbK73uaW81mlcl/mLLq4WXOErCv/tqRMXj16kjPqI+Ii9FqImKK1/DM9kBzfx7YLwclWA1KL6Fc0FTPOEH98GrBgrJC4b45tn5o6ru4JueFIvyagS2gBXu1n6CuOFeLWFz+0R1SVtx0Z8gUFGzOM7K806uZfZpqp5DCyeh0zRvyVipckAaJaXa7VP9YDOsXbK1nHqpQvlQhpYzKOZKGLg6QKK5Q6BGGYaLvRUjmmFxCwgn+owb17Sz+zZMYGdn5IF5cEirAzne/R/QQoHiRzB/9urdyCXFxk15fa67eHFpL2rlsZHwGe5ZVDjL3XLCeu96p9f8AAAD//xsNyOSCEAAA + +- path: /opt/azure/containers/provision_installs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9waaXPaSPY7v+JFVm2uaQmcxJtNhuxikD2UMbg4MplNpVSN1IgeJLWm1XJMCP99q3ULhI9kNrs1qQqG7tev331JR4/0OfX1OQ6XjUa3a06M8ft+1zD7Q3N6edXWWSB0/CXiRLeYLzD1CQ91y0IBZzdrLST8mlpEo358dtS9MKb3PcqsFRHxyWHf7I6GZ/1zs9cftxWdCEu3fKr7RGi2Eu+f9ofppsQqN+fUT7Z6o1+Hg1GnN9kBsNln32XYDpVGdzScdvpDY9yrh87Is8uHLl5PaqFX0ZxwnwgSlqFnp7PhdGaOjYHRmRht9Ykbzk1OXIJDAogDCp82Gpx47Jpcsvn6yVPYNAAAcCCQQwQEEXcIoDV4bL5GxHeoT5Lvlksb2+xsNyf1DgwFT/Kw5RLsR0FxuueeUZeEORLuAeILUA9JSiKhfiiw6/ZIUDpHBF9bnm3ShbnA1I04MX1mhgKLEFrHTXgFx6/AirgLaBFOBrAUIgjf6HqArRV2SKh51OIsZAuhWcyTqlhQR4/mkS8iXd1UxbrNj6H8mDQnW7PJHN6BLrzgVpCvX4HcUAGqMR6blxPzajzqmT3jNGfWnPYvjdFseog5OJEstZpgBysHEP2OK68uzs1Or2eedfqDTJEe5qtfOwPqRzcdh/gClsy1s03TIcKMAhsLUsXauZqas6teZ2pUyM+O2DSU5xyO7ZqDvf5Enj4fd3rV4wvGYxQpc0B9wAG2luQYRYK6YWx4gmM/DBgXKFYszF02X0Qhabe0ltYCCyOLcEEX1MKChGCRYIks5nnMB8vhLAqQSwUBiy7CFKvFfF9wbK3Akp8unSMe+YJ6BMhc4LlLQiBiKRhzQd4EDhXguFEoCF+E0ltiuQkWAF3Enz4VKFyHgnhoSdyA8BAoi3cCziJBjoEGIRFAgxT973+AS+cB9lDw+Y8Iu1Ss44XsB5KXh+CxyBfgy0sTjgLqfIGQWVhAuA6lD4DkhMS3gGQPuVK1cPMlZfYLDd6CzWJ5A9AFPMq1lnobvGhCC06aTVBLyngLYkn89BjA7yziPnYt4QJCPkMBdggHFFXO5NBV/feHk2lnMKioHmBB4y8285NzdAEfP4KibjqzXn/aM41h53Rg9LYKtNsgeETg06cKUbfzgiObCvu+XCTQ30L/gpYC1/nVrMfpNeFF+PJWNuWAAlDPr2Zmz5hMpT9/X2Tzr6lNseZQsYzmGmXpArJlwuO6EzgrsoZ31St1HIjsYOBUvTSGG/ffG+NJLbefMRXmgnFTittl1io8GL1yuqXvSjKwbf8vCPlTJJkkif0cUYHSXBqKPWnXgGSc/ghWsbibnncQ10E4EHrIIm6RMF7X7O8nvppLDhrLC0nui9hd0xIj82Mk46Ef3aAlwTbhIVKfRD72ZKHzFBzLAg+vCNgrL3yYAR2k4qSZ2cdgkptHFGpZAaalpiJLCEFCF+uJfN/rw/f9Xr+D4pSKbl6fmCcvkbpJNrcaj3xADNIFYzItrCeJFAXswzgRXtCjvL0fVuK4+CSNUjLrICIpCGhAJMfphmXLWJsg2Srp4v10lIkEKmZy3FbUTSILsyfL9LEpae+PhtvnuxuT2dlZ/8NWeRjHTyvR/P7nKiF6cv5hN0QTa8lA6Sf71Hdgcv4BEu0kgnGZhV1IuYlX0u9t9YnDSQCywhn3T7MAkfjVM5QW51/BigSgBRwDskFpK09jHJas29UUE9CEL6X1Wmu+TCHkv8n5h5Q5czYetJXMNHO7bLY0xh2d+oK4KHRudNvCAWppx3rsQPHPLD3K9iiJaRPCrwmPL9ND58aMYc2bk5dmwrjZ0lrHptVqNZutYy3uhDKK3r5NKT35kZSefAelz0pUJsp+T3hImV8Sfwg+ExBGgSw1ia1Ua4HWLk4SYqtRMo6rTveic25M2kocmGSEksFJeWj63Pc2tM6DoprdAlXHKSS/7wAlGguwRlVfbfXJHIckjq9qVY1PS+dHRtyj1nfdjOyUO5sEfNu4u1QoJWZ1U71/mwTPFJde3t7eXwixuy09ZgN+fnMIXQz14Ksm0854mnRYRZDJu+Bx0lfkoSYrcvMm2BzPhpLKuMpVkkiq7Fa6kFmA7OsbpeKzdGWl5e/OxmNjODWLMJVgtgGh69T0v0IcupRevAPpsgJptLJB+UmRQatVWoF45UVp5XkC87TKXfX6hDd1czk6/S3PCTVMJq6ZUaqWwaV/YpcTbOfeQOyfIFzRIJARW3Kf56XE6YgbFg1JMRbJlxwi5O+rlZMvxTd2B/32Dq2FGrIeJYNMuhPlhdbUXtZwVEEaQ70oAkuqw3K5lDn6cTNuZI6b5VnNLlXP8uFNu0TQM0AIuy77jKQ44o58p0SKQQ/nyUajkMy3DGG+d/zCmZ2Xp7Jw3Rl4VIrRgh/Zpw36k+mdxV4rna5YwWH0h8ri77j3G6W1IuuwIFHDoSX9NnAAIZtg7jG+J6b9pupyYp5fnZsXxm/3H0IdFFOMPheQ4FEoiC0X9+Vzx7UPnDcVoW5IxGfGV1du5FB/L7Ruhsb019H4wrwazM77MtC0QYnT1eG42pHb3WG/HFuzUJ6u5uPL3Xkw/E2SlsWe7rBf0/rvHUqi9LBvTs//HY+y1Y38lZA8kYnv6OiZvoUjmIUE5jhcwjyirqA+HB2BYGlAAxksrCXmITyRVQ5EgdwUSwIL6mMXFF2pqlgKXGA+lwcTS1T2qdMTalLatgrsUbeTEMsYygrLpJLJ9xtF835oTM3/b/nUkfhAIdUM3mtm5UltrW4ObJmnMopanJbG86gCnaUPLemvsWefvNQE5przRdnVzaFR/Q5tJSOupypW1j0EfeA+vYK4Ypb19+1KvgZqP6yU7fMHumYStx7JMuo+prYXwkqRpxy+qh6WPtxKBgeYA7r5cr/rUHcfgbVkn31AY+CMiTfyow5GVttoDH9/9aq6Wwh8Lyr8aK//s0Rflz6q8i8ePt4qwT0wKcRcgju7/yUNF/rpe0UBSD3HXFCXBFgs23oUcj3uCeMnu9Rz9n2b3BArip92pO6tllFAPh3AVog8yjnjWpykie0QzSdCYpX/USlMoeum9ko7UcDdKWn7l+e1nk1uBMeW+GUdEL6K5kUf1h30zeloNGirSWcfs6XoS+YRfZlBo/zhq65uLmanxnhoTI1J0cIkR6NSv9f3sENAzdCDuvnltytjLA/HcanaJqVQd/R+uZ4VVZJZNBBH0F0SayXxfSZgYR9SfkFS7xIB2Lfj75ZwwaZc/l3DgjMPciYfhzCnPuZrWDDXJrzc6yQkQTzCRLIAQsQXfB0w6gtQFEDXEJP0Jv7cYxby5/6ALFBkKVk1nE1K508pjdsEnVLTRXnXKft6ekYBZQdbuoFu0VU9Nku49dgs4d4HGyci4qU2ttx4JmFiR4wPFluuLdgxgTTYVK6kngORLxsKQCwzmnpLXNBGySLV0URaotodjY3RxBxNzGHn0tg3SCszxYIuKb/szQWpnMa+pGthpegLvPmEpowLysD73B4g5ptM4wCxDzaMysD5Irm749sXycmdjiVJPw+6pKa
P+ca5UizOnSCZgx+26CJB3IGJes6esWYJ8vqBajoErtyG7hYhHgBXdqaF9ZfWLWf2mbaKB5h7duCo3DiUf5Iecz/THMxn6XOW/mXnPHb3tnp8sNU/aUJL5uhmKW/Jq0DdxXKgqo7T79WsNE3K3geaBVV6i2cui8i3BGU+pHC5zSRw46ho7JOYJldNwUxbCpHkE810BxBaMO5hAY83G21MAhZSwfh6u32z2WhT7Gy3j7OJJ7o2QKm1CfVr7bL2sYn+8en5gV1Uv2wq2X2Pi1xbPIZIJupSmKbFbNJW/7njy2q+B4/a0DzkulIZOWTJYfNpyK7ktorEpxyIBVmq8ijsH/34r0/bXWfe1mpTap0z1yX8EvvYybT/l9Kq5bLIjrvshFPkJaz+ZZVMbgLG4+eYB322Hu6wNdwxfjxuQlYG3X7lQ1DcQk0Rtmpe5JFRfbwo3qrIFxcPeI2iLjLae1fc9ookuyacU5ukQ9BuPE4vD4xki3jWHxhXnekv2Zuu0lSTT81aOJqtv26acbdl+kRalcQhd5LcJ1hkLZPhShlZYgUWFvDzz2CMzuDduzogGwucyOFNfCBujt+UHnUE7lpeK6lPr34DCyxLDGN01tg2juSf/wQAAP//M+NLdjUsAAA= + +- path: /opt/azure/containers/provision_configs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/7xae3OjRrb/X5+il+iOx7XBeB5JJt5oqjC0vcQSKIAmMzdOUS1oyR3zUJrGtnbs736rm4dAAiTV3Nr8oXHgnN95dJ9XN9/9Q5mTWJmj9G5gWjr0DFOHn0fD13dJymIUYfAMGCIhkH3w9jQnMdUJrFGcDsgC/PEHGFoOGI3AULNsaDme5QhC8Oef/wLsDscDAACY2sYn1YWeMR0NX5MVQCC9Sx4BZnfn4BksKV4BeZqAExJjBm5v/rgNzv7858npAIcp3gWodJQN8Az8jAE5OAEnQF68OR0syAC6mu5NIbS9mT0eSXeMrdILRRl+3aC8XLx99+Fcykm1sQFNt5/4p5+lwcBP4gVZZhSrQUTiWYrp69OvQj//Di0xkCGQ33Ct+G8EzoE8AT/z/4A0/KrqE8OcOdB+keos4fa7l5ocB/sUs7SUok4NB9qfoO2Vyt3AL95Udf89khTMfOU+m2MaY4ZTxceUpQpakRTTB0zP7vE6l8uSzL8TQjvRKg2jJADnP56fH0iePMaAJgm74D97eQSTph5si49ajGjhb9W+j25H7XZiQS02zJGLgJkfdK5CH2CrJQcw5IEp/0fYYjkTy/FmtvEiNYOyNJ2rd8F/DkRfkJoritg5xhV+SHDMulzRAdjtin0MO6u7h2tjm0ghx1i2wpgOv27y6UuXka3I3Sb2kX/rWvdhN1a62BQatF3jytA4+YGb3qesZ9NvA2654f37gxnaV7qba2cXH2NbsYs7bOsA7LZtH0PvLu61TSzwMZbt7uIOI1uRu03sJ283sItHMKWYgX8+iT+xf5d0FpwXCTyDOUrxj++BLAfYTwIMPh5U00rcZkXoBuwuMyVSR4LthjwgIzewdzPbHuz+BNrAFsuxH7Evp7T5obbAh/mhYxe1+eFw7P4Q3PUDJzzEES2AL4MBjtOMYnuqvT4FeXeXrlOGI5+FMEbzEKtx4DBEGaArf07iADw/A/xEGBhC2/acL44LJ5o79hxXtV3vSjXG+1DklCF2CE6lnpoFhOmFhnml4VEz0w1X96CpXo6h/iLx9p/RDDdLTpciiGMeZk3V/pMFQCsGQpIyIMskThkKQxyUs8NJjnnSKHhoxbwlZt4qo0sM3p6Dd+fgzdvzUv6rssKJn3rTfZPNcYiZI4qXhimrVuhmdgnH0D22/7vPAbdbwC00vk2OgBE5WeAkKxynaQiWOKYpAnKSMTDcoyp4e/7+Q4Ob4r+BHONHID/9cP4zkAO0TsFP787PgXyP1/sBW8VWNgE5zeZ/AUnRzFFRW/iUuDXu3HxId3x98HTQ1VV2AbW2W3uJd8pUD8f2zDa7HBvaESPbbtntBGutuvup+wa2HZbcnP+d2dD71bHMDiPQfzKKz/5Kk3hb9yZn+4zZQbOr5w5hV0fQsj7dSXvP8rd0GZWXDmoyWpdB/HAKQ4NcsqkZU3VcliMHajZ0R8OveygU5fZWub29vX35f8OTOJ6U4/mIgV9+AdC6KozaWYA8bCU/TLJAugCSyjfCNJuHxNfEs+9zAoZjFDND0Ay/utBUTdcz9JfyfZrNU5+SFSNJXFI5s0tHs42pa1hmnRahQBNxXxF2GdXGlJ+u7GHMvVExU5wmGfXxNU2yVc5qQ8ea2Rr0rm1rNq0ow8RH3IacaGxpKte+ev0QuesVzl9+mnjulymsuyDGzEQRrsw3azqk2M8oYWuhw4bKhO7vln3DNZ7ZhvtlS5+HBuQnw3Zn6tgrmBpU9q6NW+Reh800yRh2ebnfSLKtmQs9l/cKFd2KkgjRtfqASIjmJCRs7dS1m9rGRLW/eOon1Rirl8aYm+PUXFAAOD4KcSuno6lj2GARG3NKkwcSYHqJ/PtksZgkQcGnja2ZPrWtT4YObe9S1W6sqytvYumwF0C6AB28Lz1cNmaU4LSb2bOhaxvQ6QOBT6skxjHrQYGfp5YJTbcPRs9ouU27YPSZne/dHphfCWOY9oD8argutFshbMTwmESkzRRbdeHYmBjtNlScv02dPmbvt2m7LyuAy8y/x70KeJcz7Qbu1+N3Shjep4z3u2248BCV9sPlejURsxRPUIyWODACHDPC1vCJ4TgtF3rmQG+imuo11D1Dh6bLAwx+dqHp1BY6SzFV05Qs4w2OoecBM3Og7amOY1ybdYxans1SbPBOPfbxBDMUIIYq2YbpuKqpQW8CXVVXXfWlypoouEQhZ6LOfVYmT1X3LtUx57A952ZWyQhIyrONlbF5ksWBY6qukNHk0A2Hpx/PmrmX1szUPU5XSsRPfpgFeIJShukVTSKHoThANBhfCij4WRvPdO4ux4W2d2VbEz6lmLpq69748qXKSPnyfUJZWMtHNxPHq9bskzobu0XjW7BF6IlEWTSumW1nIdaSrIjsifrZmMwmHreoMsiejaGnWbNNaJfib/C6FH7/IZV2337CtNgFEm++oXVVdU7yU+1EcU9OBCMgPbzdOWXkQAGQCZCUvpylBBLobhwPw8ozzgFI5Slm94DXGEI006iGEPFDMaNrPwo8svAWiIQZxWKQ/AG8/QFESbCiyRyDOfVizBYkZJg2h9uJxZ14CTczuugj5RhIdSYJfMwtiZIgC3Eq81A4C5Q6zRnXsmmMZhrGVJTcNFd5y5byZWVTtbxlLZ+OZ9eGmS+pcF3LqkYPYKiZhndpmJ5u2Mqbczn3MhclxnLxWrPMK+NaUFSseYfPG/xtkh2QimdXR2tsaF/yowbJRyHxkxYtqx1zkn4nRaK2S3NKgiWWqv9nFMXpClFeOb9bnhyhFA
771ZL4sne9jJNY+BW8erWFUcbTCDR0+29bJ05C8v+UdE5iBc+Z2DhAZiBGDMhyRZ8fmORHRFoSM0RinrZiRiK8s880y3RVwxRZy3SNCcxtDRL/nm/6HTNzWF28HtQ04yJLofnbSpRuaTe8MBQ9PPwMtfIgyRjDkYiq/DgqKP5VcvFnfNImPj4LFPyEfS9liLJNkD0iwrxFQr0FCUXMn4M3YNgvrRn7/In3u+pq//a46dbMFcC8sPKokNE1yDUB9UvgulETnuW9q7F67RTHZPoRVvkhRtSLeDXxVjRZoaVo9rxFiJbpxtDNTf4/9tzk73VLl74H+aVY6gJKTJcbW3PjlADhKIlr6T2hgAASg+HrFP8N3giFTv8FgqSRToCcVjpWwGA3xgD4629wcnYCfmkhf/UKzClG921RI4QMCZC5Etwnbdh7PZAnmuLYs4r7EOMVeLMtNUhi3HvUWmythufL3ds8NW7uONNwLbtaPa6evW/PyVESE5bQM54D6P7o6RZy0EbpQDlUySJAjlfzYAX7l6TprcNO5DfZr+hfdo5KdXglGsxayOAFb0fLo+Mee9swDjKUMxYFJpf7gKgSknkpU/ybNyR7xNdQDpbMVS7KSlnm8oOiQplkxfJ2UPHLOlUdpJ+ldwc4pBv9m/ZBoUMTozo9bwnOqgbme2CM5jg0k6DW2I3VSzj2TEuHzgFOCDmAHHOEfkd0wB5kfYN3X4w2NNoboF3Q37QqNRWOuSb7NclojMJqJb5uUrk4MXZYQtESj1Z89koZb862KYRCE/Q0S/HozfX266K36nx/ldBHRAM3cdZpmCxHa5zmJC/gYzFVlM7+K9c12BT/LmcUHHLJcYxHbj6kvDGkSTgNUdxoCoc2vLQs14a/zQwb6hyVt4mmVY3nYhJhNOMN81YNpZhlNK73Cr3TWZ5X3DF4+1EJ8IMSZ2EI/DDjo75M4kWyFX8fHB7vpmFeV9uFj1MUIyay7gTFZIFTphPayL4T1TSuoOPqhj3ankajgicfz6L7gFAgr3LVanxc0CMluRxN5MuGiDz/CAF3SYSVYdUuKmdc2hahiLJhg1GpZeGmGhVJ7cZm2MSqXcRsJF9s/mwDOpC8Bs974p/4pNgGVo2RLaw7Fz8DWZYHaEWK444L8PBmUCx8ejGQy01wkWNjysiC+IhhGWXsLqGEreUAMXQBbqWhptbv8G+lQiJ9wPSiUbrya54BADGKsGAtD41+081bic/lDD+xXIH870KBQptdFlBMC9uvZBREJBYEXcIyysdAuRS0S3FP4uAC5HttwIUIxdrgatKEMoXSRODXnFe5rOaUXdcVjPd43cpwA7/cSgMJfOxcaflpc8qh5a5TM5akPgoxVYMgiavQ0cYzYYU6cy1xJ2B7qq435or6yRHnTZUyQ6AKVA7wKkzWEY7Z2RpFYU9h6pV4WHUqjr3S519yVxn6x+fha7Gxhz03XNUF5OnzUtqjSLsgB/sHSMqvxL5FWprNGzY1r/e+Bbm4XayBVzeM3wJLlxvA5t3XUajVrlU1Q0viGPss2dqwqiYObEyo8blDs6E4WVfHzmj4ekVJzBZA+norFRsjuJV4+PxPeit9D8qn+b1m801569p82rxrbb5DPiMPWCdUKLmGcbBKSMxmNMzpyi/lw2RJ4rOI+DRJkwVL4pDE+MxPolvp+9vqvjS/jaCdKJF4L+KrPKCKlFZFrila3ZVXlKXSJcySvzx7JHGQPKZnMWYFRvp3OKkkHKKEn1Bcx7n48P79uwJsicIQ93ikeL9jSPRNGii30osEpL4E0Ps6j1pOUkWEoG/GXv0rhkdwftr8xKj8SOgerwFN0cX7859/LL4aypvm+sdD7378If94KMnYzhl98a2LT3h9yuOAk56tcJR/SnQIA3+Wc5RfF41mjuK4I01VxqMHQlmGwnIOtXYezLafaObWE6klJG/gl9HwdeGjo8x6PD9tC3Fou8cBbsx+LBeoCbm/wjUBDy9uXXKOrmsUB+nH52FnthPJtEvabnY2UYQ/Pm+l5qMwuFN3FYK2exTKPV7vgNzAL/0Ym6ogPplJQuKvt4qC+Nomv1A4ZHU5jrwSQK2LW/ddlUk/Piv1epAq27lBaXz8kiqt7m7XVBxgfQetq8H/BQAA//+oI6qBejYAAA== + + + + + + + +- path: /etc/systemd/system/kubelet.service + permissions: "0644" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/6SUz07jPBTF93kKq2LxfQsnbalQEfIChsBUVAwiRSxKFTnOJbHq2JHvTWlnmHcf9Q9oSosGhJfX53fOkWXd8Z3VNAnOAZXXNWlnxVWTgQEKvjmb6+XkRlIZzzUSiqhBHxmnpIkybaPpRhoE4wT8TCuYBLeAJD0JaZ7kAoPYzrR3tgJLF9qAiIBUlMOjbAy98kmjFCDGc00JSWpQdHqHQTwHlSy9bjyIVV4msWSRqymSPxsPkXKWpLbg8cUqxHIPV01z7RmvWTSTPjI6e03+iFZZ/V4XrlhLP7IxO/ivco0l9swKDzV7aL1NemixZ/akGDf/M26AtdmEnTAqwbJ17ArnPNM236m5Ozhhj7q1r/3GppJT4FhKD7tuQfAGxCWJC1RkGH9iFijU9awXkqpTD+Q1YFf0/w0p5yFEV8m5ctaKztFhv/eJqErOU1zYNJNqalzxUX6VWgGiLCBVDkn02p+kssYjiX77ow9jQRdluPnFYaFSKj1g2RG99vHR1zy6ot857n7N43DzdNsufG0DGcnMADJOzMrlXzEaaa9U12+ltqnAa7UDvbMY2EPANodzsEszjuBn4LdurMuBG5mBQdE6+HV1dxYP41F6/eM8ToenZ/Ew+d3aAmaiy7YHzjQV8No0hbY81369Z5Y1vAUCjNaKtQD/Yg9e0i6Gp5fJvovb+HKQjOLbVZ894/vB6Hs6Oh1cj5IgGA8skjRmEtxLS5CfLUTVGNK8QfAhSV8A/QkAAP//lvMgPnQFAAA= + + + +- path: /etc/apt/apt.conf.d/99periodic + permissions: "0644" + owner: root + content: | + APT::Periodic::Update-Package-Lists "0"; + APT::Periodic::Download-Upgradeable-Packages "0"; + APT::Periodic::AutocleanInterval "0"; + APT::Periodic::Unattended-Upgrade "0"; + + + + + + + + +- path: /etc/systemd/system/docker.service.d/exec_start.conf + permissions: "0644" + owner: root + content: | + [Service] + ExecStart= + ExecStart=/usr/bin/dockerd -H fd:// --storage-driver=overlay2 
--bip= + ExecStartPost=/sbin/iptables -P FORWARD ACCEPT + #EOF + +- path: /etc/docker/daemon.json + permissions: "0644" + owner: root + content: | + { + "live-restore": true, + "log-driver": "json-file", + "log-opts": { + "max-size": "50m", + "max-file": "5" + } + } + + + + + + + + +- path: /etc/kubernetes/certs/ca.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + +- path: /etc/kubernetes/certs/client.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + + + +- path: /var/lib/kubelet/kubeconfig + permissions: "0644" + owner: root + content: | + apiVersion: v1 + kind: Config + clusters: + - name: localcluster + cluster: + certificate-authority: /etc/kubernetes/certs/ca.crt + server: https://:443 + users: + - name: client + user: + client-certificate: /etc/kubernetes/certs/client.crt + client-key: /etc/kubernetes/certs/client.key + contexts: + - context: + cluster: localcluster + user: client + name: localclustercontext + current-context: localclustercontext + #EOF + +- path: /etc/default/kubelet + permissions: "0644" + owner: root + content: | + KUBELET_FLAGS=--address=0.0.0.0 --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroups-per-qos=true --client-ca-file=/etc/kubernetes/certs/ca.crt --cluster-dns=10.0.0.10 --cluster-domain=cluster.local --enforce-node-allocatable=pods --event-qps=0 --eviction-hard=memory.available<750Mi,nodefs.available<10%,nodefs.inodesFree<5% --feature-gates=RotateKubeletServerCertificate=true,a=b,PodPriority=true,x=y --image-gc-high-threshold=85 --image-gc-low-threshold=80 --kube-reserved=cpu=100m,memory=1638Mi --max-pods=110 --node-status-update-frequency=10s --pod-manifest-path=/etc/kubernetes/manifests --pod-max-pids=-1 --protect-kernel-defaults=true --read-only-port=10255 --resolv-conf=/etc/resolv.conf --rotate-certificates=true --streaming-connection-idle-timeout=4h0m0s --system-reserved=cpu=2,memory=1Gi --tls-cert-file=/etc/kubernetes/certs/kubeletserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --tls-private-key-file=/etc/kubernetes/certs/kubeletserver.key + KUBELET_REGISTER_SCHEDULABLE=true + + KUBELET_NODE_LABELS=kubernetes.azure.com/role=agent,agentpool=agent2,storageprofile=managed,storagetier=Premium_LRS,kubernetes.azure.com/cluster=',variables('labelResourceGroup'),' + + #EOF + +- path: /opt/azure/containers/kubelet.sh + permissions: "0755" + owner: root + content: | + #!/bin/bash + + + + #EOF + +runcmd: +- set -x +- . /opt/azure/containers/provision_source.sh +- aptmarkWALinuxAgent hold +'))] \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S117/line16.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S117/line16.sh new file mode 100644 index 00000000000..e51aeb3fea9 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S117/line16.sh @@ -0,0 +1,160 @@ +#!/bin/bash +ERR_FILE_WATCH_TIMEOUT=6 +set -x +if [ -f /opt/azure/containers/provision.complete ]; then + echo "Already ran to success exiting..." 
+ exit 0 +fi + +echo $(date),$(hostname), startcustomscript>>/opt/m + +for i in $(seq 1 3600); do + if [ -s /opt/azure/containers/provision_source.sh ]; then + grep -Fq '#HELPERSEOF' /opt/azure/containers/provision_source.sh && break + fi + if [ $i -eq 3600 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi +done +sed -i "/#HELPERSEOF/d" /opt/azure/containers/provision_source.sh +source /opt/azure/containers/provision_source.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_installs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_installs.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_configs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_configs.sh + +set +x +ETCD_PEER_CERT=$(echo ${ETCD_PEER_CERTIFICATES} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +ETCD_PEER_KEY=$(echo ${ETCD_PEER_PRIVATE_KEYS} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +set -x + +if [[ $OS == $COREOS_OS_NAME ]]; then + echo "Changing default kubectl bin location" + KUBECTL=/opt/kubectl +fi + +if [ -f /var/run/reboot-required ]; then + REBOOTREQUIRED=true +else + REBOOTREQUIRED=false +fi + +configureAdminUser +cleanUpContainerd + + +if [[ "${GPU_NODE}" != "true" ]]; then + cleanUpGPUDrivers +fi + +VHD_LOGS_FILEPATH=/opt/azure/vhd-install.complete +if [ -f $VHD_LOGS_FILEPATH ]; then + echo "detected golden image pre-install" + export -f retrycmd_if_failure + export -f cleanUpContainerImages + export KUBERNETES_VERSION + echo "start to clean up container images" + bash -c cleanUpContainerImages & + FULL_INSTALL_REQUIRED=false +else + if [[ "${IS_VHD}" = true ]]; then + echo "Using VHD distro but file $VHD_LOGS_FILEPATH not found" + exit $ERR_VHD_FILE_NOT_FOUND + fi + FULL_INSTALL_REQUIRED=true +fi + +if [[ $OS == $UBUNTU_OS_NAME ]] && [ "$FULL_INSTALL_REQUIRED" = "true" ]; then + installDeps +else + echo "Golden image; skipping dependencies installation" +fi + +if [[ $OS == $UBUNTU_OS_NAME ]]; then + ensureAuditD +fi +installContainerRuntime + + +installNetworkPlugin + +installKubeletAndKubectl + +if [[ $OS != $COREOS_OS_NAME ]]; then + ensureRPC +fi + +createKubeManifestDir + +ensureContainerRuntime + +configureK8s + +configureCNI + + + +ensureKubelet +ensureJournal + +if $FULL_INSTALL_REQUIRED; then + if [[ $OS == $UBUNTU_OS_NAME ]]; then + + echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind + sed -i "13i\echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind\n" /etc/rc.local + fi +fi +rm -f /etc/apt/apt.conf.d/99periodic +if [[ $OS == $UBUNTU_OS_NAME ]]; then + apt_get_purge 20 30 120 apache2-utils & +fi + + +VALIDATION_ERR=0 +API_SERVER_DNS_RETRIES=20 +if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_DNS_RETRIES=200 +fi +RES=$(retrycmd_if_failure ${API_SERVER_DNS_RETRIES} 1 3 nslookup ${API_SERVER_NAME}) +STS=$? 
+if [[ $STS != 0 ]]; then + if [[ $RES == *"168.63.129.16"* ]]; then + VALIDATION_ERR=$ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL + else + VALIDATION_ERR=$ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL + fi +else + API_SERVER_CONN_RETRIES=50 + if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_CONN_RETRIES=100 + fi + retrycmd_if_failure ${API_SERVER_CONN_RETRIES} 1 3 nc -vz ${API_SERVER_NAME} 443 || VALIDATION_ERR=$ERR_K8S_API_SERVER_CONN_FAIL +fi + + + +if $REBOOTREQUIRED; then + echo 'reboot required, rebooting node in 1 minute' + /bin/bash -c "shutdown -r 1 &" + if [[ $OS == $UBUNTU_OS_NAME ]]; then + aptmarkWALinuxAgent unhold & + fi +else + if [[ $OS == $UBUNTU_OS_NAME ]]; then + /usr/lib/apt/apt.systemd.daily & + aptmarkWALinuxAgent unhold & + fi +fi + +echo "Custom script finished. API server connection check code:" $VALIDATION_ERR +echo $(date),$(hostname), endcustomscript>>/opt/m +mkdir -p /opt/azure/containers && touch /opt/azure/containers/provision.complete +ps auxfww > /opt/azure/provision-ps.log & + +exit $VALIDATION_ERR + +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S117/line23.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S117/line23.sh new file mode 100644 index 00000000000..1f074afdfa1 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S117/line23.sh @@ -0,0 +1,276 @@ +#!/bin/bash + +CC_SERVICE_IN_TMP=/opt/azure/containers/cc-proxy.service.in +CC_SOCKET_IN_TMP=/opt/azure/containers/cc-proxy.socket.in +CNI_CONFIG_DIR="/etc/cni/net.d" +CNI_BIN_DIR="/opt/cni/bin" +CNI_DOWNLOADS_DIR="/opt/cni/downloads" +CONTAINERD_DOWNLOADS_DIR="/opt/containerd/downloads" +K8S_DOWNLOADS_DIR="/opt/kubernetes/downloads" +UBUNTU_RELEASE=$(lsb_release -r -s) + +removeMoby() { + apt-get purge -y moby-engine moby-cli +} + +removeContainerd() { + apt-get purge -y moby-containerd +} + +cleanupContainerdDlFiles() { + rm -rf $CONTAINERD_DOWNLOADS_DIR +} + +installDeps() { + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/packages-microsoft-prod.deb > /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT + retrycmd_if_failure 60 5 10 dpkg -i /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_PKG_ADD_FAIL + aptmarkWALinuxAgent hold + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT + apt_get_dist_upgrade || exit $ERR_APT_DIST_UPGRADE_TIMEOUT + for apt_package in apache2-utils apt-transport-https blobfuse=1.1.1 ca-certificates ceph-common cgroup-lite cifs-utils conntrack cracklib-runtime ebtables ethtool fuse git glusterfs-client htop iftop init-system-helpers iotop iproute2 ipset iptables jq libpam-pwquality libpwquality-tools mount nfs-common pigz socat sysstat traceroute util-linux xz-utils zip; do + if ! apt_get_install 30 1 600 $apt_package; then + journalctl --no-pager -u $apt_package + exit $ERR_APT_INSTALL_TIMEOUT + fi + done + if [[ "${AUDITD_ENABLED}" == true ]]; then + if ! 
apt_get_install 30 1 600 auditd; then + journalctl --no-pager -u auditd + exit $ERR_APT_INSTALL_TIMEOUT + fi + fi +} + +installGPUDrivers() { + mkdir -p $GPU_DEST/tmp + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/gpgkey > $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-key add $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/ubuntu${UBUNTU_RELEASE}/nvidia-docker.list > $GPU_DEST/tmp/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 cat $GPU_DEST/tmp/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + apt_get_update + retrycmd_if_failure 30 5 3600 apt-get install -y linux-headers-$(uname -r) gcc make dkms || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + retrycmd_if_failure 30 5 60 curl -fLS https://us.download.nvidia.com/tesla/$GPU_DV/NVIDIA-Linux-x86_64-${GPU_DV}.run -o ${GPU_DEST}/nvidia-drivers-${GPU_DV} || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + tmpDir=$GPU_DEST/tmp + if ! ( + set -e -o pipefail + cd "${tmpDir}" + retrycmd_if_failure 30 5 3600 apt-get download nvidia-docker2="${NVIDIA_DOCKER_VERSION}+${NVIDIA_DOCKER_SUFFIX}" || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + ); then + exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + fi +} + +installSGXDrivers() { + echo "Installing SGX driver" + local VERSION + VERSION=$(grep DISTRIB_RELEASE /etc/*-release| cut -f 2 -d "=") + case $VERSION in + "18.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer18.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "16.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer16.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "*") + echo "Version $VERSION is not supported" + exit 1 + ;; + esac + + local PACKAGES="make gcc dkms" + wait_for_apt_locks + retrycmd_if_failure 30 5 3600 apt-get -y install $PACKAGES || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + + local SGX_DRIVER + SGX_DRIVER=$(basename $SGX_DRIVER_URL) + local OE_DIR=/opt/azure/containers/oe + mkdir -p ${OE_DIR} + + retrycmd_if_failure 120 5 25 curl -fsSL ${SGX_DRIVER_URL} -o ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + chmod a+x ${OE_DIR}/${SGX_DRIVER} + ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_START_FAIL +} + +installContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + installMoby + fi + +} + +installMoby() { + CURRENT_VERSION=$(dockerd --version | grep "Docker version" | cut -d "," -f 1 | cut -d " " -f 3 | cut -d "+" -f 1) + if [[ "$CURRENT_VERSION" == "${MOBY_VERSION}" ]]; then + echo "dockerd $MOBY_VERSION is already installed, skipping Moby download" + else + removeMoby + getMobyPkg + MOBY_CLI=${MOBY_VERSION} + if [[ "${MOBY_CLI}" == "3.0.4" ]]; then + MOBY_CLI="3.0.3" + fi + apt_get_install 20 30 120 moby-engine=${MOBY_VERSION}* moby-cli=${MOBY_CLI}* --allow-downgrades || exit $ERR_MOBY_INSTALL_TIMEOUT + fi +} + + + +getMobyPkg() { + retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/prod.list > /tmp/microsoft-prod.list || exit $ERR_MOBY_APT_LIST_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft-prod.list /etc/apt/sources.list.d/ || exit $ERR_MOBY_APT_LIST_TIMEOUT + 
retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /tmp/microsoft.gpg || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft.gpg /etc/apt/trusted.gpg.d/ || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT +} + +installNetworkPlugin() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + installAzureCNI + fi + installCNI + rm -rf $CNI_DOWNLOADS_DIR & +} + +downloadCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadAzureCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${VNET_CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadContainerd() { + CONTAINERD_DOWNLOAD_URL="${CONTAINERD_DOWNLOAD_URL_BASE}cri-containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz" + mkdir -p $CONTAINERD_DOWNLOADS_DIR + CONTAINERD_TGZ_TMP=${CONTAINERD_DOWNLOAD_URL##*/} + retrycmd_get_tarball 120 5 "$CONTAINERD_DOWNLOADS_DIR/${CONTAINERD_TGZ_TMP}" ${CONTAINERD_DOWNLOAD_URL} || exit $ERR_CONTAINERD_DOWNLOAD_TIMEOUT +} + +installCNI() { + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadCNI + fi + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR + chown -R root:root $CNI_BIN_DIR + chmod -R 755 $CNI_BIN_DIR +} + +installAzureCNI() { + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadAzureCNI + fi + mkdir -p $CNI_CONFIG_DIR + chown -R root:root $CNI_CONFIG_DIR + chmod 755 $CNI_CONFIG_DIR + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR +} + +installImg() { + img_filepath=/usr/local/bin/img + retrycmd_get_executable 120 5 $img_filepath "https://acs-mirror.azureedge.net/img/img-linux-amd64-v0.5.6" ls || exit $ERR_IMG_DOWNLOAD_TIMEOUT +} + +extractHyperkube() { + CLI_TOOL=$1 + path="/home/hyperkube-downloads/${KUBERNETES_VERSION}" + pullContainerImage $CLI_TOOL ${HYPERKUBE_URL} + if [[ "$CLI_TOOL" == "docker" ]]; then + mkdir -p "$path" + # Check if we can extract kubelet and kubectl directly from hyperkube's binary folder + if docker run --rm --entrypoint "" -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /usr/local/bin/{kubelet,kubectl} $path"; then + mv "$path/kubelet" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/kubectl" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + return + else + docker run --rm -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /hyperkube $path" + fi + else + img unpack -o "$path" ${HYPERKUBE_URL} + fi + + if [[ $OS == $COREOS_OS_NAME ]]; then + cp "$path/hyperkube" "/opt/kubelet" + mv "$path/hyperkube" "/opt/kubectl" + chmod a+x /opt/kubelet /opt/kubectl + else + cp "$path/hyperkube" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/hyperkube" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + fi +} + +installKubeletAndKubectl() { + if [[ ! 
-f "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" ]]; then + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + extractHyperkube "docker" + else + installImg + extractHyperkube "img" + fi + fi + mv "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" "/usr/local/bin/kubelet" + mv "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" "/usr/local/bin/kubectl" + chmod a+x /usr/local/bin/kubelet /usr/local/bin/kubectl + rm -rf /usr/local/bin/kubelet-* /usr/local/bin/kubectl-* /home/hyperkube-downloads & +} + +pullContainerImage() { + CLI_TOOL=$1 + DOCKER_IMAGE_URL=$2 + retrycmd_if_failure 60 1 1200 $CLI_TOOL pull $DOCKER_IMAGE_URL || exit $ERR_CONTAINER_IMG_PULL_TIMEOUT +} + +cleanUpContainerImages() { + function cleanUpHyperkubeImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'hyperkube') + local exit_code=$? + if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + function cleanUpControllerManagerImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'cloud-controller-manager') + local exit_code=$? + if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + export -f cleanUpHyperkubeImagesRun + export -f cleanUpControllerManagerImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpHyperkubeImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpControllerManagerImagesRun +} + +cleanUpGPUDrivers() { + rm -Rf $GPU_DEST + rm -f /etc/apt/sources.list.d/nvidia-docker.list +} + +cleanUpContainerd() { + rm -Rf $CONTAINERD_DOWNLOADS_DIR +} + +overrideNetworkConfig() { + CONFIG_FILEPATH="/etc/cloud/cloud.cfg.d/80_azure_net_config.cfg" + touch ${CONFIG_FILEPATH} + cat << EOF >> ${CONFIG_FILEPATH} +datasource: + Azure: + apply_network_config: false +EOF +} +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S117/line30.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S117/line30.sh new file mode 100644 index 00000000000..ce857cb431e --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S117/line30.sh @@ -0,0 +1,337 @@ +#!/bin/bash +NODE_INDEX=$(hostname | tail -c 2) +NODE_NAME=$(hostname) +if [[ $OS == $COREOS_OS_NAME ]]; then + PRIVATE_IP=$(ip a show eth0 | grep -Po 'inet \K[\d.]+') +else + PRIVATE_IP=$(hostname -I | cut -d' ' -f1) +fi +ETCD_PEER_URL="https://${PRIVATE_IP}:2380" +ETCD_CLIENT_URL="https://${PRIVATE_IP}:2379" + +configureAdminUser(){ + chage -E -1 -I -1 -m 0 -M 99999 "${ADMINUSER}" + chage -l "${ADMINUSER}" +} + +configureSecrets(){ + APISERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/apiserver.key" + touch "${APISERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${APISERVER_PRIVATE_KEY_PATH}" + chown root:root "${APISERVER_PRIVATE_KEY_PATH}" + + CA_PRIVATE_KEY_PATH="/etc/kubernetes/certs/ca.key" + touch "${CA_PRIVATE_KEY_PATH}" + chmod 0600 "${CA_PRIVATE_KEY_PATH}" + chown root:root "${CA_PRIVATE_KEY_PATH}" + + ETCD_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdserver.key" + touch "${ETCD_SERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_SERVER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_SERVER_PRIVATE_KEY_PATH}" + fi + + ETCD_CLIENT_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdclient.key" + touch 
"${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chown root:root "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + + ETCD_PEER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.key" + touch "${ETCD_PEER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_PEER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_PEER_PRIVATE_KEY_PATH}" + fi + + ETCD_SERVER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdserver.crt" + touch "${ETCD_SERVER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_SERVER_CERTIFICATE_PATH}" + chown root:root "${ETCD_SERVER_CERTIFICATE_PATH}" + + ETCD_CLIENT_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdclient.crt" + touch "${ETCD_CLIENT_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_CLIENT_CERTIFICATE_PATH}" + chown root:root "${ETCD_CLIENT_CERTIFICATE_PATH}" + + ETCD_PEER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.crt" + touch "${ETCD_PEER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_PEER_CERTIFICATE_PATH}" + chown root:root "${ETCD_PEER_CERTIFICATE_PATH}" + + set +x + echo "${APISERVER_PRIVATE_KEY}" | base64 --decode > "${APISERVER_PRIVATE_KEY_PATH}" + echo "${CA_PRIVATE_KEY}" | base64 --decode > "${CA_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_PRIVATE_KEY}" | base64 --decode > "${ETCD_SERVER_PRIVATE_KEY_PATH}" + echo "${ETCD_CLIENT_PRIVATE_KEY}" | base64 --decode > "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + echo "${ETCD_PEER_KEY}" | base64 --decode > "${ETCD_PEER_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_CERTIFICATE}" | base64 --decode > "${ETCD_SERVER_CERTIFICATE_PATH}" + echo "${ETCD_CLIENT_CERTIFICATE}" | base64 --decode > "${ETCD_CLIENT_CERTIFICATE_PATH}" + echo "${ETCD_PEER_CERT}" | base64 --decode > "${ETCD_PEER_CERTIFICATE_PATH}" +} + +ensureRPC() { + systemctlEnableAndStart rpcbind || exit $ERR_SYSTEMCTL_START_FAIL + systemctlEnableAndStart rpc-statd || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureAuditD() { + if [[ "${AUDITD_ENABLED}" == true ]]; then + systemctlEnableAndStart auditd || exit $ERR_SYSTEMCTL_START_FAIL + else + if apt list --installed | grep 'auditd'; then + apt_get_purge 20 30 120 auditd & + fi + fi +} + +configureKubeletServerCert() { + KUBELET_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/kubeletserver.key" + KUBELET_SERVER_CERT_PATH="/etc/kubernetes/certs/kubeletserver.crt" + + openssl genrsa -out $KUBELET_SERVER_PRIVATE_KEY_PATH 2048 + openssl req -new -x509 -days 7300 -key $KUBELET_SERVER_PRIVATE_KEY_PATH -out $KUBELET_SERVER_CERT_PATH -subj "/CN=${NODE_NAME}" +} + +configureK8s() { + KUBELET_PRIVATE_KEY_PATH="/etc/kubernetes/certs/client.key" + touch "${KUBELET_PRIVATE_KEY_PATH}" + chmod 0600 "${KUBELET_PRIVATE_KEY_PATH}" + chown root:root "${KUBELET_PRIVATE_KEY_PATH}" + + APISERVER_PUBLIC_KEY_PATH="/etc/kubernetes/certs/apiserver.crt" + touch "${APISERVER_PUBLIC_KEY_PATH}" + chmod 0644 "${APISERVER_PUBLIC_KEY_PATH}" + chown root:root "${APISERVER_PUBLIC_KEY_PATH}" + + AZURE_JSON_PATH="/etc/kubernetes/azure.json" + touch "${AZURE_JSON_PATH}" + chmod 0600 "${AZURE_JSON_PATH}" + chown root:root "${AZURE_JSON_PATH}" + + set +x + echo "${KUBELET_PRIVATE_KEY}" | base64 --decode > "${KUBELET_PRIVATE_KEY_PATH}" + echo "${APISERVER_PUBLIC_KEY}" | base64 --decode > "${APISERVER_PUBLIC_KEY_PATH}" + + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\\/\\\\} + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\"/\\\"} + cat << EOF > "${AZURE_JSON_PATH}" +{ + "cloud": "AzurePublicCloud", + "tenantId": "${TENANT_ID}", + "subscriptionId": "${SUBSCRIPTION_ID}", + "aadClientId": 
"${SERVICE_PRINCIPAL_CLIENT_ID}", + "aadClientSecret": "${SERVICE_PRINCIPAL_CLIENT_SECRET}", + "resourceGroup": "${RESOURCE_GROUP}", + "location": "${LOCATION}", + "vmType": "${VM_TYPE}", + "subnetName": "${SUBNET}", + "securityGroupName": "${NETWORK_SECURITY_GROUP}", + "vnetName": "${VIRTUAL_NETWORK}", + "vnetResourceGroup": "${VIRTUAL_NETWORK_RESOURCE_GROUP}", + "routeTableName": "${ROUTE_TABLE}", + "primaryAvailabilitySetName": "${PRIMARY_AVAILABILITY_SET}", + "primaryScaleSetName": "${PRIMARY_SCALE_SET}", + "cloudProviderBackoffMode": "${CLOUDPROVIDER_BACKOFF_MODE}", + "cloudProviderBackoff": ${CLOUDPROVIDER_BACKOFF}, + "cloudProviderBackoffRetries": ${CLOUDPROVIDER_BACKOFF_RETRIES}, + "cloudProviderBackoffExponent": ${CLOUDPROVIDER_BACKOFF_EXPONENT}, + "cloudProviderBackoffDuration": ${CLOUDPROVIDER_BACKOFF_DURATION}, + "cloudProviderBackoffJitter": ${CLOUDPROVIDER_BACKOFF_JITTER}, + "cloudProviderRateLimit": ${CLOUDPROVIDER_RATELIMIT}, + "cloudProviderRateLimitQPS": ${CLOUDPROVIDER_RATELIMIT_QPS}, + "cloudProviderRateLimitBucket": ${CLOUDPROVIDER_RATELIMIT_BUCKET}, + "cloudProviderRateLimitQPSWrite": ${CLOUDPROVIDER_RATELIMIT_QPS_WRITE}, + "cloudProviderRateLimitBucketWrite": ${CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE}, + "useManagedIdentityExtension": ${USE_MANAGED_IDENTITY_EXTENSION}, + "userAssignedIdentityID": "${USER_ASSIGNED_IDENTITY_ID}", + "useInstanceMetadata": ${USE_INSTANCE_METADATA}, + "loadBalancerSku": "${LOAD_BALANCER_SKU}", + "disableOutboundSNAT": ${LOAD_BALANCER_DISABLE_OUTBOUND_SNAT}, + "excludeMasterFromStandardLB": ${EXCLUDE_MASTER_FROM_STANDARD_LB}, + "providerVaultName": "${KMS_PROVIDER_VAULT_NAME}", + "maximumLoadBalancerRuleCount": ${MAXIMUM_LOADBALANCER_RULE_COUNT}, + "providerKeyName": "k8s", + "providerKeyVersion": "" +} +EOF + set -x + if [[ "${CLOUDPROVIDER_BACKOFF_MODE}" = "v2" ]]; then + sed -i "/cloudProviderBackoffExponent/d" /etc/kubernetes/azure.json + sed -i "/cloudProviderBackoffJitter/d" /etc/kubernetes/azure.json + fi + + configureKubeletServerCert +} + +configureCNI() { + + retrycmd_if_failure 120 5 25 modprobe br_netfilter || exit $ERR_MODPROBE_FAIL + echo -n "br_netfilter" > /etc/modules-load.d/br_netfilter.conf + configureCNIIPTables + +} + +configureCNIIPTables() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + mv $CNI_BIN_DIR/10-azure.conflist $CNI_CONFIG_DIR/ + chmod 600 $CNI_CONFIG_DIR/10-azure.conflist + if [[ "${NETWORK_POLICY}" == "calico" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + elif [[ "${NETWORK_POLICY}" == "" || "${NETWORK_POLICY}" == "none" ]] && [[ "${NETWORK_MODE}" == "transparent" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + fi + /sbin/ebtables -t nat --list + fi +} + +ensureContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + ensureDocker + fi + +} + + + +ensureDocker() { + DOCKER_SERVICE_EXEC_START_FILE=/etc/systemd/system/docker.service.d/exec_start.conf + wait_for_file 1200 1 $DOCKER_SERVICE_EXEC_START_FILE || exit $ERR_FILE_WATCH_TIMEOUT + usermod -aG docker ${ADMINUSER} + DOCKER_MOUNT_FLAGS_SYSTEMD_FILE=/etc/systemd/system/docker.service.d/clear_mount_propagation_flags.conf + if [[ $OS != $COREOS_OS_NAME ]]; then + wait_for_file 1200 1 $DOCKER_MOUNT_FLAGS_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + fi + DOCKER_JSON_FILE=/etc/docker/daemon.json + for i in $(seq 1 1200); do + if [ -s $DOCKER_JSON_FILE ]; then + jq '.' 
< $DOCKER_JSON_FILE && break + fi + if [ $i -eq 1200 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi + done + systemctlEnableAndStart docker || exit $ERR_DOCKER_START_FAIL + + DOCKER_MONITOR_SYSTEMD_TIMER_FILE=/etc/systemd/system/docker-monitor.timer + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_TIMER_FILE || exit $ERR_FILE_WATCH_TIMEOUT + DOCKER_MONITOR_SYSTEMD_FILE=/etc/systemd/system/docker-monitor.service + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart docker-monitor.timer || exit $ERR_SYSTEMCTL_START_FAIL +} + + + + + +ensureKubelet() { + KUBELET_DEFAULT_FILE=/etc/default/kubelet + wait_for_file 1200 1 $KUBELET_DEFAULT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBECONFIG_FILE=/var/lib/kubelet/kubeconfig + wait_for_file 1200 1 $KUBECONFIG_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBELET_RUNTIME_CONFIG_SCRIPT_FILE=/opt/azure/containers/kubelet.sh + wait_for_file 1200 1 $KUBELET_RUNTIME_CONFIG_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart kubelet || exit $ERR_KUBELET_START_FAIL + + + +} + +ensureLabelNodes() { + LABEL_NODES_SCRIPT_FILE=/opt/azure/containers/label-nodes.sh + wait_for_file 1200 1 $LABEL_NODES_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + LABEL_NODES_SYSTEMD_FILE=/etc/systemd/system/label-nodes.service + wait_for_file 1200 1 $LABEL_NODES_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart label-nodes || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureJournal() { + { + echo "Storage=persistent" + echo "SystemMaxUse=1G" + echo "RuntimeMaxUse=1G" + echo "ForwardToSyslog=yes" + } >> /etc/systemd/journald.conf + systemctlEnableAndStart systemd-journald || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureK8sControlPlane() { + if $REBOOTREQUIRED || [ "$NO_OUTBOUND" = "true" ]; then + return + fi + retrycmd_if_failure 120 5 25 $KUBECTL 2>/dev/null cluster-info || exit $ERR_K8S_RUNNING_TIMEOUT +} + +createKubeManifestDir() { + KUBEMANIFESTDIR=/etc/kubernetes/manifests + mkdir -p $KUBEMANIFESTDIR +} + +writeKubeConfig() { + KUBECONFIGDIR=/home/$ADMINUSER/.kube + KUBECONFIGFILE=$KUBECONFIGDIR/config + mkdir -p $KUBECONFIGDIR + touch $KUBECONFIGFILE + chown $ADMINUSER:$ADMINUSER $KUBECONFIGDIR + chown $ADMINUSER:$ADMINUSER $KUBECONFIGFILE + chmod 700 $KUBECONFIGDIR + chmod 600 $KUBECONFIGFILE + set +x + echo " +--- +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: \"$CA_CERTIFICATE\" + server: $KUBECONFIG_SERVER + name: \"$MASTER_FQDN\" +contexts: +- context: + cluster: \"$MASTER_FQDN\" + user: \"$MASTER_FQDN-admin\" + name: \"$MASTER_FQDN\" +current-context: \"$MASTER_FQDN\" +kind: Config +users: +- name: \"$MASTER_FQDN-admin\" + user: + client-certificate-data: \"$KUBECONFIG_CERTIFICATE\" + client-key-data: \"$KUBECONFIG_KEY\" +" > $KUBECONFIGFILE + set -x +} + +configClusterAutoscalerAddon() { + CLUSTER_AUTOSCALER_ADDON_FILE=/etc/kubernetes/addons/cluster-autoscaler-deployment.yaml + wait_for_file 1200 1 $CLUSTER_AUTOSCALER_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_SECRET | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SUBSCRIPTION_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $TENANT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $RESOURCE_GROUP | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE +} + +configACIConnectorAddon() { + ACI_CONNECTOR_CREDENTIALS=$(printf 
"{\"clientId\": \"%s\", \"clientSecret\": \"%s\", \"tenantId\": \"%s\", \"subscriptionId\": \"%s\", \"activeDirectoryEndpointUrl\": \"https://login.microsoftonline.com\",\"resourceManagerEndpointUrl\": \"https://management.azure.com/\", \"activeDirectoryGraphResourceId\": \"https://graph.windows.net/\", \"sqlManagementEndpointUrl\": \"https://management.core.windows.net:8443/\", \"galleryEndpointUrl\": \"https://gallery.azure.com/\", \"managementEndpointUrl\": \"https://management.core.windows.net/\"}" "$SERVICE_PRINCIPAL_CLIENT_ID" "$SERVICE_PRINCIPAL_CLIENT_SECRET" "$TENANT_ID" "$SUBSCRIPTION_ID" | base64 -w 0) + + openssl req -newkey rsa:4096 -new -nodes -x509 -days 3650 -keyout /etc/kubernetes/certs/aci-connector-key.pem -out /etc/kubernetes/certs/aci-connector-cert.pem -subj "/C=US/ST=CA/L=virtualkubelet/O=virtualkubelet/OU=virtualkubelet/CN=virtualkubelet" + ACI_CONNECTOR_KEY=$(base64 /etc/kubernetes/certs/aci-connector-key.pem -w0) + ACI_CONNECTOR_CERT=$(base64 /etc/kubernetes/certs/aci-connector-cert.pem -w0) + + ACI_CONNECTOR_ADDON_FILE=/etc/kubernetes/addons/aci-connector-deployment.yaml + wait_for_file 1200 1 $ACI_CONNECTOR_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$ACI_CONNECTOR_CREDENTIALS|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$RESOURCE_GROUP|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_CERT|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_KEY|g" $ACI_CONNECTOR_ADDON_FILE +} + +configAzurePolicyAddon() { + AZURE_POLICY_ADDON_FILE=/etc/kubernetes/addons/azure-policy-deployment.yaml + sed -i "s||/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP|g" $AZURE_POLICY_ADDON_FILE +} + + +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S117/line43.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S117/line43.sh new file mode 100644 index 00000000000..e708f006a14 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S117/line43.sh @@ -0,0 +1,38 @@ +[Unit] +Description=Kubelet +ConditionPathExists=/usr/local/bin/kubelet + + +[Service] +Restart=always +EnvironmentFile=/etc/default/kubelet +SuccessExitStatus=143 +ExecStartPre=/bin/bash /opt/azure/containers/kubelet.sh +ExecStartPre=/bin/mkdir -p /var/lib/kubelet +ExecStartPre=/bin/mkdir -p /var/lib/cni +ExecStartPre=/bin/bash -c "if [ $(mount | grep \"/var/lib/kubelet\" | wc -l) -le 0 ] ; then /bin/mount --bind /var/lib/kubelet /var/lib/kubelet ; fi" +ExecStartPre=/bin/mount --make-shared /var/lib/kubelet + + +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_retries2=8 +ExecStartPre=/sbin/sysctl -w net.core.somaxconn=16384 +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_max_syn_backlog=16384 +ExecStartPre=/sbin/sysctl -w net.core.message_cost=40 +ExecStartPre=/sbin/sysctl -w net.core.message_burst=80 + +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh1=4096 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh2=8192 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh3=16384 + +ExecStartPre=-/sbin/ebtables -t nat --list +ExecStartPre=-/sbin/iptables -t nat --numeric --list +ExecStart=/usr/local/bin/kubelet \ + --enable-server \ + --node-labels="${KUBELET_NODE_LABELS}" \ + --v=2 \ + --volume-plugin-dir=/etc/kubernetes/volumeplugins \ + $KUBELET_FLAGS \ + $KUBELET_REGISTER_NODE $KUBELET_REGISTER_WITH_TAINTS + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S117/line9.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S117/line9.sh new file mode 100644 index 00000000000..08cbc16e86d --- /dev/null +++ 
b/pkg/agent/testdata/AKSUbuntu1604+K8S117/line9.sh @@ -0,0 +1,305 @@ +#!/bin/bash + +ERR_SYSTEMCTL_START_FAIL=4 +ERR_CLOUD_INIT_TIMEOUT=5 +ERR_FILE_WATCH_TIMEOUT=6 +ERR_HOLD_WALINUXAGENT=7 +ERR_RELEASE_HOLD_WALINUXAGENT=8 +ERR_APT_INSTALL_TIMEOUT=9 +ERR_NTP_INSTALL_TIMEOUT=10 +ERR_NTP_START_TIMEOUT=11 +ERR_STOP_SYSTEMD_TIMESYNCD_TIMEOUT=12 +ERR_DOCKER_INSTALL_TIMEOUT=20 +ERR_DOCKER_DOWNLOAD_TIMEOUT=21 +ERR_DOCKER_KEY_DOWNLOAD_TIMEOUT=22 +ERR_DOCKER_APT_KEY_TIMEOUT=23 +ERR_DOCKER_START_FAIL=24 +ERR_MOBY_APT_LIST_TIMEOUT=25 +ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT=26 +ERR_MOBY_INSTALL_TIMEOUT=27 +ERR_K8S_RUNNING_TIMEOUT=30 +ERR_K8S_DOWNLOAD_TIMEOUT=31 +ERR_KUBECTL_NOT_FOUND=32 +ERR_IMG_DOWNLOAD_TIMEOUT=33 +ERR_KUBELET_START_FAIL=34 +ERR_CONTAINER_IMG_PULL_TIMEOUT=35 +ERR_CNI_DOWNLOAD_TIMEOUT=41 +ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT=42 +ERR_MS_PROD_DEB_PKG_ADD_FAIL=43 + +ERR_SYSTEMD_INSTALL_FAIL=48 +ERR_MODPROBE_FAIL=49 +ERR_OUTBOUND_CONN_FAIL=50 +ERR_K8S_API_SERVER_CONN_FAIL=51 +ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL=52 +ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL=53 +ERR_KATA_KEY_DOWNLOAD_TIMEOUT=60 +ERR_KATA_APT_KEY_TIMEOUT=61 +ERR_KATA_INSTALL_TIMEOUT=62 +ERR_CONTAINERD_DOWNLOAD_TIMEOUT=70 +ERR_CUSTOM_SEARCH_DOMAINS_FAIL=80 +ERR_GPU_DRIVERS_START_FAIL=84 +ERR_GPU_DRIVERS_INSTALL_TIMEOUT=85 +ERR_GPU_DEVICE_PLUGIN_START_FAIL=86 +ERR_GPU_INFO_ROM_CORRUPTED=87 +ERR_SGX_DRIVERS_INSTALL_TIMEOUT=90 +ERR_SGX_DRIVERS_START_FAIL=91 +ERR_APT_DAILY_TIMEOUT=98 +ERR_APT_UPDATE_TIMEOUT=99 +ERR_CSE_PROVISION_SCRIPT_NOT_READY_TIMEOUT=100 +ERR_APT_DIST_UPGRADE_TIMEOUT=101 +ERR_APT_PURGE_FAIL=102 +ERR_SYSCTL_RELOAD=103 +ERR_CIS_ASSIGN_ROOT_PW=111 +ERR_CIS_ASSIGN_FILE_PERMISSION=112 +ERR_PACKER_COPY_FILE=113 +ERR_CIS_APPLY_PASSWORD_CONFIG=115 +ERR_SYSTEMD_DOCKER_STOP_FAIL=116 + +ERR_VHD_FILE_NOT_FOUND=124 +ERR_VHD_BUILD_ERROR=125 + + +ERR_AZURE_STACK_GET_ARM_TOKEN=120 +ERR_AZURE_STACK_GET_NETWORK_CONFIGURATION=121 +ERR_AZURE_STACK_GET_SUBNET_PREFIX=122 + +OS=$(sort -r /etc/*-release | gawk 'match($0, /^(ID_LIKE=(coreos)|ID=(.*))$/, a) { print toupper(a[2] a[3]); exit }') +UBUNTU_OS_NAME="UBUNTU" +RHEL_OS_NAME="RHEL" +COREOS_OS_NAME="COREOS" +KUBECTL=/usr/local/bin/kubectl +DOCKER=/usr/bin/docker +export GPU_DV=418.126.02 +export GPU_DEST=/usr/local/nvidia +NVIDIA_DOCKER_VERSION=2.0.3 +DOCKER_VERSION=1.13.1-1 +NVIDIA_CONTAINER_RUNTIME_VERSION=2.0.0 +NVIDIA_DOCKER_SUFFIX=docker18.09.2-1 + +aptmarkWALinuxAgent() { + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-mark $1 walinuxagent || \ + if [[ "$1" == "hold" ]]; then + exit $ERR_HOLD_WALINUXAGENT + elif [[ "$1" == "unhold" ]]; then + exit $ERR_RELEASE_HOLD_WALINUXAGENT + fi +} + +retrycmd_if_failure() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + echo Executed \"$@\" $i times; + return 1 + else + sleep $wait_sleep + fi + done + echo Executed \"$@\" $i times; +} +retrycmd_if_failure_no_stats() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +retrycmd_get_tarball() { + tar_retries=$1; wait_sleep=$2; tarball=$3; url=$4 + echo "${tar_retries} retries" + for i in $(seq 1 $tar_retries); do + tar -tzf $tarball && break || \ + if [ $i -eq $tar_retries ]; then + return 1 + else + timeout 60 curl -fsSL $url -o $tarball + sleep $wait_sleep + fi + done +} +retrycmd_get_executable() { + 
retries=$1; wait_sleep=$2; filepath=$3; url=$4; validation_args=$5 + echo "${retries} retries" + for i in $(seq 1 $retries); do + $filepath $validation_args && break || \ + if [ $i -eq $retries ]; then + return 1 + else + timeout 30 curl -fsSL $url -o $filepath + chmod +x $filepath + sleep $wait_sleep + fi + done +} +wait_for_file() { + retries=$1; wait_sleep=$2; filepath=$3 + paved=/opt/azure/cloud-init-files.paved + grep -Fq "${filepath}" $paved && return 0 + for i in $(seq 1 $retries); do + grep -Fq '#EOF' $filepath && break + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + sed -i "/#EOF/d" $filepath + echo $filepath >> $paved +} +wait_for_apt_locks() { + while fuser /var/lib/dpkg/lock /var/lib/apt/lists/lock /var/cache/apt/archives/lock >/dev/null 2>&1; do + echo 'Waiting for release of apt locks' + sleep 3 + done +} +apt_get_update() { + retries=10 + apt_update_output=/tmp/apt-get-update.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + ! (apt-get update 2>&1 | tee $apt_update_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_update_output && break || \ + cat $apt_update_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get update $i times + wait_for_apt_locks +} +apt_get_install() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get install -o Dpkg::Options::="--force-confold" --no-install-recommends -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + apt_get_update + fi + done + echo Executed apt-get install --no-install-recommends -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_purge() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get purge -o Dpkg::Options::="--force-confold" -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + echo Executed apt-get purge -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_dist_upgrade() { + retries=10 + apt_dist_upgrade_output=/tmp/apt-get-dist-upgrade.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + apt-mark showhold + ! 
(apt-get dist-upgrade -y 2>&1 | tee $apt_dist_upgrade_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_dist_upgrade_output && break || \ + cat $apt_dist_upgrade_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get dist-upgrade $i times + wait_for_apt_locks +} +systemctl_restart() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl restart $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_stop() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl stop $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_disable() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl disable $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +sysctl_reload() { + retries=$1; wait_sleep=$2; timeout=$3 + for i in $(seq 1 $retries); do + timeout $timeout sysctl --system && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +version_gte() { + test "$(printf '%s\n' "$@" | sort -rV | head -n 1)" == "$1" +} + +systemctlEnableAndStart() { + systemctl_restart 100 5 30 $1 + RESTART_STATUS=$? + systemctl status $1 --no-pager -l > /var/log/azure/$1-status.log + if [ $RESTART_STATUS -ne 0 ]; then + echo "$1 could not be started" + return 1 + fi + if ! retrycmd_if_failure 120 5 25 systemctl enable $1; then + echo "$1 could not be enabled by systemctl" + return 1 + fi +} + +systemctlDisableAndStop() { + if [ systemctl list-units --full --all | grep -q "$1.service" ]; then + systemctl_stop 20 5 25 $1 || echo "$1 could not be stopped" + systemctl_disable 20 5 25 $1 || echo "$1 could not be disabled" + fi +} +#HELPERSEOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S118/CSECommand b/pkg/agent/testdata/AKSUbuntu1604+K8S118/CSECommand new file mode 100644 index 00000000000..ba62a0deed2 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S118/CSECommand @@ -0,0 +1 @@ +echo $(date),$(hostname); retrycmd_if_failure() { r=$1; w=$2; t=$3; shift && shift && shift; for i in $(seq 1 $r); do timeout $t ${@}; [ $? 
-eq 0 ] && break || if [ $i -eq $r ]; then return 1; else sleep $w; fi; done }; ERR_OUTBOUND_CONN_FAIL=50; retrycmd_if_failure 50 1 3 nc -vz mcr.microsoft.com 443 2>&1 || exit $ERR_OUTBOUND_CONN_FAIL; for i in $(seq 1 1200); do grep -Fq "EOF" /opt/azure/containers/provision.sh && break; if [ $i -eq 1200 ]; then exit 100; else sleep 1; fi; done; ADMINUSER=azureuser CONTAINERD_VERSION= MOBY_VERSION= TENANT_ID=tenantID KUBERNETES_VERSION=1.18.2 HYPERKUBE_URL=k8s.gcr.io/hyperkube-amd64:v1.18.2 APISERVER_PUBLIC_KEY= SUBSCRIPTION_ID=subID RESOURCE_GROUP=resourceGroupName LOCATION=southcentralus VM_TYPE=vmss SUBNET=subnet1 NETWORK_SECURITY_GROUP=aks-agentpool-36873793-nsg VIRTUAL_NETWORK=aks-vnet-07752737 VIRTUAL_NETWORK_RESOURCE_GROUP=MC_rg ROUTE_TABLE=aks-agentpool-36873793-routetable PRIMARY_AVAILABILITY_SET= PRIMARY_SCALE_SET=aks-agent2-36873793-vmss SERVICE_PRINCIPAL_CLIENT_ID=ClientID SERVICE_PRINCIPAL_CLIENT_SECRET='Secret' KUBELET_PRIVATE_KEY= NETWORK_PLUGIN= NETWORK_POLICY= VNET_CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/azure-cni/v1.1.3/binaries/azure-vnet-cni-linux-amd64-v1.1.3.tgz CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/cni/cni-plugins-amd64-v0.7.6.tgz CLOUDPROVIDER_BACKOFF= CLOUDPROVIDER_BACKOFF_MODE= CLOUDPROVIDER_BACKOFF_RETRIES=0 CLOUDPROVIDER_BACKOFF_EXPONENT=0 CLOUDPROVIDER_BACKOFF_DURATION=0 CLOUDPROVIDER_BACKOFF_JITTER=0 CLOUDPROVIDER_RATELIMIT= CLOUDPROVIDER_RATELIMIT_QPS=0 CLOUDPROVIDER_RATELIMIT_QPS_WRITE=0 CLOUDPROVIDER_RATELIMIT_BUCKET=0 CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE=0 LOAD_BALANCER_DISABLE_OUTBOUND_SNAT= USE_MANAGED_IDENTITY_EXTENSION=false USE_INSTANCE_METADATA=false LOAD_BALANCER_SKU= EXCLUDE_MASTER_FROM_STANDARD_LB=true MAXIMUM_LOADBALANCER_RULE_COUNT=0 CONTAINER_RUNTIME= CONTAINERD_DOWNLOAD_URL_BASE=https://storage.googleapis.com/cri-containerd-release/ NETWORK_MODE= KUBE_BINARY_URL= USER_ASSIGNED_IDENTITY_ID=userAssignedID API_SERVER_NAME= IS_VHD=true GPU_NODE=false SGX_NODE=false AUDITD_ENABLED=false CONFIG_GPU_DRIVER_IF_NEEDED=true ENABLE_GPU_DEVICE_PLUGIN_IF_NEEDED=true /usr/bin/nohup /bin/bash -c "/bin/bash /opt/azure/containers/provision.sh >> /var/log/azure/cluster-provision.log 2>&1; systemctl --no-pager -l status kubelet 2>&1 | head -n 100" \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S118/CustomData b/pkg/agent/testdata/AKSUbuntu1604+K8S118/CustomData new file mode 100644 index 00000000000..6be3c4ee4f7 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S118/CustomData @@ -0,0 +1,158 @@ +[base64(concat('#cloud-config + +write_files: +- path: /opt/azure/containers/provision_source.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9xZbVPbSBL+7l/R69UtkFvZlh1YEkq5FdbgqDCSSi+wXMJOCWlsqxCSI41JsoH/fjWjV7+QNdm9Ind8MupnerqffqanZf/4Q/c6jLvXXjZrtZBlYfvSdtDZ0Blj21EsB58o2lh+Cdw2HBuuijVdc7CjnSHDdeT93HKijRG+UJzh28pykFveGmMVXyhjTXd/U0ZId+RfcoOFxkix0QbAYQ5QTAdruu0o43Hl9FVu0h1zzST1alseemWRcovtGGaRn8qN9qU+VGtYP4epxvAUWWv++70ls2pc6GNDqZf3pSX7KbrcgFnegmXIcJV5sGRuFKBfVODMOL7ky8aaXSfYL6pwZuOROXpk64OGh7XcipqcHtrYcnVd00eVbdCrbWteB0XSp+4xYprRDQefGK6uyoMiVe1stGHZoF42Rk4z00GpNUN3FE1HuQfTbUQ7KNId6tq665dSxYVpGSpW0fEGUH8dZJ6OsKKqheAH0DwOasVYbj0suVRNyzhGxdNCnIbrHDMKWAp6btpvcKiYGraRdY6sJkDaCFB1G48N49Q1C1h/I0z5t2uhdXBJsuIomzVx0GsgVsV4IDWMq4I56K9USV33/kvhfejajnGGbaRYw7dYNc4UTbfzEA8LyMh0sWpp58iym1o4fLluXo3kcL+BQefaEGFz7I40fcnRQQ3S9BMDW8YZHhqW5ZoOUuXDQv726LdH93nVW8c0dngl1U1LVbRxzeOrRjtzTVVxUG0qFDO0EdPhuWZrho7toaWZDj9KFlLUy0aH6zU2YQ3ANUeWoqIGohGG6VqjQppSr6iXfWmzY2ohVilZ6hUSGWo2VmxbG+nYMgwHmxeyVHbNho13eRNZZ5rNIpWlsmWaCm9YQ8O85CBZkpqeTXN8iU3Fti8Mix+LE20kS9I+LB2xqusZhYAl6aA4hedv1Xzzur9IZUtktmNXG6sYWZZhyRLrhvmy/FzYjjI8xSPkYMU6w45xinRZKrv5KkRHzoVhnRZBupbi8ETL5r4Kt91jHTnYtNCJ9psssf7eMmxZ2M2SlIKYQpdQv/tCTElEvIzAPUy9jzewc+tRf7Yr9H6G7u+7morH2imSd/0kJUm2d6+p8m7nxd6e0P0ZvD34AvM0jCnQZDGfk3TXe9e/Au/d4GrvCMinkMLDzl7LPXZ1x8WGjXXlDMnt/P92y3qLxvVT9l+7NTQsZNj10/z/dqvo4nJ3kaXdKPG9iA8HN4tr4tOolRcot7LnQeLfkLRFPs1ZsvwAnssvpcOO1D/o9PpLBmQ7TbfxXRiEXks/11RNKSvPzhRju9/pdQatlYdSRxp0JFEq19T3g+XqTP9Ly3srrm33hBUoj1g67PRedfqi1Gp5c3rrpTcXyjiMF5+UKYnp7h58aQEAfPRCiidJir05xVHi32T8cUpo+tm/DXA4wRMvjBYpASanfejvgzenInMIggQfvYg59ZhTuL+H93x5OIF376AtSG2QZWjPkihow9XVEdAZiTmC/fGyChvnJ44h0YqfRfynnh4duDh2ErYeWq0NyVV8MFtIMlmQjnJusoiQuSz0j4CGtyRZUFkYHEE2CycUfvpp5UO+SZJCCGEMwm5GPoAEQuF07wiCpIq5cAdC9eHLrw/M03VKvJuay5JPEEIQyYfKG6ySwInwZwmgT8RfUBLA+7bw6/s2W8j2yI6WoCmhizQGqSYxysgSgqcOQk1DZZ2E/GOQxPmKP9n2YRPlOE5wRj2a/b9w/3cR2mBrSiimXnrtRVHFEvVS/DWmcjhnapFGsvCyrlBb+NJY/VAS3n6EugZ2hT4vBZH+MeEQtts21DW8fSN9ZbEOeuAv0gjESWaPQeAfkyqWv8444UL2rqOt2sIkjMjco7MG40dw50Vh4NEwibGXTjNZ2F+qwrYV2Mi+UO4Iwso2/30FlyUYbC5BGdnSGn92mwTwz0+PmLctUXVTMS9PLAyHzr07EsjdZE673h+LlHT9KFkEYhiHVGTYrMMRHDtNyRzEkw+sWKWfhzYIHMFoLqjqPaVwldOdH5FxstMoZFm352s6HEkCEENod1l43aC9UjAu3jrmN28KOpq1qaaIesCYhRGBySIjKXTvvLQbhdfdYH4zZUPSTf3Im9NuFGY0azz3PX9GuMVL/Vl4Rwrjm25A7rrxIoqg/+YnaYlkHuXOhRfSMJ7yypRjaTJhkwvw8HZay/QMmkpjSbA+sJgHHl2XmpQXnaFyBE4WdL6gcpfezlm04pRQMTd1ksWTLqZHxjGeWD5oquhYU3R8Yhm6g3RVjpM4jClJPZ+Gd3XlGcMgin4ST8Ipm95ED0RxkqQ+4Q8DMqmwRcggTkD8DGGc0WYb/QF2S0CeE+cc7oESAsIaC2z65zpH0P59990FunrdebF3v/uOoKs07bzYE9p7TO91e/I9usnP5l62Gfw3nZtCDPtbzjkrtJTzzmNzdS2sguJnGHyeVV9F2uymUOc309evjTm7urLXr+V2cy2f8UUxTsRihZgSP7m9JXGQMYU+97BWnv66RzxRMBURj+e4cXT/qqjmi3T6HO8xzyopnvSWgnp+4WynjiKnb9FAEGZMktPUC0opLN1aDNfEbLy5GEAsAMX9tYUUHpHB9hLYtvyP31bV9xLZLPk4S6J8lGvcX83M2OLVi2wDNU+7zqrbaZOndeF9Dd7aUpVLely7xLjiCr1tVtsSJ40rbKPOss8ZJbc+jXBKMuql9Gn9BrI7P/ZuSflS+q1v61UYEHjkNonFlESJF2yzoIgbhCKU76UdNLnNaDL/nyOWBf1dsxqE2bav9N8VsUXc3ye3eStgqTyN179KJiNGFHOSvh9C7kiahUmMp9VbIyUZhbawy3/bmMDOP7L38Q60hV/bcA/FzyfncA8z4gUgxiDt5d90C1K79dCq1YtipgElDuylrrvWj0Hq9WAfBj0Q8nwslP9+ZzuK49qy8K/ldZBRjy4yEKR8GJ17U5KCGMGb4s08mRZflgiSmGM7UTJtXE7LG4AYE+it0Vx87yWBnyyiAOKEwjUBHjEJ2q2NlSioDSfww9d/jqiTIXF+UqStts/RAVx/rl08HkuzGmp+JHk5Gq2aE1JHE/GbNQ5pxuaYBZ/32dRfThQfWEidjKR3oU/aa5wt3wdQZitITOaPEZrM501C17rfVm4KbOGH5/7jWzQ2kWUj46T1nwAAAP//FTwo004jAAA= + +- path: /opt/azure/containers/provision.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/6xXe3PaOhb/35/ilDJ59NYYSG/abIfuUHAaNhSztsnd3d6Ox1jHoImRXEnO47b57juSeZiE3iad+o9MkI7O+Z3XT0fPnzlTypxpLOeW6/vR6WDoRn90w95ZFA4+ut4k7ByDJVGBfWPRFD6BnYLDc+XEfxUCnYQzFVOGQjq54FdUUs4aCV/kGSqEz29BzZFZYD5M5hxq3UxgTG5BxAwUB1kkCUoJeEMVZbNGo1Fbid9QBU0rpZZlTtYPSKzw8GX9YM6lYvECD1+CVLFQSSEVX8hE0Fy9e2fQLSwr5QIoUAb1A4lfoAVHx83m4Vsg3BgonZE/ciaSvBAJNuT8njcAM4E52KdfYP/5mTscu37geqf7T1C4twdTgfGl0ZjSDaw6BRu/GMAPrJqw1HfnyghhJnEtLTPEHForC4QztCQSsCnUnApsh9QeD9wq/3vCAes6pipKuYhSmmHpWOuH5ymTKs4yqWP17dvfe/5ISBWVPwkq4Syls1+JaaPRMp32243lhr1+NHZdP+q5ftipH5Qd8HV7fXA66HVDN7iDb5AUCmyy/2lfN2h7s/DZLLQ2Cy/NQv3goP515PXdaDDqu/+5+611eHhYMXvu/neX1bE/uOiGrt7+dVaX7GLo5RPUvQA6Haj3PN/1gsgLolH3owufq31QMklvHrMZZTMgmMZFpuCymGKiMphSBhlPYkU5K9nkfPLe7YXDjknFUswwy5rSrmLhiII5AqecK1vgl4IKJFvt57vvPS/03X9PBr7b7yhRoLXutnubaaw3tIkyvYXALllQNpEorCTDmE3y3qoaiLXyvlb/+mE8iXSM7mrwrAM1baW27f7y+IfxpC/oFQpp7Fyc9aOh9yEwlTjuhmedSuFdzYm9rP01Pa+drz84Cg+jTVBhopDAjGcEGdBFPEPIBa70lpHGm5wLpbUKVOI2WZCIplEa06wQeE/ifhgGWqWsCum8+SM3dIPowvWDgTeqIDLcr+8QoweKHNbtVaKTJSR9uYGdfMcc7Bmh08lwGA1GQdgdDqN7WVzneJ2jQRBdnPXvatABnZ/t9GwQTqQuz4uzPhAqleAwLRQYstkRcsYVpLxgpLaD67W4YZiRF0an3mTUr14au8Gb+lwV+aaxJu8no3BSaSx9D2mvdmrRLq5qsOLjMud9zOUmPKXTHyr18RbkJc3zsklzZARZQlGuji879BEYq+XIpG6mglDV10eXutZp9Qum6AJ1S5U7I1TXXFyOs2JG2Xr1vJhihqrLyPmSDyoYnv2IgAwGf9wrO1xgrFCr+RgzmqJUfSosqxR6iGtNCOdvZOVXbzSwrNWpJbrlr3/xQrC4RLg7TdXUPDqS+tuu2DYhrQRbr+3m6xO0XzWPEnt69Hvbjlsn7RZiu/kaEd6BI2+lMy2kc7XQf0nJQs78KioUzZyCTSkjmxlkOW+0juifv9zKn6wGDqrEEUlDc3626ouUWmJhqF3vxpoJc9XQ0W4Q5+QkR0E5ockTCi/OVTRDFeWFmCG0m3DUhFa7CXEeJ3Ns2xqVphNdEtZFdzjod8OBN4pc3+80re54EAWuf+H6UX8URL4b+gM36LSbKwQVAWO704EXjVzQq1hhRtll40UVznfVmYnZd4NO/WAHA0P96+6Td3pABiYzzi+LfFtMw7k7tIIw6NT/uYIbhKZNmtsxWm76rgnni1rr+E3j+KjRap80Wse1F/CwAO8FyrDd+Zsgqtjv/m/iuwbs0PPOJ+PotDsYPpx1H6Fpl46UbhisItrzRqN1WH9vVr17WqbK73uaW81mlcl/mLLq4WXOErCv/tqRMXj16kjPqI+Ii9FqImKK1/DM9kBzfx7YLwclWA1KL6Fc0FTPOEH98GrBgrJC4b45tn5o6ru4JueFIvyagS2gBXu1n6CuOFeLWFz+0R1SVtx0Z8gUFGzOM7K806uZfZpqp5DCyeh0zRvyVipckAaJaXa7VP9YDOsXbK1nHqpQvlQhpYzKOZKGLg6QKK5Q6BGGYaLvRUjmmFxCwgn+owb17Sz+zZMYGdn5IF5cEirAzne/R/QQoHiRzB/9urdyCXFxk15fa67eHFpL2rlsZHwGe5ZVDjL3XLCeu96p9f8AAAD//xsNyOSCEAAA + +- path: /opt/azure/containers/provision_installs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9waaXPaSPY7v+JFVm2uaQmcxJtNhuxikD2UMbg4MplNpVSN1IgeJLWm1XJMCP99q3ULhI9kNrs1qQqG7tev331JR4/0OfX1OQ6XjUa3a06M8ft+1zD7Q3N6edXWWSB0/CXiRLeYLzD1CQ91y0IBZzdrLST8mlpEo358dtS9MKb3PcqsFRHxyWHf7I6GZ/1zs9cftxWdCEu3fKr7RGi2Eu+f9ofppsQqN+fUT7Z6o1+Hg1GnN9kBsNln32XYDpVGdzScdvpDY9yrh87Is8uHLl5PaqFX0ZxwnwgSlqFnp7PhdGaOjYHRmRht9Ykbzk1OXIJDAogDCp82Gpx47Jpcsvn6yVPYNAAAcCCQQwQEEXcIoDV4bL5GxHeoT5Lvlksb2+xsNyf1DgwFT/Kw5RLsR0FxuueeUZeEORLuAeILUA9JSiKhfiiw6/ZIUDpHBF9bnm3ShbnA1I04MX1mhgKLEFrHTXgFx6/AirgLaBFOBrAUIgjf6HqArRV2SKh51OIsZAuhWcyTqlhQR4/mkS8iXd1UxbrNj6H8mDQnW7PJHN6BLrzgVpCvX4HcUAGqMR6blxPzajzqmT3jNGfWnPYvjdFseog5OJEstZpgBysHEP2OK68uzs1Or2eedfqDTJEe5qtfOwPqRzcdh/gClsy1s03TIcKMAhsLUsXauZqas6teZ2pUyM+O2DSU5xyO7ZqDvf5Enj4fd3rV4wvGYxQpc0B9wAG2luQYRYK6YWx4gmM/DBgXKFYszF02X0Qhabe0ltYCCyOLcEEX1MKChGCRYIks5nnMB8vhLAqQSwUBiy7CFKvFfF9wbK3Akp8unSMe+YJ6BMhc4LlLQiBiKRhzQd4EDhXguFEoCF+E0ltiuQkWAF3Enz4VKFyHgnhoSdyA8BAoi3cCziJBjoEGIRFAgxT973+AS+cB9lDw+Y8Iu1Ss44XsB5KXh+CxyBfgy0sTjgLqfIGQWVhAuA6lD4DkhMS3gGQPuVK1cPMlZfYLDd6CzWJ5A9AFPMq1lnobvGhCC06aTVBLyngLYkn89BjA7yziPnYt4QJCPkMBdggHFFXO5NBV/feHk2lnMKioHmBB4y8285NzdAEfP4KibjqzXn/aM41h53Rg9LYKtNsgeETg06cKUbfzgiObCvu+XCTQ30L/gpYC1/nVrMfpNeFF+PJWNuWAAlDPr2Zmz5hMpT9/X2Tzr6lNseZQsYzmGmXpArJlwuO6EzgrsoZ31St1HIjsYOBUvTSGG/ffG+NJLbefMRXmgnFTittl1io8GL1yuqXvSjKwbf8vCPlTJJkkif0cUYHSXBqKPWnXgGSc/ghWsbibnncQ10E4EHrIIm6RMF7X7O8nvppLDhrLC0nui9hd0xIj82Mk46Ef3aAlwTbhIVKfRD72ZKHzFBzLAg+vCNgrL3yYAR2k4qSZ2cdgkptHFGpZAaalpiJLCEFCF+uJfN/rw/f9Xr+D4pSKbl6fmCcvkbpJNrcaj3xADNIFYzItrCeJFAXswzgRXtCjvL0fVuK4+CSNUjLrICIpCGhAJMfphmXLWJsg2Srp4v10lIkEKmZy3FbUTSILsyfL9LEpae+PhtvnuxuT2dlZ/8NWeRjHTyvR/P7nKiF6cv5hN0QTa8lA6Sf71Hdgcv4BEu0kgnGZhV1IuYlX0u9t9YnDSQCywhn3T7MAkfjVM5QW51/BigSgBRwDskFpK09jHJas29UUE9CEL6X1Wmu+TCHkv8n5h5Q5czYetJXMNHO7bLY0xh2d+oK4KHRudNvCAWppx3rsQPHPLD3K9iiJaRPCrwmPL9ND58aMYc2bk5dmwrjZ0lrHptVqNZutYy3uhDKK3r5NKT35kZSefAelz0pUJsp+T3hImV8Sfwg+ExBGgSw1ia1Ua4HWLk4SYqtRMo6rTveic25M2kocmGSEksFJeWj63Pc2tM6DoprdAlXHKSS/7wAlGguwRlVfbfXJHIckjq9qVY1PS+dHRtyj1nfdjOyUO5sEfNu4u1QoJWZ1U71/mwTPFJde3t7eXwixuy09ZgN+fnMIXQz14Ksm0854mnRYRZDJu+Bx0lfkoSYrcvMm2BzPhpLKuMpVkkiq7Fa6kFmA7OsbpeKzdGWl5e/OxmNjODWLMJVgtgGh69T0v0IcupRevAPpsgJptLJB+UmRQatVWoF45UVp5XkC87TKXfX6hDd1czk6/S3PCTVMJq6ZUaqWwaV/YpcTbOfeQOyfIFzRIJARW3Kf56XE6YgbFg1JMRbJlxwi5O+rlZMvxTd2B/32Dq2FGrIeJYNMuhPlhdbUXtZwVEEaQ70oAkuqw3K5lDn6cTNuZI6b5VnNLlXP8uFNu0TQM0AIuy77jKQ44o58p0SKQQ/nyUajkMy3DGG+d/zCmZ2Xp7Jw3Rl4VIrRgh/Zpw36k+mdxV4rna5YwWH0h8ri77j3G6W1IuuwIFHDoSX9NnAAIZtg7jG+J6b9pupyYp5fnZsXxm/3H0IdFFOMPheQ4FEoiC0X9+Vzx7UPnDcVoW5IxGfGV1du5FB/L7Ruhsb019H4wrwazM77MtC0QYnT1eG42pHb3WG/HFuzUJ6u5uPL3Xkw/E2SlsWe7rBf0/rvHUqi9LBvTs//HY+y1Y38lZA8kYnv6OiZvoUjmIUE5jhcwjyirqA+HB2BYGlAAxksrCXmITyRVQ5EgdwUSwIL6mMXFF2pqlgKXGA+lwcTS1T2qdMTalLatgrsUbeTEMsYygrLpJLJ9xtF835oTM3/b/nUkfhAIdUM3mtm5UltrW4ObJmnMopanJbG86gCnaUPLemvsWefvNQE5przRdnVzaFR/Q5tJSOupypW1j0EfeA+vYK4Ypb19+1KvgZqP6yU7fMHumYStx7JMuo+prYXwkqRpxy+qh6WPtxKBgeYA7r5cr/rUHcfgbVkn31AY+CMiTfyow5GVttoDH9/9aq6Wwh8Lyr8aK//s0Rflz6q8i8ePt4qwT0wKcRcgju7/yUNF/rpe0UBSD3HXFCXBFgs23oUcj3uCeMnu9Rz9n2b3BArip92pO6tllFAPh3AVog8yjnjWpykie0QzSdCYpX/USlMoeum9ko7UcDdKWn7l+e1nk1uBMeW+GUdEL6K5kUf1h30zeloNGirSWcfs6XoS+YRfZlBo/zhq65uLmanxnhoTI1J0cIkR6NSv9f3sENAzdCDuvnltytjLA/HcanaJqVQd/R+uZ4VVZJZNBBH0F0SayXxfSZgYR9SfkFS7xIB2Lfj75ZwwaZc/l3DgjMPciYfhzCnPuZrWDDXJrzc6yQkQTzCRLIAQsQXfB0w6gtQFEDXEJP0Jv7cYxby5/6ALFBkKVk1nE1K508pjdsEnVLTRXnXKft6ekYBZQdbuoFu0VU9Nku49dgs4d4HGyci4qU2ttx4JmFiR4wPFluuLdgxgTTYVK6kngORLxsKQCwzmnpLXNBGySLV0URaotodjY3RxBxNzGHn0tg3SCszxYIuKb/szQWpnMa+pGthpegLvPmEpowLysD73B4g5ptM4wCxDzaMysD5Irm749sXycmdjiVJPw+6pKa
P+ca5UizOnSCZgx+26CJB3IGJes6esWYJ8vqBajoErtyG7hYhHgBXdqaF9ZfWLWf2mbaKB5h7duCo3DiUf5Iecz/THMxn6XOW/mXnPHb3tnp8sNU/aUJL5uhmKW/Jq0DdxXKgqo7T79WsNE3K3geaBVV6i2cui8i3BGU+pHC5zSRw46ho7JOYJldNwUxbCpHkE810BxBaMO5hAY83G21MAhZSwfh6u32z2WhT7Gy3j7OJJ7o2QKm1CfVr7bL2sYn+8en5gV1Uv2wq2X2Pi1xbPIZIJupSmKbFbNJW/7njy2q+B4/a0DzkulIZOWTJYfNpyK7ktorEpxyIBVmq8ijsH/34r0/bXWfe1mpTap0z1yX8EvvYybT/l9Kq5bLIjrvshFPkJaz+ZZVMbgLG4+eYB322Hu6wNdwxfjxuQlYG3X7lQ1DcQk0Rtmpe5JFRfbwo3qrIFxcPeI2iLjLae1fc9ookuyacU5ukQ9BuPE4vD4xki3jWHxhXnekv2Zuu0lSTT81aOJqtv26acbdl+kRalcQhd5LcJ1hkLZPhShlZYgUWFvDzz2CMzuDduzogGwucyOFNfCBujt+UHnUE7lpeK6lPr34DCyxLDGN01tg2juSf/wQAAP//M+NLdjUsAAA= + +- path: /opt/azure/containers/provision_configs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/7xae3OjRrb/X5+il+iOx7XBeB5JJt5oqjC0vcQSKIAmMzdOUS1oyR3zUJrGtnbs736rm4dAAiTV3Nr8oXHgnN95dJ9XN9/9Q5mTWJmj9G5gWjr0DFOHn0fD13dJymIUYfAMGCIhkH3w9jQnMdUJrFGcDsgC/PEHGFoOGI3AULNsaDme5QhC8Oef/wLsDscDAACY2sYn1YWeMR0NX5MVQCC9Sx4BZnfn4BksKV4BeZqAExJjBm5v/rgNzv7858npAIcp3gWodJQN8Az8jAE5OAEnQF68OR0syAC6mu5NIbS9mT0eSXeMrdILRRl+3aC8XLx99+Fcykm1sQFNt5/4p5+lwcBP4gVZZhSrQUTiWYrp69OvQj//Di0xkCGQ33Ct+G8EzoE8AT/z/4A0/KrqE8OcOdB+keos4fa7l5ocB/sUs7SUok4NB9qfoO2Vyt3AL95Udf89khTMfOU+m2MaY4ZTxceUpQpakRTTB0zP7vE6l8uSzL8TQjvRKg2jJADnP56fH0iePMaAJgm74D97eQSTph5si49ajGjhb9W+j25H7XZiQS02zJGLgJkfdK5CH2CrJQcw5IEp/0fYYjkTy/FmtvEiNYOyNJ2rd8F/DkRfkJoritg5xhV+SHDMulzRAdjtin0MO6u7h2tjm0ghx1i2wpgOv27y6UuXka3I3Sb2kX/rWvdhN1a62BQatF3jytA4+YGb3qesZ9NvA2654f37gxnaV7qba2cXH2NbsYs7bOsA7LZtH0PvLu61TSzwMZbt7uIOI1uRu03sJ283sItHMKWYgX8+iT+xf5d0FpwXCTyDOUrxj++BLAfYTwIMPh5U00rcZkXoBuwuMyVSR4LthjwgIzewdzPbHuz+BNrAFsuxH7Evp7T5obbAh/mhYxe1+eFw7P4Q3PUDJzzEES2AL4MBjtOMYnuqvT4FeXeXrlOGI5+FMEbzEKtx4DBEGaArf07iADw/A/xEGBhC2/acL44LJ5o79hxXtV3vSjXG+1DklCF2CE6lnpoFhOmFhnml4VEz0w1X96CpXo6h/iLx9p/RDDdLTpciiGMeZk3V/pMFQCsGQpIyIMskThkKQxyUs8NJjnnSKHhoxbwlZt4qo0sM3p6Dd+fgzdvzUv6rssKJn3rTfZPNcYiZI4qXhimrVuhmdgnH0D22/7vPAbdbwC00vk2OgBE5WeAkKxynaQiWOKYpAnKSMTDcoyp4e/7+Q4Ob4r+BHONHID/9cP4zkAO0TsFP787PgXyP1/sBW8VWNgE5zeZ/AUnRzFFRW/iUuDXu3HxId3x98HTQ1VV2AbW2W3uJd8pUD8f2zDa7HBvaESPbbtntBGutuvup+wa2HZbcnP+d2dD71bHMDiPQfzKKz/5Kk3hb9yZn+4zZQbOr5w5hV0fQsj7dSXvP8rd0GZWXDmoyWpdB/HAKQ4NcsqkZU3VcliMHajZ0R8OveygU5fZWub29vX35f8OTOJ6U4/mIgV9+AdC6KozaWYA8bCU/TLJAugCSyjfCNJuHxNfEs+9zAoZjFDND0Ay/utBUTdcz9JfyfZrNU5+SFSNJXFI5s0tHs42pa1hmnRahQBNxXxF2GdXGlJ+u7GHMvVExU5wmGfXxNU2yVc5qQ8ea2Rr0rm1rNq0ow8RH3IacaGxpKte+ev0QuesVzl9+mnjulymsuyDGzEQRrsw3azqk2M8oYWuhw4bKhO7vln3DNZ7ZhvtlS5+HBuQnw3Zn6tgrmBpU9q6NW+Reh800yRh2ebnfSLKtmQs9l/cKFd2KkgjRtfqASIjmJCRs7dS1m9rGRLW/eOon1Rirl8aYm+PUXFAAOD4KcSuno6lj2GARG3NKkwcSYHqJ/PtksZgkQcGnja2ZPrWtT4YObe9S1W6sqytvYumwF0C6AB28Lz1cNmaU4LSb2bOhaxvQ6QOBT6skxjHrQYGfp5YJTbcPRs9ouU27YPSZne/dHphfCWOY9oD8argutFshbMTwmESkzRRbdeHYmBjtNlScv02dPmbvt2m7LyuAy8y/x70KeJcz7Qbu1+N3Shjep4z3u2248BCV9sPlejURsxRPUIyWODACHDPC1vCJ4TgtF3rmQG+imuo11D1Dh6bLAwx+dqHp1BY6SzFV05Qs4w2OoecBM3Og7amOY1ybdYxans1SbPBOPfbxBDMUIIYq2YbpuKqpQW8CXVVXXfWlypoouEQhZ6LOfVYmT1X3LtUx57A952ZWyQhIyrONlbF5ksWBY6qukNHk0A2Hpx/PmrmX1szUPU5XSsRPfpgFeIJShukVTSKHoThANBhfCij4WRvPdO4ux4W2d2VbEz6lmLpq69748qXKSPnyfUJZWMtHNxPHq9bskzobu0XjW7BF6IlEWTSumW1nIdaSrIjsifrZmMwmHreoMsiejaGnWbNNaJfib/C6FH7/IZV2337CtNgFEm++oXVVdU7yU+1EcU9OBCMgPbzdOWXkQAGQCZCUvpylBBLobhwPw8ozzgFI5Slm94DXGEI006iGEPFDMaNrPwo8svAWiIQZxWKQ/AG8/QFESbCiyRyDOfVizBYkZJg2h9uJxZ14CTczuugj5RhIdSYJfMwtiZIgC3Eq81A4C5Q6zRnXsmmMZhrGVJTcNFd5y5byZWVTtbxlLZ+OZ9eGmS+pcF3LqkYPYKiZhndpmJ5u2Mqbczn3MhclxnLxWrPMK+NaUFSseYfPG/xtkh2QimdXR2tsaF/yowbJRyHxkxYtqx1zkn4nRaK2S3NKgiWWqv9nFMXpClFeOb9bnhyhFA
771ZL4sne9jJNY+BW8erWFUcbTCDR0+29bJ05C8v+UdE5iBc+Z2DhAZiBGDMhyRZ8fmORHRFoSM0RinrZiRiK8s880y3RVwxRZy3SNCcxtDRL/nm/6HTNzWF28HtQ04yJLofnbSpRuaTe8MBQ9PPwMtfIgyRjDkYiq/DgqKP5VcvFnfNImPj4LFPyEfS9liLJNkD0iwrxFQr0FCUXMn4M3YNgvrRn7/In3u+pq//a46dbMFcC8sPKokNE1yDUB9UvgulETnuW9q7F67RTHZPoRVvkhRtSLeDXxVjRZoaVo9rxFiJbpxtDNTf4/9tzk73VLl74H+aVY6gJKTJcbW3PjlADhKIlr6T2hgAASg+HrFP8N3giFTv8FgqSRToCcVjpWwGA3xgD4629wcnYCfmkhf/UKzClG921RI4QMCZC5Etwnbdh7PZAnmuLYs4r7EOMVeLMtNUhi3HvUWmythufL3ds8NW7uONNwLbtaPa6evW/PyVESE5bQM54D6P7o6RZy0EbpQDlUySJAjlfzYAX7l6TprcNO5DfZr+hfdo5KdXglGsxayOAFb0fLo+Mee9swDjKUMxYFJpf7gKgSknkpU/ybNyR7xNdQDpbMVS7KSlnm8oOiQplkxfJ2UPHLOlUdpJ+ldwc4pBv9m/ZBoUMTozo9bwnOqgbme2CM5jg0k6DW2I3VSzj2TEuHzgFOCDmAHHOEfkd0wB5kfYN3X4w2NNoboF3Q37QqNRWOuSb7NclojMJqJb5uUrk4MXZYQtESj1Z89koZb862KYRCE/Q0S/HozfX266K36nx/ldBHRAM3cdZpmCxHa5zmJC/gYzFVlM7+K9c12BT/LmcUHHLJcYxHbj6kvDGkSTgNUdxoCoc2vLQs14a/zQwb6hyVt4mmVY3nYhJhNOMN81YNpZhlNK73Cr3TWZ5X3DF4+1EJ8IMSZ2EI/DDjo75M4kWyFX8fHB7vpmFeV9uFj1MUIyay7gTFZIFTphPayL4T1TSuoOPqhj3ankajgicfz6L7gFAgr3LVanxc0CMluRxN5MuGiDz/CAF3SYSVYdUuKmdc2hahiLJhg1GpZeGmGhVJ7cZm2MSqXcRsJF9s/mwDOpC8Bs974p/4pNgGVo2RLaw7Fz8DWZYHaEWK444L8PBmUCx8ejGQy01wkWNjysiC+IhhGWXsLqGEreUAMXQBbqWhptbv8G+lQiJ9wPSiUbrya54BADGKsGAtD41+081bic/lDD+xXIH870KBQptdFlBMC9uvZBREJBYEXcIyysdAuRS0S3FP4uAC5HttwIUIxdrgatKEMoXSRODXnFe5rOaUXdcVjPd43cpwA7/cSgMJfOxcaflpc8qh5a5TM5akPgoxVYMgiavQ0cYzYYU6cy1xJ2B7qq435or6yRHnTZUyQ6AKVA7wKkzWEY7Z2RpFYU9h6pV4WHUqjr3S519yVxn6x+fha7Gxhz03XNUF5OnzUtqjSLsgB/sHSMqvxL5FWprNGzY1r/e+Bbm4XayBVzeM3wJLlxvA5t3XUajVrlU1Q0viGPss2dqwqiYObEyo8blDs6E4WVfHzmj4ekVJzBZA+norFRsjuJV4+PxPeit9D8qn+b1m801569p82rxrbb5DPiMPWCdUKLmGcbBKSMxmNMzpyi/lw2RJ4rOI+DRJkwVL4pDE+MxPolvp+9vqvjS/jaCdKJF4L+KrPKCKlFZFrila3ZVXlKXSJcySvzx7JHGQPKZnMWYFRvp3OKkkHKKEn1Bcx7n48P79uwJsicIQ93ikeL9jSPRNGii30osEpL4E0Ps6j1pOUkWEoG/GXv0rhkdwftr8xKj8SOgerwFN0cX7859/LL4aypvm+sdD7378If94KMnYzhl98a2LT3h9yuOAk56tcJR/SnQIA3+Wc5RfF41mjuK4I01VxqMHQlmGwnIOtXYezLafaObWE6klJG/gl9HwdeGjo8x6PD9tC3Fou8cBbsx+LBeoCbm/wjUBDy9uXXKOrmsUB+nH52FnthPJtEvabnY2UYQ/Pm+l5qMwuFN3FYK2exTKPV7vgNzAL/0Ym6ogPplJQuKvt4qC+Nomv1A4ZHU5jrwSQK2LW/ddlUk/Piv1epAq27lBaXz8kiqt7m7XVBxgfQetq8H/BQAA//+oI6qBejYAAA== + + + + + + + +- path: /etc/systemd/system/kubelet.service + permissions: "0644" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/6SUz07jPBTF93kKq2LxfQsnbalQEfIChsBUVAwiRSxKFTnOJbHq2JHvTWlnmHcf9Q9oSosGhJfX53fOkWXd8Z3VNAnOAZXXNWlnxVWTgQEKvjmb6+XkRlIZzzUSiqhBHxmnpIkybaPpRhoE4wT8TCuYBLeAJD0JaZ7kAoPYzrR3tgJLF9qAiIBUlMOjbAy98kmjFCDGc00JSWpQdHqHQTwHlSy9bjyIVV4msWSRqymSPxsPkXKWpLbg8cUqxHIPV01z7RmvWTSTPjI6e03+iFZZ/V4XrlhLP7IxO/ivco0l9swKDzV7aL1NemixZ/akGDf/M26AtdmEnTAqwbJ17ArnPNM236m5Ozhhj7q1r/3GppJT4FhKD7tuQfAGxCWJC1RkGH9iFijU9awXkqpTD+Q1YFf0/w0p5yFEV8m5ctaKztFhv/eJqErOU1zYNJNqalzxUX6VWgGiLCBVDkn02p+kssYjiX77ow9jQRdluPnFYaFSKj1g2RG99vHR1zy6ot857n7N43DzdNsufG0DGcnMADJOzMrlXzEaaa9U12+ltqnAa7UDvbMY2EPANodzsEszjuBn4LdurMuBG5mBQdE6+HV1dxYP41F6/eM8ToenZ/Ew+d3aAmaiy7YHzjQV8No0hbY81369Z5Y1vAUCjNaKtQD/Yg9e0i6Gp5fJvovb+HKQjOLbVZ894/vB6Hs6Oh1cj5IgGA8skjRmEtxLS5CfLUTVGNK8QfAhSV8A/QkAAP//lvMgPnQFAAA= + + + +- path: /etc/apt/apt.conf.d/99periodic + permissions: "0644" + owner: root + content: | + APT::Periodic::Update-Package-Lists "0"; + APT::Periodic::Download-Upgradeable-Packages "0"; + APT::Periodic::AutocleanInterval "0"; + APT::Periodic::Unattended-Upgrade "0"; + + + + + + + + +- path: /etc/systemd/system/docker.service.d/exec_start.conf + permissions: "0644" + owner: root + content: | + [Service] + ExecStart= + ExecStart=/usr/bin/dockerd -H fd:// --storage-driver=overlay2 
--bip= + ExecStartPost=/sbin/iptables -P FORWARD ACCEPT + #EOF + +- path: /etc/docker/daemon.json + permissions: "0644" + owner: root + content: | + { + "live-restore": true, + "log-driver": "json-file", + "log-opts": { + "max-size": "50m", + "max-file": "5" + } + } + + + + + + + + +- path: /etc/kubernetes/certs/ca.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + +- path: /etc/kubernetes/certs/client.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + + + +- path: /var/lib/kubelet/kubeconfig + permissions: "0644" + owner: root + content: | + apiVersion: v1 + kind: Config + clusters: + - name: localcluster + cluster: + certificate-authority: /etc/kubernetes/certs/ca.crt + server: https://:443 + users: + - name: client + user: + client-certificate: /etc/kubernetes/certs/client.crt + client-key: /etc/kubernetes/certs/client.key + contexts: + - context: + cluster: localcluster + user: client + name: localclustercontext + current-context: localclustercontext + #EOF + +- path: /etc/default/kubelet + permissions: "0644" + owner: root + content: | + KUBELET_FLAGS=--address=0.0.0.0 --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroups-per-qos=true --client-ca-file=/etc/kubernetes/certs/ca.crt --cluster-dns=10.0.0.10 --cluster-domain=cluster.local --enforce-node-allocatable=pods --event-qps=0 --eviction-hard=memory.available<750Mi,nodefs.available<10%,nodefs.inodesFree<5% --feature-gates=RotateKubeletServerCertificate=true,a=b,PodPriority=true,x=y --image-gc-high-threshold=85 --image-gc-low-threshold=80 --kube-reserved=cpu=100m,memory=1638Mi --max-pods=110 --node-status-update-frequency=10s --pod-manifest-path=/etc/kubernetes/manifests --pod-max-pids=-1 --protect-kernel-defaults=true --read-only-port=10255 --resolv-conf=/etc/resolv.conf --rotate-certificates=true --streaming-connection-idle-timeout=4h0m0s --system-reserved=cpu=2,memory=1Gi --tls-cert-file=/etc/kubernetes/certs/kubeletserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --tls-private-key-file=/etc/kubernetes/certs/kubeletserver.key + KUBELET_REGISTER_SCHEDULABLE=true + + KUBELET_NODE_LABELS=kubernetes.azure.com/role=agent,agentpool=agent2,storageprofile=managed,storagetier=Premium_LRS,kubernetes.azure.com/cluster=',variables('labelResourceGroup'),' + + #EOF + +- path: /opt/azure/containers/kubelet.sh + permissions: "0755" + owner: root + content: | + #!/bin/bash + + + + #EOF + +runcmd: +- set -x +- . /opt/azure/containers/provision_source.sh +- aptmarkWALinuxAgent hold +'))] \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S118/line16.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S118/line16.sh new file mode 100644 index 00000000000..e51aeb3fea9 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S118/line16.sh @@ -0,0 +1,160 @@ +#!/bin/bash +ERR_FILE_WATCH_TIMEOUT=6 +set -x +if [ -f /opt/azure/containers/provision.complete ]; then + echo "Already ran to success exiting..." 
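+  # provision.complete is only written after a fully successful run (see the
+  # end of this script), so its presence means custom script execution already
+  # completed and the node can skip re-provisioning.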
+ exit 0 +fi + +echo $(date),$(hostname), startcustomscript>>/opt/m + +for i in $(seq 1 3600); do + if [ -s /opt/azure/containers/provision_source.sh ]; then + grep -Fq '#HELPERSEOF' /opt/azure/containers/provision_source.sh && break + fi + if [ $i -eq 3600 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi +done +sed -i "/#HELPERSEOF/d" /opt/azure/containers/provision_source.sh +source /opt/azure/containers/provision_source.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_installs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_installs.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_configs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_configs.sh + +set +x +ETCD_PEER_CERT=$(echo ${ETCD_PEER_CERTIFICATES} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +ETCD_PEER_KEY=$(echo ${ETCD_PEER_PRIVATE_KEYS} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +set -x + +if [[ $OS == $COREOS_OS_NAME ]]; then + echo "Changing default kubectl bin location" + KUBECTL=/opt/kubectl +fi + +if [ -f /var/run/reboot-required ]; then + REBOOTREQUIRED=true +else + REBOOTREQUIRED=false +fi + +configureAdminUser +cleanUpContainerd + + +if [[ "${GPU_NODE}" != "true" ]]; then + cleanUpGPUDrivers +fi + +VHD_LOGS_FILEPATH=/opt/azure/vhd-install.complete +if [ -f $VHD_LOGS_FILEPATH ]; then + echo "detected golden image pre-install" + export -f retrycmd_if_failure + export -f cleanUpContainerImages + export KUBERNETES_VERSION + echo "start to clean up container images" + bash -c cleanUpContainerImages & + FULL_INSTALL_REQUIRED=false +else + if [[ "${IS_VHD}" = true ]]; then + echo "Using VHD distro but file $VHD_LOGS_FILEPATH not found" + exit $ERR_VHD_FILE_NOT_FOUND + fi + FULL_INSTALL_REQUIRED=true +fi + +if [[ $OS == $UBUNTU_OS_NAME ]] && [ "$FULL_INSTALL_REQUIRED" = "true" ]; then + installDeps +else + echo "Golden image; skipping dependencies installation" +fi + +if [[ $OS == $UBUNTU_OS_NAME ]]; then + ensureAuditD +fi +installContainerRuntime + + +installNetworkPlugin + +installKubeletAndKubectl + +if [[ $OS != $COREOS_OS_NAME ]]; then + ensureRPC +fi + +createKubeManifestDir + +ensureContainerRuntime + +configureK8s + +configureCNI + + + +ensureKubelet +ensureJournal + +if $FULL_INSTALL_REQUIRED; then + if [[ $OS == $UBUNTU_OS_NAME ]]; then + + echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind + sed -i "13i\echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind\n" /etc/rc.local + fi +fi +rm -f /etc/apt/apt.conf.d/99periodic +if [[ $OS == $UBUNTU_OS_NAME ]]; then + apt_get_purge 20 30 120 apache2-utils & +fi + + +VALIDATION_ERR=0 +API_SERVER_DNS_RETRIES=20 +if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_DNS_RETRIES=200 +fi +RES=$(retrycmd_if_failure ${API_SERVER_DNS_RETRIES} 1 3 nslookup ${API_SERVER_NAME}) +STS=$? 
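+# A non-zero nslookup exit whose output still contains 168.63.129.16 (the
+# Azure-provided DNS/wireserver address) is reported below as an Azure DNS
+# lookup failure; any other non-zero result is a generic API server DNS failure.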
+if [[ $STS != 0 ]]; then + if [[ $RES == *"168.63.129.16"* ]]; then + VALIDATION_ERR=$ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL + else + VALIDATION_ERR=$ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL + fi +else + API_SERVER_CONN_RETRIES=50 + if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_CONN_RETRIES=100 + fi + retrycmd_if_failure ${API_SERVER_CONN_RETRIES} 1 3 nc -vz ${API_SERVER_NAME} 443 || VALIDATION_ERR=$ERR_K8S_API_SERVER_CONN_FAIL +fi + + + +if $REBOOTREQUIRED; then + echo 'reboot required, rebooting node in 1 minute' + /bin/bash -c "shutdown -r 1 &" + if [[ $OS == $UBUNTU_OS_NAME ]]; then + aptmarkWALinuxAgent unhold & + fi +else + if [[ $OS == $UBUNTU_OS_NAME ]]; then + /usr/lib/apt/apt.systemd.daily & + aptmarkWALinuxAgent unhold & + fi +fi + +echo "Custom script finished. API server connection check code:" $VALIDATION_ERR +echo $(date),$(hostname), endcustomscript>>/opt/m +mkdir -p /opt/azure/containers && touch /opt/azure/containers/provision.complete +ps auxfww > /opt/azure/provision-ps.log & + +exit $VALIDATION_ERR + +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S118/line23.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S118/line23.sh new file mode 100644 index 00000000000..1f074afdfa1 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S118/line23.sh @@ -0,0 +1,276 @@ +#!/bin/bash + +CC_SERVICE_IN_TMP=/opt/azure/containers/cc-proxy.service.in +CC_SOCKET_IN_TMP=/opt/azure/containers/cc-proxy.socket.in +CNI_CONFIG_DIR="/etc/cni/net.d" +CNI_BIN_DIR="/opt/cni/bin" +CNI_DOWNLOADS_DIR="/opt/cni/downloads" +CONTAINERD_DOWNLOADS_DIR="/opt/containerd/downloads" +K8S_DOWNLOADS_DIR="/opt/kubernetes/downloads" +UBUNTU_RELEASE=$(lsb_release -r -s) + +removeMoby() { + apt-get purge -y moby-engine moby-cli +} + +removeContainerd() { + apt-get purge -y moby-containerd +} + +cleanupContainerdDlFiles() { + rm -rf $CONTAINERD_DOWNLOADS_DIR +} + +installDeps() { + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/packages-microsoft-prod.deb > /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT + retrycmd_if_failure 60 5 10 dpkg -i /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_PKG_ADD_FAIL + aptmarkWALinuxAgent hold + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT + apt_get_dist_upgrade || exit $ERR_APT_DIST_UPGRADE_TIMEOUT + for apt_package in apache2-utils apt-transport-https blobfuse=1.1.1 ca-certificates ceph-common cgroup-lite cifs-utils conntrack cracklib-runtime ebtables ethtool fuse git glusterfs-client htop iftop init-system-helpers iotop iproute2 ipset iptables jq libpam-pwquality libpwquality-tools mount nfs-common pigz socat sysstat traceroute util-linux xz-utils zip; do + if ! apt_get_install 30 1 600 $apt_package; then + journalctl --no-pager -u $apt_package + exit $ERR_APT_INSTALL_TIMEOUT + fi + done + if [[ "${AUDITD_ENABLED}" == true ]]; then + if ! 
apt_get_install 30 1 600 auditd; then + journalctl --no-pager -u auditd + exit $ERR_APT_INSTALL_TIMEOUT + fi + fi +} + +installGPUDrivers() { + mkdir -p $GPU_DEST/tmp + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/gpgkey > $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-key add $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/ubuntu${UBUNTU_RELEASE}/nvidia-docker.list > $GPU_DEST/tmp/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 cat $GPU_DEST/tmp/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + apt_get_update + retrycmd_if_failure 30 5 3600 apt-get install -y linux-headers-$(uname -r) gcc make dkms || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + retrycmd_if_failure 30 5 60 curl -fLS https://us.download.nvidia.com/tesla/$GPU_DV/NVIDIA-Linux-x86_64-${GPU_DV}.run -o ${GPU_DEST}/nvidia-drivers-${GPU_DV} || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + tmpDir=$GPU_DEST/tmp + if ! ( + set -e -o pipefail + cd "${tmpDir}" + retrycmd_if_failure 30 5 3600 apt-get download nvidia-docker2="${NVIDIA_DOCKER_VERSION}+${NVIDIA_DOCKER_SUFFIX}" || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + ); then + exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + fi +} + +installSGXDrivers() { + echo "Installing SGX driver" + local VERSION + VERSION=$(grep DISTRIB_RELEASE /etc/*-release| cut -f 2 -d "=") + case $VERSION in + "18.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer18.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "16.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer16.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "*") + echo "Version $VERSION is not supported" + exit 1 + ;; + esac + + local PACKAGES="make gcc dkms" + wait_for_apt_locks + retrycmd_if_failure 30 5 3600 apt-get -y install $PACKAGES || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + + local SGX_DRIVER + SGX_DRIVER=$(basename $SGX_DRIVER_URL) + local OE_DIR=/opt/azure/containers/oe + mkdir -p ${OE_DIR} + + retrycmd_if_failure 120 5 25 curl -fsSL ${SGX_DRIVER_URL} -o ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + chmod a+x ${OE_DIR}/${SGX_DRIVER} + ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_START_FAIL +} + +installContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + installMoby + fi + +} + +installMoby() { + CURRENT_VERSION=$(dockerd --version | grep "Docker version" | cut -d "," -f 1 | cut -d " " -f 3 | cut -d "+" -f 1) + if [[ "$CURRENT_VERSION" == "${MOBY_VERSION}" ]]; then + echo "dockerd $MOBY_VERSION is already installed, skipping Moby download" + else + removeMoby + getMobyPkg + MOBY_CLI=${MOBY_VERSION} + if [[ "${MOBY_CLI}" == "3.0.4" ]]; then + MOBY_CLI="3.0.3" + fi + apt_get_install 20 30 120 moby-engine=${MOBY_VERSION}* moby-cli=${MOBY_CLI}* --allow-downgrades || exit $ERR_MOBY_INSTALL_TIMEOUT + fi +} + + + +getMobyPkg() { + retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/prod.list > /tmp/microsoft-prod.list || exit $ERR_MOBY_APT_LIST_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft-prod.list /etc/apt/sources.list.d/ || exit $ERR_MOBY_APT_LIST_TIMEOUT + 
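+  # Trust the Microsoft package repo: de-armor the signing key and place it in apt's trusted keyring.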
retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /tmp/microsoft.gpg || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft.gpg /etc/apt/trusted.gpg.d/ || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT +} + +installNetworkPlugin() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + installAzureCNI + fi + installCNI + rm -rf $CNI_DOWNLOADS_DIR & +} + +downloadCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadAzureCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${VNET_CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadContainerd() { + CONTAINERD_DOWNLOAD_URL="${CONTAINERD_DOWNLOAD_URL_BASE}cri-containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz" + mkdir -p $CONTAINERD_DOWNLOADS_DIR + CONTAINERD_TGZ_TMP=${CONTAINERD_DOWNLOAD_URL##*/} + retrycmd_get_tarball 120 5 "$CONTAINERD_DOWNLOADS_DIR/${CONTAINERD_TGZ_TMP}" ${CONTAINERD_DOWNLOAD_URL} || exit $ERR_CONTAINERD_DOWNLOAD_TIMEOUT +} + +installCNI() { + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadCNI + fi + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR + chown -R root:root $CNI_BIN_DIR + chmod -R 755 $CNI_BIN_DIR +} + +installAzureCNI() { + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadAzureCNI + fi + mkdir -p $CNI_CONFIG_DIR + chown -R root:root $CNI_CONFIG_DIR + chmod 755 $CNI_CONFIG_DIR + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR +} + +installImg() { + img_filepath=/usr/local/bin/img + retrycmd_get_executable 120 5 $img_filepath "https://acs-mirror.azureedge.net/img/img-linux-amd64-v0.5.6" ls || exit $ERR_IMG_DOWNLOAD_TIMEOUT +} + +extractHyperkube() { + CLI_TOOL=$1 + path="/home/hyperkube-downloads/${KUBERNETES_VERSION}" + pullContainerImage $CLI_TOOL ${HYPERKUBE_URL} + if [[ "$CLI_TOOL" == "docker" ]]; then + mkdir -p "$path" + # Check if we can extract kubelet and kubectl directly from hyperkube's binary folder + if docker run --rm --entrypoint "" -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /usr/local/bin/{kubelet,kubectl} $path"; then + mv "$path/kubelet" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/kubectl" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + return + else + docker run --rm -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /hyperkube $path" + fi + else + img unpack -o "$path" ${HYPERKUBE_URL} + fi + + if [[ $OS == $COREOS_OS_NAME ]]; then + cp "$path/hyperkube" "/opt/kubelet" + mv "$path/hyperkube" "/opt/kubectl" + chmod a+x /opt/kubelet /opt/kubectl + else + cp "$path/hyperkube" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/hyperkube" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + fi +} + +installKubeletAndKubectl() { + if [[ ! 
-f "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" ]]; then + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + extractHyperkube "docker" + else + installImg + extractHyperkube "img" + fi + fi + mv "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" "/usr/local/bin/kubelet" + mv "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" "/usr/local/bin/kubectl" + chmod a+x /usr/local/bin/kubelet /usr/local/bin/kubectl + rm -rf /usr/local/bin/kubelet-* /usr/local/bin/kubectl-* /home/hyperkube-downloads & +} + +pullContainerImage() { + CLI_TOOL=$1 + DOCKER_IMAGE_URL=$2 + retrycmd_if_failure 60 1 1200 $CLI_TOOL pull $DOCKER_IMAGE_URL || exit $ERR_CONTAINER_IMG_PULL_TIMEOUT +} + +cleanUpContainerImages() { + function cleanUpHyperkubeImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'hyperkube') + local exit_code=$? + if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + function cleanUpControllerManagerImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'cloud-controller-manager') + local exit_code=$? + if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + export -f cleanUpHyperkubeImagesRun + export -f cleanUpControllerManagerImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpHyperkubeImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpControllerManagerImagesRun +} + +cleanUpGPUDrivers() { + rm -Rf $GPU_DEST + rm -f /etc/apt/sources.list.d/nvidia-docker.list +} + +cleanUpContainerd() { + rm -Rf $CONTAINERD_DOWNLOADS_DIR +} + +overrideNetworkConfig() { + CONFIG_FILEPATH="/etc/cloud/cloud.cfg.d/80_azure_net_config.cfg" + touch ${CONFIG_FILEPATH} + cat << EOF >> ${CONFIG_FILEPATH} +datasource: + Azure: + apply_network_config: false +EOF +} +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S118/line30.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S118/line30.sh new file mode 100644 index 00000000000..ce857cb431e --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S118/line30.sh @@ -0,0 +1,337 @@ +#!/bin/bash +NODE_INDEX=$(hostname | tail -c 2) +NODE_NAME=$(hostname) +if [[ $OS == $COREOS_OS_NAME ]]; then + PRIVATE_IP=$(ip a show eth0 | grep -Po 'inet \K[\d.]+') +else + PRIVATE_IP=$(hostname -I | cut -d' ' -f1) +fi +ETCD_PEER_URL="https://${PRIVATE_IP}:2380" +ETCD_CLIENT_URL="https://${PRIVATE_IP}:2379" + +configureAdminUser(){ + chage -E -1 -I -1 -m 0 -M 99999 "${ADMINUSER}" + chage -l "${ADMINUSER}" +} + +configureSecrets(){ + APISERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/apiserver.key" + touch "${APISERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${APISERVER_PRIVATE_KEY_PATH}" + chown root:root "${APISERVER_PRIVATE_KEY_PATH}" + + CA_PRIVATE_KEY_PATH="/etc/kubernetes/certs/ca.key" + touch "${CA_PRIVATE_KEY_PATH}" + chmod 0600 "${CA_PRIVATE_KEY_PATH}" + chown root:root "${CA_PRIVATE_KEY_PATH}" + + ETCD_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdserver.key" + touch "${ETCD_SERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_SERVER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_SERVER_PRIVATE_KEY_PATH}" + fi + + ETCD_CLIENT_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdclient.key" + touch 
"${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chown root:root "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + + ETCD_PEER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.key" + touch "${ETCD_PEER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_PEER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_PEER_PRIVATE_KEY_PATH}" + fi + + ETCD_SERVER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdserver.crt" + touch "${ETCD_SERVER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_SERVER_CERTIFICATE_PATH}" + chown root:root "${ETCD_SERVER_CERTIFICATE_PATH}" + + ETCD_CLIENT_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdclient.crt" + touch "${ETCD_CLIENT_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_CLIENT_CERTIFICATE_PATH}" + chown root:root "${ETCD_CLIENT_CERTIFICATE_PATH}" + + ETCD_PEER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.crt" + touch "${ETCD_PEER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_PEER_CERTIFICATE_PATH}" + chown root:root "${ETCD_PEER_CERTIFICATE_PATH}" + + set +x + echo "${APISERVER_PRIVATE_KEY}" | base64 --decode > "${APISERVER_PRIVATE_KEY_PATH}" + echo "${CA_PRIVATE_KEY}" | base64 --decode > "${CA_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_PRIVATE_KEY}" | base64 --decode > "${ETCD_SERVER_PRIVATE_KEY_PATH}" + echo "${ETCD_CLIENT_PRIVATE_KEY}" | base64 --decode > "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + echo "${ETCD_PEER_KEY}" | base64 --decode > "${ETCD_PEER_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_CERTIFICATE}" | base64 --decode > "${ETCD_SERVER_CERTIFICATE_PATH}" + echo "${ETCD_CLIENT_CERTIFICATE}" | base64 --decode > "${ETCD_CLIENT_CERTIFICATE_PATH}" + echo "${ETCD_PEER_CERT}" | base64 --decode > "${ETCD_PEER_CERTIFICATE_PATH}" +} + +ensureRPC() { + systemctlEnableAndStart rpcbind || exit $ERR_SYSTEMCTL_START_FAIL + systemctlEnableAndStart rpc-statd || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureAuditD() { + if [[ "${AUDITD_ENABLED}" == true ]]; then + systemctlEnableAndStart auditd || exit $ERR_SYSTEMCTL_START_FAIL + else + if apt list --installed | grep 'auditd'; then + apt_get_purge 20 30 120 auditd & + fi + fi +} + +configureKubeletServerCert() { + KUBELET_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/kubeletserver.key" + KUBELET_SERVER_CERT_PATH="/etc/kubernetes/certs/kubeletserver.crt" + + openssl genrsa -out $KUBELET_SERVER_PRIVATE_KEY_PATH 2048 + openssl req -new -x509 -days 7300 -key $KUBELET_SERVER_PRIVATE_KEY_PATH -out $KUBELET_SERVER_CERT_PATH -subj "/CN=${NODE_NAME}" +} + +configureK8s() { + KUBELET_PRIVATE_KEY_PATH="/etc/kubernetes/certs/client.key" + touch "${KUBELET_PRIVATE_KEY_PATH}" + chmod 0600 "${KUBELET_PRIVATE_KEY_PATH}" + chown root:root "${KUBELET_PRIVATE_KEY_PATH}" + + APISERVER_PUBLIC_KEY_PATH="/etc/kubernetes/certs/apiserver.crt" + touch "${APISERVER_PUBLIC_KEY_PATH}" + chmod 0644 "${APISERVER_PUBLIC_KEY_PATH}" + chown root:root "${APISERVER_PUBLIC_KEY_PATH}" + + AZURE_JSON_PATH="/etc/kubernetes/azure.json" + touch "${AZURE_JSON_PATH}" + chmod 0600 "${AZURE_JSON_PATH}" + chown root:root "${AZURE_JSON_PATH}" + + set +x + echo "${KUBELET_PRIVATE_KEY}" | base64 --decode > "${KUBELET_PRIVATE_KEY_PATH}" + echo "${APISERVER_PUBLIC_KEY}" | base64 --decode > "${APISERVER_PUBLIC_KEY_PATH}" + + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\\/\\\\} + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\"/\\\"} + cat << EOF > "${AZURE_JSON_PATH}" +{ + "cloud": "AzurePublicCloud", + "tenantId": "${TENANT_ID}", + "subscriptionId": "${SUBSCRIPTION_ID}", + "aadClientId": 
"${SERVICE_PRINCIPAL_CLIENT_ID}", + "aadClientSecret": "${SERVICE_PRINCIPAL_CLIENT_SECRET}", + "resourceGroup": "${RESOURCE_GROUP}", + "location": "${LOCATION}", + "vmType": "${VM_TYPE}", + "subnetName": "${SUBNET}", + "securityGroupName": "${NETWORK_SECURITY_GROUP}", + "vnetName": "${VIRTUAL_NETWORK}", + "vnetResourceGroup": "${VIRTUAL_NETWORK_RESOURCE_GROUP}", + "routeTableName": "${ROUTE_TABLE}", + "primaryAvailabilitySetName": "${PRIMARY_AVAILABILITY_SET}", + "primaryScaleSetName": "${PRIMARY_SCALE_SET}", + "cloudProviderBackoffMode": "${CLOUDPROVIDER_BACKOFF_MODE}", + "cloudProviderBackoff": ${CLOUDPROVIDER_BACKOFF}, + "cloudProviderBackoffRetries": ${CLOUDPROVIDER_BACKOFF_RETRIES}, + "cloudProviderBackoffExponent": ${CLOUDPROVIDER_BACKOFF_EXPONENT}, + "cloudProviderBackoffDuration": ${CLOUDPROVIDER_BACKOFF_DURATION}, + "cloudProviderBackoffJitter": ${CLOUDPROVIDER_BACKOFF_JITTER}, + "cloudProviderRateLimit": ${CLOUDPROVIDER_RATELIMIT}, + "cloudProviderRateLimitQPS": ${CLOUDPROVIDER_RATELIMIT_QPS}, + "cloudProviderRateLimitBucket": ${CLOUDPROVIDER_RATELIMIT_BUCKET}, + "cloudProviderRateLimitQPSWrite": ${CLOUDPROVIDER_RATELIMIT_QPS_WRITE}, + "cloudProviderRateLimitBucketWrite": ${CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE}, + "useManagedIdentityExtension": ${USE_MANAGED_IDENTITY_EXTENSION}, + "userAssignedIdentityID": "${USER_ASSIGNED_IDENTITY_ID}", + "useInstanceMetadata": ${USE_INSTANCE_METADATA}, + "loadBalancerSku": "${LOAD_BALANCER_SKU}", + "disableOutboundSNAT": ${LOAD_BALANCER_DISABLE_OUTBOUND_SNAT}, + "excludeMasterFromStandardLB": ${EXCLUDE_MASTER_FROM_STANDARD_LB}, + "providerVaultName": "${KMS_PROVIDER_VAULT_NAME}", + "maximumLoadBalancerRuleCount": ${MAXIMUM_LOADBALANCER_RULE_COUNT}, + "providerKeyName": "k8s", + "providerKeyVersion": "" +} +EOF + set -x + if [[ "${CLOUDPROVIDER_BACKOFF_MODE}" = "v2" ]]; then + sed -i "/cloudProviderBackoffExponent/d" /etc/kubernetes/azure.json + sed -i "/cloudProviderBackoffJitter/d" /etc/kubernetes/azure.json + fi + + configureKubeletServerCert +} + +configureCNI() { + + retrycmd_if_failure 120 5 25 modprobe br_netfilter || exit $ERR_MODPROBE_FAIL + echo -n "br_netfilter" > /etc/modules-load.d/br_netfilter.conf + configureCNIIPTables + +} + +configureCNIIPTables() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + mv $CNI_BIN_DIR/10-azure.conflist $CNI_CONFIG_DIR/ + chmod 600 $CNI_CONFIG_DIR/10-azure.conflist + if [[ "${NETWORK_POLICY}" == "calico" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + elif [[ "${NETWORK_POLICY}" == "" || "${NETWORK_POLICY}" == "none" ]] && [[ "${NETWORK_MODE}" == "transparent" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + fi + /sbin/ebtables -t nat --list + fi +} + +ensureContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + ensureDocker + fi + +} + + + +ensureDocker() { + DOCKER_SERVICE_EXEC_START_FILE=/etc/systemd/system/docker.service.d/exec_start.conf + wait_for_file 1200 1 $DOCKER_SERVICE_EXEC_START_FILE || exit $ERR_FILE_WATCH_TIMEOUT + usermod -aG docker ${ADMINUSER} + DOCKER_MOUNT_FLAGS_SYSTEMD_FILE=/etc/systemd/system/docker.service.d/clear_mount_propagation_flags.conf + if [[ $OS != $COREOS_OS_NAME ]]; then + wait_for_file 1200 1 $DOCKER_MOUNT_FLAGS_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + fi + DOCKER_JSON_FILE=/etc/docker/daemon.json + for i in $(seq 1 1200); do + if [ -s $DOCKER_JSON_FILE ]; then + jq '.' 
< $DOCKER_JSON_FILE && break + fi + if [ $i -eq 1200 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi + done + systemctlEnableAndStart docker || exit $ERR_DOCKER_START_FAIL + + DOCKER_MONITOR_SYSTEMD_TIMER_FILE=/etc/systemd/system/docker-monitor.timer + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_TIMER_FILE || exit $ERR_FILE_WATCH_TIMEOUT + DOCKER_MONITOR_SYSTEMD_FILE=/etc/systemd/system/docker-monitor.service + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart docker-monitor.timer || exit $ERR_SYSTEMCTL_START_FAIL +} + + + + + +ensureKubelet() { + KUBELET_DEFAULT_FILE=/etc/default/kubelet + wait_for_file 1200 1 $KUBELET_DEFAULT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBECONFIG_FILE=/var/lib/kubelet/kubeconfig + wait_for_file 1200 1 $KUBECONFIG_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBELET_RUNTIME_CONFIG_SCRIPT_FILE=/opt/azure/containers/kubelet.sh + wait_for_file 1200 1 $KUBELET_RUNTIME_CONFIG_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart kubelet || exit $ERR_KUBELET_START_FAIL + + + +} + +ensureLabelNodes() { + LABEL_NODES_SCRIPT_FILE=/opt/azure/containers/label-nodes.sh + wait_for_file 1200 1 $LABEL_NODES_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + LABEL_NODES_SYSTEMD_FILE=/etc/systemd/system/label-nodes.service + wait_for_file 1200 1 $LABEL_NODES_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart label-nodes || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureJournal() { + { + echo "Storage=persistent" + echo "SystemMaxUse=1G" + echo "RuntimeMaxUse=1G" + echo "ForwardToSyslog=yes" + } >> /etc/systemd/journald.conf + systemctlEnableAndStart systemd-journald || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureK8sControlPlane() { + if $REBOOTREQUIRED || [ "$NO_OUTBOUND" = "true" ]; then + return + fi + retrycmd_if_failure 120 5 25 $KUBECTL 2>/dev/null cluster-info || exit $ERR_K8S_RUNNING_TIMEOUT +} + +createKubeManifestDir() { + KUBEMANIFESTDIR=/etc/kubernetes/manifests + mkdir -p $KUBEMANIFESTDIR +} + +writeKubeConfig() { + KUBECONFIGDIR=/home/$ADMINUSER/.kube + KUBECONFIGFILE=$KUBECONFIGDIR/config + mkdir -p $KUBECONFIGDIR + touch $KUBECONFIGFILE + chown $ADMINUSER:$ADMINUSER $KUBECONFIGDIR + chown $ADMINUSER:$ADMINUSER $KUBECONFIGFILE + chmod 700 $KUBECONFIGDIR + chmod 600 $KUBECONFIGFILE + set +x + echo " +--- +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: \"$CA_CERTIFICATE\" + server: $KUBECONFIG_SERVER + name: \"$MASTER_FQDN\" +contexts: +- context: + cluster: \"$MASTER_FQDN\" + user: \"$MASTER_FQDN-admin\" + name: \"$MASTER_FQDN\" +current-context: \"$MASTER_FQDN\" +kind: Config +users: +- name: \"$MASTER_FQDN-admin\" + user: + client-certificate-data: \"$KUBECONFIG_CERTIFICATE\" + client-key-data: \"$KUBECONFIG_KEY\" +" > $KUBECONFIGFILE + set -x +} + +configClusterAutoscalerAddon() { + CLUSTER_AUTOSCALER_ADDON_FILE=/etc/kubernetes/addons/cluster-autoscaler-deployment.yaml + wait_for_file 1200 1 $CLUSTER_AUTOSCALER_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_SECRET | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SUBSCRIPTION_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $TENANT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $RESOURCE_GROUP | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE +} + +configACIConnectorAddon() { + ACI_CONNECTOR_CREDENTIALS=$(printf 
"{\"clientId\": \"%s\", \"clientSecret\": \"%s\", \"tenantId\": \"%s\", \"subscriptionId\": \"%s\", \"activeDirectoryEndpointUrl\": \"https://login.microsoftonline.com\",\"resourceManagerEndpointUrl\": \"https://management.azure.com/\", \"activeDirectoryGraphResourceId\": \"https://graph.windows.net/\", \"sqlManagementEndpointUrl\": \"https://management.core.windows.net:8443/\", \"galleryEndpointUrl\": \"https://gallery.azure.com/\", \"managementEndpointUrl\": \"https://management.core.windows.net/\"}" "$SERVICE_PRINCIPAL_CLIENT_ID" "$SERVICE_PRINCIPAL_CLIENT_SECRET" "$TENANT_ID" "$SUBSCRIPTION_ID" | base64 -w 0) + + openssl req -newkey rsa:4096 -new -nodes -x509 -days 3650 -keyout /etc/kubernetes/certs/aci-connector-key.pem -out /etc/kubernetes/certs/aci-connector-cert.pem -subj "/C=US/ST=CA/L=virtualkubelet/O=virtualkubelet/OU=virtualkubelet/CN=virtualkubelet" + ACI_CONNECTOR_KEY=$(base64 /etc/kubernetes/certs/aci-connector-key.pem -w0) + ACI_CONNECTOR_CERT=$(base64 /etc/kubernetes/certs/aci-connector-cert.pem -w0) + + ACI_CONNECTOR_ADDON_FILE=/etc/kubernetes/addons/aci-connector-deployment.yaml + wait_for_file 1200 1 $ACI_CONNECTOR_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$ACI_CONNECTOR_CREDENTIALS|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$RESOURCE_GROUP|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_CERT|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_KEY|g" $ACI_CONNECTOR_ADDON_FILE +} + +configAzurePolicyAddon() { + AZURE_POLICY_ADDON_FILE=/etc/kubernetes/addons/azure-policy-deployment.yaml + sed -i "s||/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP|g" $AZURE_POLICY_ADDON_FILE +} + + +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S118/line43.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S118/line43.sh new file mode 100644 index 00000000000..e708f006a14 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+K8S118/line43.sh @@ -0,0 +1,38 @@ +[Unit] +Description=Kubelet +ConditionPathExists=/usr/local/bin/kubelet + + +[Service] +Restart=always +EnvironmentFile=/etc/default/kubelet +SuccessExitStatus=143 +ExecStartPre=/bin/bash /opt/azure/containers/kubelet.sh +ExecStartPre=/bin/mkdir -p /var/lib/kubelet +ExecStartPre=/bin/mkdir -p /var/lib/cni +ExecStartPre=/bin/bash -c "if [ $(mount | grep \"/var/lib/kubelet\" | wc -l) -le 0 ] ; then /bin/mount --bind /var/lib/kubelet /var/lib/kubelet ; fi" +ExecStartPre=/bin/mount --make-shared /var/lib/kubelet + + +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_retries2=8 +ExecStartPre=/sbin/sysctl -w net.core.somaxconn=16384 +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_max_syn_backlog=16384 +ExecStartPre=/sbin/sysctl -w net.core.message_cost=40 +ExecStartPre=/sbin/sysctl -w net.core.message_burst=80 + +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh1=4096 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh2=8192 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh3=16384 + +ExecStartPre=-/sbin/ebtables -t nat --list +ExecStartPre=-/sbin/iptables -t nat --numeric --list +ExecStart=/usr/local/bin/kubelet \ + --enable-server \ + --node-labels="${KUBELET_NODE_LABELS}" \ + --v=2 \ + --volume-plugin-dir=/etc/kubernetes/volumeplugins \ + $KUBELET_FLAGS \ + $KUBELET_REGISTER_NODE $KUBELET_REGISTER_WITH_TAINTS + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+K8S118/line9.sh b/pkg/agent/testdata/AKSUbuntu1604+K8S118/line9.sh new file mode 100644 index 00000000000..08cbc16e86d --- /dev/null +++ 
b/pkg/agent/testdata/AKSUbuntu1604+K8S118/line9.sh @@ -0,0 +1,305 @@ +#!/bin/bash + +ERR_SYSTEMCTL_START_FAIL=4 +ERR_CLOUD_INIT_TIMEOUT=5 +ERR_FILE_WATCH_TIMEOUT=6 +ERR_HOLD_WALINUXAGENT=7 +ERR_RELEASE_HOLD_WALINUXAGENT=8 +ERR_APT_INSTALL_TIMEOUT=9 +ERR_NTP_INSTALL_TIMEOUT=10 +ERR_NTP_START_TIMEOUT=11 +ERR_STOP_SYSTEMD_TIMESYNCD_TIMEOUT=12 +ERR_DOCKER_INSTALL_TIMEOUT=20 +ERR_DOCKER_DOWNLOAD_TIMEOUT=21 +ERR_DOCKER_KEY_DOWNLOAD_TIMEOUT=22 +ERR_DOCKER_APT_KEY_TIMEOUT=23 +ERR_DOCKER_START_FAIL=24 +ERR_MOBY_APT_LIST_TIMEOUT=25 +ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT=26 +ERR_MOBY_INSTALL_TIMEOUT=27 +ERR_K8S_RUNNING_TIMEOUT=30 +ERR_K8S_DOWNLOAD_TIMEOUT=31 +ERR_KUBECTL_NOT_FOUND=32 +ERR_IMG_DOWNLOAD_TIMEOUT=33 +ERR_KUBELET_START_FAIL=34 +ERR_CONTAINER_IMG_PULL_TIMEOUT=35 +ERR_CNI_DOWNLOAD_TIMEOUT=41 +ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT=42 +ERR_MS_PROD_DEB_PKG_ADD_FAIL=43 + +ERR_SYSTEMD_INSTALL_FAIL=48 +ERR_MODPROBE_FAIL=49 +ERR_OUTBOUND_CONN_FAIL=50 +ERR_K8S_API_SERVER_CONN_FAIL=51 +ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL=52 +ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL=53 +ERR_KATA_KEY_DOWNLOAD_TIMEOUT=60 +ERR_KATA_APT_KEY_TIMEOUT=61 +ERR_KATA_INSTALL_TIMEOUT=62 +ERR_CONTAINERD_DOWNLOAD_TIMEOUT=70 +ERR_CUSTOM_SEARCH_DOMAINS_FAIL=80 +ERR_GPU_DRIVERS_START_FAIL=84 +ERR_GPU_DRIVERS_INSTALL_TIMEOUT=85 +ERR_GPU_DEVICE_PLUGIN_START_FAIL=86 +ERR_GPU_INFO_ROM_CORRUPTED=87 +ERR_SGX_DRIVERS_INSTALL_TIMEOUT=90 +ERR_SGX_DRIVERS_START_FAIL=91 +ERR_APT_DAILY_TIMEOUT=98 +ERR_APT_UPDATE_TIMEOUT=99 +ERR_CSE_PROVISION_SCRIPT_NOT_READY_TIMEOUT=100 +ERR_APT_DIST_UPGRADE_TIMEOUT=101 +ERR_APT_PURGE_FAIL=102 +ERR_SYSCTL_RELOAD=103 +ERR_CIS_ASSIGN_ROOT_PW=111 +ERR_CIS_ASSIGN_FILE_PERMISSION=112 +ERR_PACKER_COPY_FILE=113 +ERR_CIS_APPLY_PASSWORD_CONFIG=115 +ERR_SYSTEMD_DOCKER_STOP_FAIL=116 + +ERR_VHD_FILE_NOT_FOUND=124 +ERR_VHD_BUILD_ERROR=125 + + +ERR_AZURE_STACK_GET_ARM_TOKEN=120 +ERR_AZURE_STACK_GET_NETWORK_CONFIGURATION=121 +ERR_AZURE_STACK_GET_SUBNET_PREFIX=122 + +OS=$(sort -r /etc/*-release | gawk 'match($0, /^(ID_LIKE=(coreos)|ID=(.*))$/, a) { print toupper(a[2] a[3]); exit }') +UBUNTU_OS_NAME="UBUNTU" +RHEL_OS_NAME="RHEL" +COREOS_OS_NAME="COREOS" +KUBECTL=/usr/local/bin/kubectl +DOCKER=/usr/bin/docker +export GPU_DV=418.126.02 +export GPU_DEST=/usr/local/nvidia +NVIDIA_DOCKER_VERSION=2.0.3 +DOCKER_VERSION=1.13.1-1 +NVIDIA_CONTAINER_RUNTIME_VERSION=2.0.0 +NVIDIA_DOCKER_SUFFIX=docker18.09.2-1 + +aptmarkWALinuxAgent() { + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-mark $1 walinuxagent || \ + if [[ "$1" == "hold" ]]; then + exit $ERR_HOLD_WALINUXAGENT + elif [[ "$1" == "unhold" ]]; then + exit $ERR_RELEASE_HOLD_WALINUXAGENT + fi +} + +retrycmd_if_failure() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + echo Executed \"$@\" $i times; + return 1 + else + sleep $wait_sleep + fi + done + echo Executed \"$@\" $i times; +} +retrycmd_if_failure_no_stats() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +retrycmd_get_tarball() { + tar_retries=$1; wait_sleep=$2; tarball=$3; url=$4 + echo "${tar_retries} retries" + for i in $(seq 1 $tar_retries); do + tar -tzf $tarball && break || \ + if [ $i -eq $tar_retries ]; then + return 1 + else + timeout 60 curl -fsSL $url -o $tarball + sleep $wait_sleep + fi + done +} +retrycmd_get_executable() { + 
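+  # Retry loop: succeed once "$filepath $validation_args" runs cleanly; otherwise re-download $url
+  # to $filepath, mark it executable, and sleep before the next attempt.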
retries=$1; wait_sleep=$2; filepath=$3; url=$4; validation_args=$5 + echo "${retries} retries" + for i in $(seq 1 $retries); do + $filepath $validation_args && break || \ + if [ $i -eq $retries ]; then + return 1 + else + timeout 30 curl -fsSL $url -o $filepath + chmod +x $filepath + sleep $wait_sleep + fi + done +} +wait_for_file() { + retries=$1; wait_sleep=$2; filepath=$3 + paved=/opt/azure/cloud-init-files.paved + grep -Fq "${filepath}" $paved && return 0 + for i in $(seq 1 $retries); do + grep -Fq '#EOF' $filepath && break + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + sed -i "/#EOF/d" $filepath + echo $filepath >> $paved +} +wait_for_apt_locks() { + while fuser /var/lib/dpkg/lock /var/lib/apt/lists/lock /var/cache/apt/archives/lock >/dev/null 2>&1; do + echo 'Waiting for release of apt locks' + sleep 3 + done +} +apt_get_update() { + retries=10 + apt_update_output=/tmp/apt-get-update.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + ! (apt-get update 2>&1 | tee $apt_update_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_update_output && break || \ + cat $apt_update_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get update $i times + wait_for_apt_locks +} +apt_get_install() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get install -o Dpkg::Options::="--force-confold" --no-install-recommends -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + apt_get_update + fi + done + echo Executed apt-get install --no-install-recommends -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_purge() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get purge -o Dpkg::Options::="--force-confold" -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + echo Executed apt-get purge -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_dist_upgrade() { + retries=10 + apt_dist_upgrade_output=/tmp/apt-get-dist-upgrade.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + apt-mark showhold + ! 
(apt-get dist-upgrade -y 2>&1 | tee $apt_dist_upgrade_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_dist_upgrade_output && break || \ + cat $apt_dist_upgrade_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get dist-upgrade $i times + wait_for_apt_locks +} +systemctl_restart() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl restart $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_stop() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl stop $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_disable() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl disable $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +sysctl_reload() { + retries=$1; wait_sleep=$2; timeout=$3 + for i in $(seq 1 $retries); do + timeout $timeout sysctl --system && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +version_gte() { + test "$(printf '%s\n' "$@" | sort -rV | head -n 1)" == "$1" +} + +systemctlEnableAndStart() { + systemctl_restart 100 5 30 $1 + RESTART_STATUS=$? + systemctl status $1 --no-pager -l > /var/log/azure/$1-status.log + if [ $RESTART_STATUS -ne 0 ]; then + echo "$1 could not be started" + return 1 + fi + if ! retrycmd_if_failure 120 5 25 systemctl enable $1; then + echo "$1 could not be enabled by systemctl" + return 1 + fi +} + +systemctlDisableAndStop() { + if [ systemctl list-units --full --all | grep -q "$1.service" ]; then + systemctl_stop 20 5 25 $1 || echo "$1 could not be stopped" + systemctl_disable 20 5 25 $1 || echo "$1 could not be disabled" + fi +} +#HELPERSEOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/CSECommand b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/CSECommand new file mode 100644 index 00000000000..0126922bc3d --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/CSECommand @@ -0,0 +1 @@ +echo $(date),$(hostname); retrycmd_if_failure() { r=$1; w=$2; t=$3; shift && shift && shift; for i in $(seq 1 $r); do timeout $t ${@}; [ $? 
-eq 0 ] && break || if [ $i -eq $r ]; then return 1; else sleep $w; fi; done }; ERR_OUTBOUND_CONN_FAIL=50; retrycmd_if_failure 50 1 3 nc -vz mcr.microsoft.com 443 2>&1 || exit $ERR_OUTBOUND_CONN_FAIL; for i in $(seq 1 1200); do grep -Fq "EOF" /opt/azure/containers/provision.sh && break; if [ $i -eq 1200 ]; then exit 100; else sleep 1; fi; done; ADMINUSER=azureuser CONTAINERD_VERSION= MOBY_VERSION= TENANT_ID=tenantID KUBERNETES_VERSION=1.15.7 HYPERKUBE_URL=hyperkube-amd64:v1.15.7 APISERVER_PUBLIC_KEY= SUBSCRIPTION_ID=subID RESOURCE_GROUP=resourceGroupName LOCATION=southcentralus VM_TYPE=vmss SUBNET=subnet1 NETWORK_SECURITY_GROUP=aks-agentpool-36873793-nsg VIRTUAL_NETWORK=aks-vnet-07752737 VIRTUAL_NETWORK_RESOURCE_GROUP=MC_rg ROUTE_TABLE=aks-agentpool-36873793-routetable PRIMARY_AVAILABILITY_SET= PRIMARY_SCALE_SET=aks-agent2-36873793-vmss SERVICE_PRINCIPAL_CLIENT_ID=ClientID SERVICE_PRINCIPAL_CLIENT_SECRET='Secret' KUBELET_PRIVATE_KEY= NETWORK_PLUGIN= NETWORK_POLICY= VNET_CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/azure-cni/v1.1.3/binaries/azure-vnet-cni-linux-amd64-v1.1.3.tgz CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/cni/cni-plugins-amd64-v0.7.6.tgz CLOUDPROVIDER_BACKOFF= CLOUDPROVIDER_BACKOFF_MODE= CLOUDPROVIDER_BACKOFF_RETRIES=0 CLOUDPROVIDER_BACKOFF_EXPONENT=0 CLOUDPROVIDER_BACKOFF_DURATION=0 CLOUDPROVIDER_BACKOFF_JITTER=0 CLOUDPROVIDER_RATELIMIT= CLOUDPROVIDER_RATELIMIT_QPS=0 CLOUDPROVIDER_RATELIMIT_QPS_WRITE=0 CLOUDPROVIDER_RATELIMIT_BUCKET=0 CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE=0 LOAD_BALANCER_DISABLE_OUTBOUND_SNAT= USE_MANAGED_IDENTITY_EXTENSION=false USE_INSTANCE_METADATA=false LOAD_BALANCER_SKU= EXCLUDE_MASTER_FROM_STANDARD_LB=true MAXIMUM_LOADBALANCER_RULE_COUNT=0 CONTAINER_RUNTIME=containerd CONTAINERD_DOWNLOAD_URL_BASE=https://storage.googleapis.com/cri-containerd-release/ NETWORK_MODE= KUBE_BINARY_URL= USER_ASSIGNED_IDENTITY_ID=userAssignedID API_SERVER_NAME= IS_VHD=true GPU_NODE=false SGX_NODE=false AUDITD_ENABLED=false CONFIG_GPU_DRIVER_IF_NEEDED=true ENABLE_GPU_DEVICE_PLUGIN_IF_NEEDED=true /usr/bin/nohup /bin/bash -c "/bin/bash /opt/azure/containers/provision.sh >> /var/log/azure/cluster-provision.log 2>&1; systemctl --no-pager -l status kubelet 2>&1 | head -n 100" \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/CustomData b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/CustomData new file mode 100644 index 00000000000..e5112aa752a --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/CustomData @@ -0,0 +1,215 @@ +[base64(concat('#cloud-config + +write_files: +- path: /opt/azure/containers/provision_source.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9xZbVPbSBL+7l/R69UtkFvZlh1YEkq5FdbgqDCSSi+wXMJOCWlsqxCSI41JsoH/fjWjV7+QNdm9Ind8MupnerqffqanZf/4Q/c6jLvXXjZrtZBlYfvSdtDZ0Blj21EsB58o2lh+Cdw2HBuuijVdc7CjnSHDdeT93HKijRG+UJzh28pykFveGmMVXyhjTXd/U0ZId+RfcoOFxkix0QbAYQ5QTAdruu0o43Hl9FVu0h1zzST1alseemWRcovtGGaRn8qN9qU+VGtYP4epxvAUWWv++70ls2pc6GNDqZf3pSX7KbrcgFnegmXIcJV5sGRuFKBfVODMOL7ky8aaXSfYL6pwZuOROXpk64OGh7XcipqcHtrYcnVd00eVbdCrbWteB0XSp+4xYprRDQefGK6uyoMiVe1stGHZoF42Rk4z00GpNUN3FE1HuQfTbUQ7KNId6tq665dSxYVpGSpW0fEGUH8dZJ6OsKKqheAH0DwOasVYbj0suVRNyzhGxdNCnIbrHDMKWAp6btpvcKiYGraRdY6sJkDaCFB1G48N49Q1C1h/I0z5t2uhdXBJsuIomzVx0GsgVsV4IDWMq4I56K9USV33/kvhfejajnGGbaRYw7dYNc4UTbfzEA8LyMh0sWpp58iym1o4fLluXo3kcL+BQefaEGFz7I40fcnRQQ3S9BMDW8YZHhqW5ZoOUuXDQv726LdH93nVW8c0dngl1U1LVbRxzeOrRjtzTVVxUG0qFDO0EdPhuWZrho7toaWZDj9KFlLUy0aH6zU2YQ3ANUeWoqIGohGG6VqjQppSr6iXfWmzY2ohVilZ6hUSGWo2VmxbG+nYMgwHmxeyVHbNho13eRNZZ5rNIpWlsmWaCm9YQ8O85CBZkpqeTXN8iU3Fti8Mix+LE20kS9I+LB2xqusZhYAl6aA4hedv1Xzzur9IZUtktmNXG6sYWZZhyRLrhvmy/FzYjjI8xSPkYMU6w45xinRZKrv5KkRHzoVhnRZBupbi8ETL5r4Kt91jHTnYtNCJ9psssf7eMmxZ2M2SlIKYQpdQv/tCTElEvIzAPUy9jzewc+tRf7Yr9H6G7u+7morH2imSd/0kJUm2d6+p8m7nxd6e0P0ZvD34AvM0jCnQZDGfk3TXe9e/Au/d4GrvCMinkMLDzl7LPXZ1x8WGjXXlDMnt/P92y3qLxvVT9l+7NTQsZNj10/z/dqvo4nJ3kaXdKPG9iA8HN4tr4tOolRcot7LnQeLfkLRFPs1ZsvwAnssvpcOO1D/o9PpLBmQ7TbfxXRiEXks/11RNKSvPzhRju9/pdQatlYdSRxp0JFEq19T3g+XqTP9Ly3srrm33hBUoj1g67PRedfqi1Gp5c3rrpTcXyjiMF5+UKYnp7h58aQEAfPRCiidJir05xVHi32T8cUpo+tm/DXA4wRMvjBYpASanfejvgzenInMIggQfvYg59ZhTuL+H93x5OIF376AtSG2QZWjPkihow9XVEdAZiTmC/fGyChvnJ44h0YqfRfynnh4duDh2ErYeWq0NyVV8MFtIMlmQjnJusoiQuSz0j4CGtyRZUFkYHEE2CycUfvpp5UO+SZJCCGEMwm5GPoAEQuF07wiCpIq5cAdC9eHLrw/M03VKvJuay5JPEEIQyYfKG6ySwInwZwmgT8RfUBLA+7bw6/s2W8j2yI6WoCmhizQGqSYxysgSgqcOQk1DZZ2E/GOQxPmKP9n2YRPlOE5wRj2a/b9w/3cR2mBrSiimXnrtRVHFEvVS/DWmcjhnapFGsvCyrlBb+NJY/VAS3n6EugZ2hT4vBZH+MeEQtts21DW8fSN9ZbEOeuAv0gjESWaPQeAfkyqWv8444UL2rqOt2sIkjMjco7MG40dw50Vh4NEwibGXTjNZ2F+qwrYV2Mi+UO4Iwso2/30FlyUYbC5BGdnSGn92mwTwz0+PmLctUXVTMS9PLAyHzr07EsjdZE673h+LlHT9KFkEYhiHVGTYrMMRHDtNyRzEkw+sWKWfhzYIHMFoLqjqPaVwldOdH5FxstMoZFm352s6HEkCEENod1l43aC9UjAu3jrmN28KOpq1qaaIesCYhRGBySIjKXTvvLQbhdfdYH4zZUPSTf3Im9NuFGY0azz3PX9GuMVL/Vl4Rwrjm25A7rrxIoqg/+YnaYlkHuXOhRfSMJ7yypRjaTJhkwvw8HZay/QMmkpjSbA+sJgHHl2XmpQXnaFyBE4WdL6gcpfezlm04pRQMTd1ksWTLqZHxjGeWD5oquhYU3R8Yhm6g3RVjpM4jClJPZ+Gd3XlGcMgin4ST8Ipm95ED0RxkqQ+4Q8DMqmwRcggTkD8DGGc0WYb/QF2S0CeE+cc7oESAsIaC2z65zpH0P59990FunrdebF3v/uOoKs07bzYE9p7TO91e/I9usnP5l62Gfw3nZtCDPtbzjkrtJTzzmNzdS2sguJnGHyeVV9F2uymUOc309evjTm7urLXr+V2cy2f8UUxTsRihZgSP7m9JXGQMYU+97BWnv66RzxRMBURj+e4cXT/qqjmi3T6HO8xzyopnvSWgnp+4WynjiKnb9FAEGZMktPUC0opLN1aDNfEbLy5GEAsAMX9tYUUHpHB9hLYtvyP31bV9xLZLPk4S6J8lGvcX83M2OLVi2wDNU+7zqrbaZOndeF9Dd7aUpVLely7xLjiCr1tVtsSJ40rbKPOss8ZJbc+jXBKMuql9Gn9BrI7P/ZuSflS+q1v61UYEHjkNonFlESJF2yzoIgbhCKU76UdNLnNaDL/nyOWBf1dsxqE2bav9N8VsUXc3ye3eStgqTyN179KJiNGFHOSvh9C7kiahUmMp9VbIyUZhbawy3/bmMDOP7L38Q60hV/bcA/FzyfncA8z4gUgxiDt5d90C1K79dCq1YtipgElDuylrrvWj0Hq9WAfBj0Q8nwslP9+ZzuK49qy8K/ldZBRjy4yEKR8GJ17U5KCGMGb4s08mRZflgiSmGM7UTJtXE7LG4AYE+it0Vx87yWBnyyiAOKEwjUBHjEJ2q2NlSioDSfww9d/jqiTIXF+UqStts/RAVx/rl08HkuzGmp+JHk5Gq2aE1JHE/GbNQ5pxuaYBZ/32dRfThQfWEidjKR3oU/aa5wt3wdQZitITOaPEZrM501C17rfVm4KbOGH5/7jWzQ2kWUj46T1nwAAAP//FTwo004jAAA= + +- path: /opt/azure/containers/provision.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/6xXe3PaOhb/35/ilDJ59NYYSG/abIfuUHAaNhSztsnd3d6Ox1jHoImRXEnO47b57juSeZiE3iad+o9MkI7O+Z3XT0fPnzlTypxpLOeW6/vR6WDoRn90w95ZFA4+ut4k7ByDJVGBfWPRFD6BnYLDc+XEfxUCnYQzFVOGQjq54FdUUs4aCV/kGSqEz29BzZFZYD5M5hxq3UxgTG5BxAwUB1kkCUoJeEMVZbNGo1Fbid9QBU0rpZZlTtYPSKzw8GX9YM6lYvECD1+CVLFQSSEVX8hE0Fy9e2fQLSwr5QIoUAb1A4lfoAVHx83m4Vsg3BgonZE/ciaSvBAJNuT8njcAM4E52KdfYP/5mTscu37geqf7T1C4twdTgfGl0ZjSDaw6BRu/GMAPrJqw1HfnyghhJnEtLTPEHForC4QztCQSsCnUnApsh9QeD9wq/3vCAes6pipKuYhSmmHpWOuH5ymTKs4yqWP17dvfe/5ISBWVPwkq4Syls1+JaaPRMp32243lhr1+NHZdP+q5ftipH5Qd8HV7fXA66HVDN7iDb5AUCmyy/2lfN2h7s/DZLLQ2Cy/NQv3goP515PXdaDDqu/+5+611eHhYMXvu/neX1bE/uOiGrt7+dVaX7GLo5RPUvQA6Haj3PN/1gsgLolH3owufq31QMklvHrMZZTMgmMZFpuCymGKiMphSBhlPYkU5K9nkfPLe7YXDjknFUswwy5rSrmLhiII5AqecK1vgl4IKJFvt57vvPS/03X9PBr7b7yhRoLXutnubaaw3tIkyvYXALllQNpEoVq7W6l8/jCeRDshdDZ51oKZV1rZ9TTKM2ST/MJ70Bb1CIY3Si7N+NPQ+BKbsxt3wrFOpsqs5sZeFvubitaf1B0fhYWgJKkwUEpjxjCADuohnCLnAld4yrHiTc6G0VoFK3CYLEtE0SmOaFQLvSSz96K06YKBVyqqQTpI/ckM3iC5cPxh4owoiQ/T6wjB6oMhh3UslOllC0jcZ2Ml3zMGeETqdDIfRYBSE3eEwupeydULXORoE0cVZ/64GHdD52U7PBuFE6lq8OOsDoVIJDtNCgWGWHSFnXEHKC0ZqO4hdixs6GXlhdOpNRv3qDbEbvCnGVUVvumjyfjIKJ5Uu0peO9mqnFu3iqgYrPi5z3sdcbsJTOv2hUh9vQV7SPC87MkdGkCUU5er4sh0fgbFajkzqzikIVX19dKlrnVa/YIou0LJWOyNU11xcjrNiRtl69byYYoaqy8j5svkrGJ79iG0MBn/cK9tZYKxQq/kYM5qiVH0qLKsUeohr3f3nb2TlV280sKzVqSW65a9/8UKwuES4O03V1Dw6kvrbrtg2Ia0EW6/t5usTtF81jxJ7evR7245bJ+0WYrv5GhHegSNvpTMtpHO10H9JyULO/CoqFM2cgk0pI5uBYzlctI7on7/cyp+sBg6qxBFJQxN8tuqLlFpiYXhc78aaCXPV0NFuEOfkJEdBOaHJEwovzlU0QxXlhZghtJtw1IRWuwlxHidzbNsalaYTXRLWRXc46HfDgTeKXN/vNK3ueBAFrn/h+lF/FES+G/oDN+i0mysEFQFju9OBF41c0KtYYUbZZeNFFc531Znx2HeDTv1gBwND/evuk3d6GgYmM84vi3xbTMO5O7SCMOjU/7mCG4SmTZrbMVpu+q4J54ta6/hN4/io0WqfNFrHtRfwsADvBcqw3fmbIKrY7/5v4rsG7NDzzifj6LQ7GD4cbB+haZeOlG4YrCLa80ajdVh/b1a9e1qmyu97mlvNZpXJf5iy6uFlzhKwr/7akTF49epID6SPiIvRaiJiitfwzPb0cn8e2C+nIlhNRS+hXNBUzzhB/cpqwYKyQuG+ObZ+Veq7uCbnhSL8moEtoAV7tZ+grjhXi1hc/tEdUlbcdGfIFBRszjOyvNOrmX2aaqeQwsnodM0b8lYqXJAGiWl2u1T/WAzr52qtZ16lUD5LIaWMyjmShi4OkCiuUOgRhmGi70VI5phcQsIJ/qMG9e0s/s37FxnZ+fpdXBIqwM53Pz70EKB4kcwf/ZS3cglxcZNeX2uu3hxaS9q5bGR8BnuWVQ4y91ywnrveqfX/AAAA//8lfXdwbxAAAA== + +- path: /opt/azure/containers/provision_installs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9w6a5Paxpbf+RUnGur6kbQEY3s21wnexYAn1MzAFA8nWV+XqpEa0RdJrbRaM2CG/e1b3XojMQ/7bvZWXGUGuk+fPu+XdPKdsaC+scDhqtHo9czpYPJx2BuYw5E5u7ruGCwQBv4ScWJYzBeY+oSHhmWhgLPNVg8Jv6EW0amvzo57F4PZY48ya02EOjkamr3x6MPw3OwPJx3NIMIyLJ8aPhG6ran998NRsimxys0F9eOt/vjX0eW4258eANjs1ncZtkOt0RuPZt3haDDp10On5NnFQxc/Tmuh19GCcJ8IEhah5+/no9ncnAwuB93poNN87oYLkxOX4JAA4oDCF40GJx67IVdssX3+AnYNAAAcCOQQAUHEHQJoCx5bbBHxHeqT+Lvl0sY+PdvLSH0AQ86TPGy5BPtRkJ/uux+oS8IMCfcA8SU0j0lKIqF+KLDr9klQOEcE31qebdKlucTUjTgxfWaGAosQ2qcteAOnb8CKuAtoGU4vYSVEEL41jABba+yQUPeoxVnIlkK3mCdVsaSOES0iX0RGc1cW6z47hrJj0pxs3SYLeAeG8IJ7Qe7ugGyogOZgMjGvpub1ZNw3+4P3GbPmbHg1GM9nx5iDM8lSuwV2sHYA0W+48vri3Oz2++aH7vAyVaSH+frX7iX1o03XIb6AFXPtdNN0iDCjwMaClLF2r2fm/LrfnQ1K5KdHbBrKcw7Hds3B/nAqT59Puv3y8SXjCkXCHFAfcICtFTlFkaBuqAxPcOyHAeMCKcXCwmWLZRSSTltv622wMLIIF3RJLSxICBYJVshinsd8sBzOogC5VBCw6DJMsFrM9wXH1hos+enSBeKRL6hHgCwEXrgkBCJWgjEX5E3gUAGOG4WC8GUovUXJTbAA6FJ9+lSgcBsK4qEVcQPCQ6BM7QScRYKcAg1CIoAGCfp//gEuXQTYQ8HtHxF2qdiqhfQHkpeH4LHIF+DLS2OOAup8gZBZWEC4DaUPgOSEqFtAsodcqVrYfEmY/UKDn8BmSt4AdAnfZVpLvA1etaANZ60WNAvK+AnEivjJMYB/soj72LWECwj5DAXYIRxQVDqTQZf1PxxNZ93Ly5LqAZZUfbGZH5+jS/j0CbTmrjvvD2d9czDqvr8c9PcadDogeETg8+cSUffzgiObCvuxXMTQX0P/khYC1/n1vM/pDeF5+PLWNuWAAmieX8/N/mA6k/78bZHNv6E2xbpDxSpa6JQlC8iWCY8bTuCsyRbela80cCDSg4FT9lIFNxl+HEymtdzeYirMJeOmFLfLrHV4NHpldEvflWRg2/7/IORfIsk4SVRzRAlKd2koKtKuAUk5/TNYxeJhet6BqoNwIIyQRdwioVrX7W8nvpxLjhrLK0nuK+WuSYmR+jGS8dCPNmhFsE14iJrPIx97stB5AY5lgYfXBOy1Fz7NgI5ScdZK7eNymplHFOppAaYnpiJLCEFCFxuxfD8ao4/D/rCLVEpFmx/PzLPXqLmLN/c6j3xADJKFwXSWW08cKXLYp3EivKBPeacaVlRcfJ5EKZl1EJEUBDQgkuNkw7JlrI2R7LVk8XE6SkUCJTM57WjNXSwLsy/L9IkpaR+OR/vvDzem8w8fhr/ttadx/KIUzR9/rhSip+e/HYZoYq0YaMN4n/oOTM9/g1g7sWBcZmEXEm7USvK903zucBKArHAmw/dpgIj96iVKivM7sCIBaAmngGzQOtoLhcOSdXszwQQ05ktr/6i3XicQ8t/0/LeEOXM+uexoqWlmdtlq64w7BvUFcVHobAzbwgFq66eGciD1M02Psj2KY9qU8BvC1WVG6GxMBWtuzl6bMeNmW2+fmla73Wq1T3XVCaUU/fRTQunZn0np2TdQ+rJAZazsj4SHlPkF8YfgMwFhFMhSk9hauRZoH+IkIbYaBeO47vYuuueDaUdTgUlGKBmctKemz6q3oW0WFJvpLVB2nFzyVQco0JiDNcr66jSfL3BIVHxtltX4onB+PFA9an3XzchBubOLwfeNh0uFQmJu7sr37+PgmeAyitv7xwtBudvKYzbg7zfH0CmoJ181nXUns7jDyoNM1gVP4r4iCzVpkZs1weZkPpJUqipXiyOpdljpQmoBsq9vFIrPw/28+463c4pKE4HefDIZjGZmHsXii21A6CbxjDtQkU3rqx1IljVIgpkN2g+ajGntwgqolVeFle9jmBdl5svXx6w3d1fj979nKaNGBrHnppQ2i+DSfbHLCbYzZyH2DxCuaRDIgC65z9JW7JPEDfN+JZ+aZEsOEfL39drJltSNvcth54DWXAtpC5NCxs2L9kpv6a9rOCohVVCv8riTqBgK1VQaB05bqs85bRVHOYdUvcxmO50CQS8BIey67BZJcaiG/aCCUqDH02jjBEZMkLfSaTnxhbuFFeY2spgtJS1YFq3a+iv9NUgHkN9f6xulpRtMXdkDN06A+RaBWwIrfEMgrhTt0nAK+/bhqEnC31LXTeClPOFmZcMioq5NePH+BROrikPa9zhB4ZaCG1SNOyQ2PAvvbu7unr2IXfBEkYV9AViAzPlCcb8BBD4htqQqxEviRJjbgB0sqQK8DhGRBZpQglvRUML9HJ8sOMx30qIKU7PcRTr/A+1/6J9eob9//of+qYX+/ll/ecxvOPkjIqGUcC2ySgL8IaPMJiHlxE5DgCRSqTY31Sq+jlYASSw58456XlQMOFDK8TBQ0FVKV/X0Q2GhYhx5aICK88XDz1rXe9iBoBBqntgGHThAp1Z+Lx/pw5AMYif3DGLhb4nW4gTSyCPh18xkv3Uay5mddauyjz2Yf5Z605z37vXMvBxOZw/2fu1k2GoFx9Ef65K/4d6vlNaabMOcRB2HlszTgQMI2QRzj/GKmKozlqupeX59bl4Mfn/8TPqomBT6TECCRzLEyMWqfB649onj57y0GRFxy/j62o0c6lcqrd1oMPt1PLkwry/n50MVaUBT1evxMqsrt3ujYSl6JbEiWc2eZhw+HoK/SdLSWqM3GtZMAiuH4oQ0Gpqz8/9WT7aaO/krJnkq6+CTk5fGHk5gHhJY4HClEp6gPpycyIAcFzAgQ4a1wjyE57LpgSiQmzJHLqmPXdAMraxiKXCB+ULla2WJWpU6I6YmoW2vQYW6g/q4iKGosFQqqXy/UjQfR4OZ+e8tnzoSnyikupKlGrHjVruUE4pb5nsZRS1OCxkE1WYQPR63Yc8+e60LzHXni3aom2NP7g5oKxhxPVVKWY8Q9JH7jBLiklnW33co+Rqoalgp2uef6JppzYeWjzK1SggrRJ5i+Cp7WPKsO54jYg5o8+Vx16FeFYG1Yrc+oAlwxsRb+VEHI5tvNIH/ePOmvJsLvBIV/myv/1eJvi59lOWfv4twrwQrYFKImQQPdv+PNJzrZ+jlBSD1HHNJXRJgseoYUcgNNSJSL3pQz6n6NtkQK1IPPxP3bhZRQDYsxFaIPMo547pK0sR2iO4TIbHK/6gQptBNS3+jn2ngHrSww6vzWs8mG8GxJX7ZBoSvo0U+luldDs3ZeHzZacaDPsWWZqyYR4xVCo2ydzG
M5u5i/n4wGQ1mg2neq8RHo0JDMfSwQ6CZoofm7pffrwcTeVjFpfJYJIF6YBSU6VlrSjLzLuwEeitirSW+WwIW9iHhFyT1LhGqn5bfLeGCTbn8u4UlZx5kTD4LYUF9zLewZLKhLs42YpJAPdFAsgBCxBd8GzDqC9A0QDegSHqrPivMQvYaECALNFlKlg1nl9D5Q0LjPkan1UxNvJuEfSM5o4F2gC3ZQPfoqh6bJdx6bJZwH4ONExHxQr9aHDTFYeJAjE8WW6YtODCBJNiUrqSeA5EvGwpALDWaektc0kbBIpvjqbTEZm88GYyn5nhqjrpXg6pBWqkp5nRJ+aUvMknlNKqSroWVos/xZgPbIi4oAle5PULMV5nGEWKfbBil508X8d1d376ITx50LHH6edIlNX3MV46ZlTgPgmQGftyi8wTxACbqORVjTRPkzRPVdAxcuw/dPUI8Aq4dPDyov7RuObXPpFU8wtzLI0flxrH8E/eY1UxzNJ8lj12HV91z5e6d5unRVv+sBW2Zo1uFvCWvguYhliNVtUq/1/PC5Cl9PXAelOnNH8EuI98SlPmQwGU2E8NNoryxj2OaXDUFM20pRJI9wUh2AKEl4x4W8Gy30yckYCEVjG/3+7e7nT7Dzn7/LH3CgW4GoNXaRPOudjmetH5/ZBfVL5taet+zPNfmTyXjB2xSmKbFbNJp/ueBLzezPfiuA61jriuVkUEWHDabhhxKbq9JfNqRWJCmKo9C9ein//q8P3Tmfa02pdY5c13Cr7CPnVT7fymtWi6LbNVlx5wiL2b1L6tksgkYV681HPXZerjj1vDA+PG0BWkZdP+VT0FxDzV52Kp5ry+doqev32SLyye8VVUXGe3KFfe9Mc1uCOfUJskQtKfG6cWBkWwRPwwvB9fd2S/pi+/SVONP3Vo6um382DJVt2X6RFqVxCF34twnWGSt4uFKEVlsBRYW8PPPMBh/gHfv6oBsLHAsh7fqgGqO3xaergTuVl4rqU+ufgtLLEuMwfhDY984kX/+NwAA//9N3Q7LRDAAAA== + +- path: /opt/azure/containers/provision_configs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/7xbe3OjRrb/X5+il+jG49pgeR5JJt7IVRjaXmIJFECT8Y1TVAtacsc8NE1jWzv2d7/VzUMgAcI1t9Z/yB5xzu88+ry6m/nuH6MFiUYLlNwNDFODrm5o8PN4+OYuTliEQgyeAUMkALIH3h1nJIYyhRWK4wFZgj//BEPTBuMxGKqmBU3bNW1BCP7661+A3eFoAAAAM0v/pDjQ1Wfj4RuyBggkd/EjwOzuFDyDFcVrIM9icEQizMDt9Z+3/slf/zw6HuAgwfsApY6yDp6BlzIg+0fgCMjLt8eDJRlAR9XcGYSWO7cmY+mOsXVyNhoNv25RXs7evf94KmWk6kSHhtNN/PMv0mDgxdGSrFKKFT8k0TzB9M3xV6Gfd4dWGMgQyG+5VvwzBKdAnoJf+A+Qhl8VbaobcxtaL1KVJdh99lKRY2OPYpYUUpSZbkPrE7TcQrlreOPOFOffY2mEmTe6TxeYRpjhZORhypIRWpME0wdMT+7xJpPL4tS7E0Jb0UoNw9gHpz+dnvYkjx8jQOOYnfGPgzyCSVV62+KhBiMa+Bu176LbU7uZWFCLgHnlImDm+a2r0AXYaEkPhiwx5f8IW0x7atru3NJfpHpSFqZz9c74R0/0Jam4Is+d17jCCwiOWJsrWgDbXXGIYW91D3BtbRMl5DWWrTGmw6/bevrSZmQjcruJXeTfutZd2LWVzoNChZajX+oqJ+8Z9B5lHUG/C7jjhg8fejM0r3Q7114Uv8a2PIpbbGsBbLftEENnFHfaJhb4NZbtR3GLkY3I7SZ2kzcb2MYjmBLMwD+fxJ/Yu4tbG86LBJ7BAiX4pw9Aln3sxT4G5716WoFb7wjtgO1tpkBqKbDtkD0qcg17v7IdwO4uoDVssRyHEbtqSpMfKgvczw8tUdTkh/7Y3Sm47wdO2McRDYAvgwGOkpRia6a+OQbZdJdsEoZDjwUwQosAK5FvM0QZoGtvQSIfPD8D/EQYGELLcu0b24FT1Zm4tqNYjnup6JNDKHLCEOuDU6qnpD5hWq5h1ml41sw13dFcaCgXE6i9SHz8ZzTF9ZbTpgjimP2sKcd/sgRozUBAEgZkmUQJQ0GA/WLvcJRhHtUaHlozd4WZu07pCoN3p+D9KXj77rSQ/33R4cRHdei+Thc4wMwWzUvFlJUrdD2/gBPovHb+u88Ad0fAHTQeJq+AETVZ4MRrHCVJAFY4ogkCcpwyMDygKnh3+uFjjZviL0CO8COQn348/QXIPtok4Of3p6dAvsebw4CNYkubgJyki7+BNFKNcd5b+C5xZ7tz/THZ83Xv3UHbVNkG1DhuHSTea1MdHLt7tvnFRFdfsWXbb7utYI1d9zB114ZtjyUz53/nFnR/s02jxQj0n5Tik7+TONrVvc7ZvMdsodnXc4+wbSJoWJ/2on1g+RumjNJLvYaMxmUQH5xCVyGXbKj6TJkU7ciGqgWd8fDrAYrR6PZ2dHt7e/vy/4YncTwpw/MQA7/+CqB5mRu1twBZ2kpeEKe+dAYkhQfCLF0ExFPFdz9kBAxHKGK6oBl+daChGI6ray/F8yRdJB4la0biqKCy5xe2aukzRzeNKi1CviryviRsM6qJKTtdOcCYeaNkpjiJU+rhKxqn64zVgrY5t1ToXlnmfFZSBrGHuA0Z0cRUFa59+fghdDZrnD38NHWdmxmsuiDCzEAhLs03Kjok2EspYRuhw5bKgM4fpnXNNZ5bunOzo89DDfKTbjlzZeLmTDUqa9/GHXK3xWYapww7vN1vJVnm3IGuw2eFkm5NSYjoRnlAJEALEhC2savazSx9qlg3rvJJ0SfKhT7h5tgVF+QAtocC3Mhpq8oE1lhEYM5o/EB8TC+Qdx8vl9PYz/nUiTnXZpb5Sdeg5V4o6rV5eelOTQ12AkhnoIX3pYPLwowSnLQzuxZ0LB3aXSDwaR1HOGIdKPDzzDSg4XTBaCktwrQNRptbWex2wPxGGMO0A+Q33XGg1QhhIYYnJCRNpliKAyf6VG+2oeT8fWZ3Mbu/z5p9WQJcpN497lTAvZir1/CwHn9QwvAhZdw/LN2BfVQ6DJfpVUdMEzxFEVphX/dxxAjbwCeGo6RY6LkN3aliKFdQc3UNGg5PMPjZgYZdWeg0wVRJErKKtji6liXM3IaWq9i2fmVUMSp1Nk2wzif1yMNTzJCPGCpl64btKIYK3Sl0FE1xlJeyaiL/AgWcidr3aVE8Fc29UCacw3Lt63kpwycJrzZmyhZxGvm2oThCRp1D021eflxz7lyYc0NzOV0hET95QerjKUoYppc0Dm2GIh9Rf3IhoOBndTLXuLtsB1rupWVO+S7F0BRLcycXL2VFypbvE0qDSj26ntpuuWaflPnEyQffnC1ETyRMw0
nFbCsNsBqneWZPlc/6dD51uUWlQdZ8Al3VnG9TuxB/jTeF8PuPibT/9BOmeRRIfPiG5mU5OclPlRPFAzURjIH08G7vlJED+UAmQBp11ayRL4H2wbEfVlZxeiAVp5jtG7zaJkQ19HITIj4oZnTjhb5Llu4SkSClWGwkfwTvfgRh7K9pvMBgQd0IsyUJGKb1ze3U5E68gNs9upgj5QhIVSYJnGeWhLGfBjiReSqc+KMqzQnXsm6Mauj6TLTcJFN5x5biYWlTubxFL59N5le6kS2pcF3DqoYPYKgaunuhG66mW6O3p3LmZS5KbMvFY9U0LvUrQVGyZhM+H/B3SfZASp59Hc2Jrt5kRw2ShwLixQ1alhFzlHwnhaK3SwtK/BWWyn8ziqJkjSjvnN+tjl6hFA661ZL4src9jOJI+BV8//0ORpFPY1DT7b9tnTgJyX5GyYJEI7xgInCAzECEGJDlkj47MMmOiNQ4YohEvGxFjIR4L85U03AU3RBVy3D0Kcxs9WPvngf9npkZrCYeDyqa7RCUcv0y7Hc18nNdHhFh7jKm7pIEInVPwdss1bIzKj//PfJKzhO+BScePvFH+Al7bsIQZcJp9dy+1CfQ/UNx1H+73DRz7nTK2woYZTl6wuIw6AE5qJynAZLIyGPkAQNZ/pISzEDmTB5bb0oyN++NlaOvnKzh6E1zNVO95s3VMWeiUB0P2o/wtmb0O0wcVNe0DJBCYr7zgp+hWvDpEzhuWqDMgPbFEbiN7h92S+uxBADwcYjXMhldFa6sXt1XjZry3uxeTpQru/Rwf6u8ACPqhnwGcNc0XqOVGNHdZYBWydbQ7fsX/zjw/sVBt7Tp28sveYLmUOJMYGtrZtzIRziMo0pTjikggERg+CbBX8BbodDxv4Af15oAkJNSxxIY7FdGAP7+Ao5OjsCvDeTffw8WFKP7plonhAwJkLkS3CdN2Ac9kLWH/LC6rNYBxmvwdleqH0e484C8KUvL7Kyd9dcjztAdc5vPXD3rUMzJYRwRFtMTXrnp4expF9IrUFpQ+iqZJ8jr1eytYPeS1L3Vs/SV1S+fOvcOuDV4KbYFlZTBS76JKA78O+xtwuhlKGfMx4JM7gOio4AsCpnid9aiDoivoPSWzFXOh4FiOMmO93Jl4jXLhvhtwyyvP06Sux4OaUf/pjjIdahjlHceDclZziZZDEzQAgdG7FfG8YlyASeuYWrQ7uGEgAPIEUfodkQLbC/ra7yHcrSm0cEEbYP+plWpqPCay83f4pRGKChX4uu2lItzfpvFFK3weM13zAnjI/UuhVBoip7mCR6/vdp9nE/Erc8vY/qIqO/E9iYJ4tV4g5OM5AWcn9cH1L8zXf1t829zRs4hFxyv8cj1x4QPzzQOZgGKaqP80IIXpulY8Pe5bkGNo/Lh3jDLQxWxf2Q05ducnR5KMUtpVJ0VOvfUWV1xJuDd+cjHD6MoDQLgBWnCMJVJtIx38u+jzfPd0I2rMlz4JphixETVnaKILHHCNEJr1XeqGPoltB1Nt8a7ZwhhzpNtqsN7n1AgrzPVKnxc0CMlmRxV1MuaiKz+CAF3cYhHw3JcHJ1waTuEIsuGNcZRpQrX1ShJxLPsnm1Yx6pcn20ln23/bALqSV6B5zPxz3x/3wRWbv4bWPeu6wayLA/QmuSHVGfg4e0gX/jkbCAXQXCWYWPKyJJ4iGEZpewupoRtZB8xdAZupaGqVN+8uJVyifQB07Na68ou5wYARCjEgrU46vtdM26lAS+/+IllCmR/5wrk2uyzgHy3sPtIRn5IIkHQJiylfPMuF4L2Ke5J5J+BLNYGXIhQrAmuIk0okytNBH7FeaXLKk7Zd13OeI83jQzX8OZWGkjgvHWl5aft2ZSauU5JWZx4KMBU8f04KlNHncyFFcrcMcVNjuUqmlbbV1TP+zhvMioqBCpBZR+vg3gT4oidbFAYdDSmTon9ulN+WJk8/5q5StfOn4dvRGAPO+4ly2vj4+eVdECRZkE29npIyi4yv0Vaki5qNtUvZb8FOb8TroCX98LfAktXW8D6jeWrUMuoVVRdjaMIeyzeCVhFFcdsBlT5vkO1oLgPUSb2ePhmTUnElkD6eivlgeHfSjx9/ie5lX4AxbfZbXT9SXFXXv+2fkNef5YdDGmECiU3MPLXMYnYnAYZXfH/G4J4RaKTkHg0TuIli6OARPjEi8Nb6Yfb8pY7u0OirSiheC7yqzhWDEeNilxRtL4rLpYLpQuYFX948kgiP35MTiLMcozkSzAtJfRRwospruKcffzw4X0OtkJBgDs8kj/fMyT8Jg1Gt9KLBKSuAtD5OMtaTlJmhKCv51713ZNHcHpcfzGseLXrHm8ATdDZh9Nffsrf9cqG5uorX+9/+jF75StO2d7NSv6Gkkd4f8rygJOerHGYvQDWh4F/l3EU74SN5/bIdsaqMpqMHwhlKQqKfai598V89xvV2PlGakjJa3gzHr7JffQqsx5Pj5tSHFrO6wC3Zj8WC1SHPNzh6oD9m1ubnFf3NYr95Px52FrtRDFtk7ZfnQ0U4vPnndL8Kgzu1H2FoOW8CuUeb/ZAruFNN8a2K4gXneKAeJudpiDekcqugfqsLseR1wKocXGrvisr6fnzqNoPktFubRjVXllKRo3ubtZUHGB9B83Lwf8FAAD//60EdekwOAAA + + + + + + + +- path: /etc/systemd/system/kubelet.service + permissions: "0644" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/6SUX0/zNhTG7/MprIqL7cJJW7pXfffKFzACq6gYIkVclCpynENj1bEzn+PSbuy7T/0Do7RoIHrVHD+/5zk5cs741mqaRGeAyuuGtLPiMhRggKLfnC31qnItqUoXGglFEtAnxilpkkLbZLaVRtE4Az/XCibRDSBJT0KaR7nEKLVz7Z2twdK5NiASIJWU8CCDoRc+C0oBYrrQlJGkgKLTO47SBahs5XXtQazzCokVS1xDifwreEiUsyS1BY/PVjFWB7h6VmrPeMOSufSJ0cVL8ke0yur3euGKtfQDG7Ojn2oXLLEnNvXQsPvW26T7Fntij4px8zPjBlibTdgPRhVYtold45wX2pZ7be4XfrAH3TrU/damljPgWEkP+25R9AbEFYlLVGQYf2QWKNbNvBeTanIP5DVgV/T/H1LOQ4yulgvlrBWdb8f93ieiarnIcWnzQqqZcdOP8uvUGhDlFHLlkESv/UmqCB5J9NsfHYwFPa3i7S2OpyqnygNWHdFrf//2NY+u6He+d7/mcbwd3a4L39hAQbIwgIwTs3J1V4xGOijVzVupDTV4rfagdxYDu4/Y9sc52JUZR/Bz8Dsn1pXAjSzAoGgd/X15e5oO01F+9cdZmg9PTtNh9k9rB5iLLuP85evnPljSNQgPtSNg/LnAPfwZAImvHlwg0fmlPgRysGXjtCURrF78miSJD/a/7VK++hujU7PdZpwJNfDGhKm2vNR+s+NWI/AWCDDZKDYCfMUePb/p+fDkIjt0cJNeDLJRerOexYHy3WD0ez46GVyNsigaDyySNGYS3UlLUJ4uRR0MaR4QfEzST4H+DQAA//+W9CWC8AUAAA== + + + +- path: /etc/apt/apt.conf.d/99periodic + permissions: "0644" + owner: root + content: | + APT::Periodic::Update-Package-Lists "0"; + APT::Periodic::Download-Upgradeable-Packages "0"; + APT::Periodic::AutocleanInterval "0"; + APT::Periodic::Unattended-Upgrade "0"; + + + + + + + + +- path: /etc/containerd/config.toml + permissions: "0644" + owner: root + content: | + subreaper = false + root = "/mnt/containers" + oom_score = 0 + [plugins.cri] + sandbox_image = "oss/kubernetes/pause:1.3.1" + [plugins.cri.containerd.untrusted_workload_runtime] + runtime_type = "io.containerd.runtime.v1.linux" + runtime_engine = "/usr/bin/runc" + [plugins.cri.containerd.default_runtime] + runtime_type = "io.containerd.runtime.v1.linux" + runtime_engine = "/usr/bin/runc" + + #EOF + +- path: /etc/containerd/kubenet_template.conf + permissions: "0644" + owner: root + content: | + { + "cniVersion": "0.3.1", + "name": "kubenet", + "plugins": [{ + "type": "bridge", + "bridge": "cbr0", + "mtu": 1500, + "addIf": "eth0", + "isGateway": true, + "ipMasq": true, + "promisMode": true, + "hairpinMode": false, + "ipam": { + "type": "host-local", + "subnet": "{{.PodCIDR}}", + "routes": [{ "dst": "0.0.0.0/0" }] + } + }] + } + +- path: /etc/systemd/system/containerd.service + permissions: "0644" + owner: root + content: | + [Unit] + Description=containerd daemon + After=network.target + + [Service] + ExecStartPre=/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + Delegate=yes + KillMode=process + OOMScoreAdjust=-999 + LimitNOFILE=1048576 + LimitNPROC=infinity + LimitCORE=infinity + + [Install] + WantedBy=multi-user.target + + #EOF + +- path: /etc/systemd/system/containerd.service.d/exec_start.conf + permissions: "0644" + owner: root + content: | + [Service] + ExecStartPost=/sbin/iptables -P FORWARD ACCEPT + #EOF + +- path: /etc/crictl.yaml + permissions: "0644" + owner: root + content: | + runtime-endpoint: unix:///run/containerd/containerd.sock + #EOF + + + + + + +- path: /etc/kubernetes/certs/ca.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + +- path: /etc/kubernetes/certs/client.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + + + +- path: /var/lib/kubelet/kubeconfig + permissions: "0644" + owner: root + content: | + apiVersion: v1 + kind: Config + clusters: + - name: localcluster + cluster: + certificate-authority: /etc/kubernetes/certs/ca.crt + server: https://:443 + users: + - name: client + user: + client-certificate: /etc/kubernetes/certs/client.crt + client-key: /etc/kubernetes/certs/client.key + contexts: + - context: + cluster: localcluster + user: client + name: localclustercontext + current-context: 
localclustercontext + #EOF + +- path: /etc/default/kubelet + permissions: "0644" + owner: root + content: | + KUBELET_FLAGS= + KUBELET_REGISTER_SCHEDULABLE=true + KUBELET_IMAGE=hyperkube-amd64:v1.15.7 + + + KUBELET_NODE_LABELS=kubernetes.azure.com/role=agent,node-role.kubernetes.io/agent=,kubernetes.io/role=agent,agentpool=agent2,storageprofile=managed,storagetier=Premium_LRS,kubernetes.azure.com/cluster=',variables('labelResourceGroup'),' + + #EOF + +- path: /opt/azure/containers/kubelet.sh + permissions: "0755" + owner: root + content: | + #!/bin/bash + + + + #EOF + +runcmd: +- set -x +- . /opt/azure/containers/provision_source.sh +- aptmarkWALinuxAgent hold +'))] \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line16.sh b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line16.sh new file mode 100644 index 00000000000..7058a85e6f8 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line16.sh @@ -0,0 +1,158 @@ +#!/bin/bash +ERR_FILE_WATCH_TIMEOUT=6 +set -x +if [ -f /opt/azure/containers/provision.complete ]; then + echo "Already ran to success exiting..." + exit 0 +fi + +echo $(date),$(hostname), startcustomscript>>/opt/m + +for i in $(seq 1 3600); do + if [ -s /opt/azure/containers/provision_source.sh ]; then + grep -Fq '#HELPERSEOF' /opt/azure/containers/provision_source.sh && break + fi + if [ $i -eq 3600 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi +done +sed -i "/#HELPERSEOF/d" /opt/azure/containers/provision_source.sh +source /opt/azure/containers/provision_source.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_installs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_installs.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_configs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_configs.sh + +set +x +ETCD_PEER_CERT=$(echo ${ETCD_PEER_CERTIFICATES} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +ETCD_PEER_KEY=$(echo ${ETCD_PEER_PRIVATE_KEYS} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +set -x + +if [[ $OS == $COREOS_OS_NAME ]]; then + echo "Changing default kubectl bin location" + KUBECTL=/opt/kubectl +fi + +if [ -f /var/run/reboot-required ]; then + REBOOTREQUIRED=true +else + REBOOTREQUIRED=false +fi + +configureAdminUser + +if [[ "${GPU_NODE}" != "true" ]]; then + cleanUpGPUDrivers +fi + +VHD_LOGS_FILEPATH=/opt/azure/vhd-install.complete +if [ -f $VHD_LOGS_FILEPATH ]; then + echo "detected golden image pre-install" + export -f retrycmd_if_failure + export -f cleanUpContainerImages + export KUBERNETES_VERSION + echo "start to clean up container images" + bash -c cleanUpContainerImages & + FULL_INSTALL_REQUIRED=false +else + if [[ "${IS_VHD}" = true ]]; then + echo "Using VHD distro but file $VHD_LOGS_FILEPATH not found" + exit $ERR_VHD_FILE_NOT_FOUND + fi + FULL_INSTALL_REQUIRED=true +fi + +if [[ $OS == $UBUNTU_OS_NAME ]] && [ "$FULL_INSTALL_REQUIRED" = "true" ]; then + installDeps +else + echo "Golden image; skipping dependencies installation" +fi + +if [[ $OS == $UBUNTU_OS_NAME ]]; then + ensureAuditD +fi +installContainerRuntime + + +installNetworkPlugin + +installKubeletAndKubectl + +if [[ $OS != $COREOS_OS_NAME ]]; then + ensureRPC +fi + +createKubeManifestDir + +ensureContainerRuntime + +configureK8s + +configureCNI + + + +ensureKubelet +ensureJournal + +if $FULL_INSTALL_REQUIRED; then + if [[ $OS == $UBUNTU_OS_NAME ]]; then + + echo 
2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind + sed -i "13i\echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind\n" /etc/rc.local + fi +fi +rm -f /etc/apt/apt.conf.d/99periodic +if [[ $OS == $UBUNTU_OS_NAME ]]; then + apt_get_purge 20 30 120 apache2-utils & +fi + + +VALIDATION_ERR=0 +API_SERVER_DNS_RETRIES=20 +if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_DNS_RETRIES=200 +fi +RES=$(retrycmd_if_failure ${API_SERVER_DNS_RETRIES} 1 3 nslookup ${API_SERVER_NAME}) +STS=$? +if [[ $STS != 0 ]]; then + if [[ $RES == *"168.63.129.16"* ]]; then + VALIDATION_ERR=$ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL + else + VALIDATION_ERR=$ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL + fi +else + API_SERVER_CONN_RETRIES=50 + if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_CONN_RETRIES=100 + fi + retrycmd_if_failure ${API_SERVER_CONN_RETRIES} 1 3 nc -vz ${API_SERVER_NAME} 443 || VALIDATION_ERR=$ERR_K8S_API_SERVER_CONN_FAIL +fi + + + +if $REBOOTREQUIRED; then + echo 'reboot required, rebooting node in 1 minute' + /bin/bash -c "shutdown -r 1 &" + if [[ $OS == $UBUNTU_OS_NAME ]]; then + aptmarkWALinuxAgent unhold & + fi +else + if [[ $OS == $UBUNTU_OS_NAME ]]; then + /usr/lib/apt/apt.systemd.daily & + aptmarkWALinuxAgent unhold & + fi +fi + +echo "Custom script finished. API server connection check code:" $VALIDATION_ERR +echo $(date),$(hostname), endcustomscript>>/opt/m +mkdir -p /opt/azure/containers && touch /opt/azure/containers/provision.complete +ps auxfww > /opt/azure/provision-ps.log & + +exit $VALIDATION_ERR + +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line23.sh b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line23.sh new file mode 100644 index 00000000000..784bb0e1999 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line23.sh @@ -0,0 +1,296 @@ +#!/bin/bash + +CC_SERVICE_IN_TMP=/opt/azure/containers/cc-proxy.service.in +CC_SOCKET_IN_TMP=/opt/azure/containers/cc-proxy.socket.in +CNI_CONFIG_DIR="/etc/cni/net.d" +CNI_BIN_DIR="/opt/cni/bin" +CNI_DOWNLOADS_DIR="/opt/cni/downloads" +CONTAINERD_DOWNLOADS_DIR="/opt/containerd/downloads" +K8S_DOWNLOADS_DIR="/opt/kubernetes/downloads" +UBUNTU_RELEASE=$(lsb_release -r -s) + +removeMoby() { + apt-get purge -y moby-engine moby-cli +} + +removeContainerd() { + apt-get purge -y moby-containerd +} + +cleanupContainerdDlFiles() { + rm -rf $CONTAINERD_DOWNLOADS_DIR +} + +installDeps() { + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/packages-microsoft-prod.deb > /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT + retrycmd_if_failure 60 5 10 dpkg -i /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_PKG_ADD_FAIL + aptmarkWALinuxAgent hold + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT + apt_get_dist_upgrade || exit $ERR_APT_DIST_UPGRADE_TIMEOUT + for apt_package in apache2-utils apt-transport-https blobfuse=1.1.1 ca-certificates ceph-common cgroup-lite cifs-utils conntrack cracklib-runtime ebtables ethtool fuse git glusterfs-client htop iftop init-system-helpers iotop iproute2 ipset iptables jq libpam-pwquality libpwquality-tools mount nfs-common pigz socat sysstat traceroute util-linux xz-utils zip; do + if ! apt_get_install 30 1 600 $apt_package; then + journalctl --no-pager -u $apt_package + exit $ERR_APT_INSTALL_TIMEOUT + fi + done + if [[ "${AUDITD_ENABLED}" == true ]]; then + if ! 
apt_get_install 30 1 600 auditd; then + journalctl --no-pager -u auditd + exit $ERR_APT_INSTALL_TIMEOUT + fi + fi +} + +installGPUDrivers() { + mkdir -p $GPU_DEST/tmp + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/gpgkey > $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-key add $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/ubuntu${UBUNTU_RELEASE}/nvidia-docker.list > $GPU_DEST/tmp/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 cat $GPU_DEST/tmp/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + apt_get_update + retrycmd_if_failure 30 5 3600 apt-get install -y linux-headers-$(uname -r) gcc make dkms || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + retrycmd_if_failure 30 5 60 curl -fLS https://us.download.nvidia.com/tesla/$GPU_DV/NVIDIA-Linux-x86_64-${GPU_DV}.run -o ${GPU_DEST}/nvidia-drivers-${GPU_DV} || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + tmpDir=$GPU_DEST/tmp + if ! ( + set -e -o pipefail + cd "${tmpDir}" + retrycmd_if_failure 30 5 3600 apt-get download nvidia-docker2="${NVIDIA_DOCKER_VERSION}+${NVIDIA_DOCKER_SUFFIX}" || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + ); then + exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + fi +} + +installSGXDrivers() { + echo "Installing SGX driver" + local VERSION + VERSION=$(grep DISTRIB_RELEASE /etc/*-release| cut -f 2 -d "=") + case $VERSION in + "18.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer18.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "16.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer16.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "*") + echo "Version $VERSION is not supported" + exit 1 + ;; + esac + + local PACKAGES="make gcc dkms" + wait_for_apt_locks + retrycmd_if_failure 30 5 3600 apt-get -y install $PACKAGES || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + + local SGX_DRIVER + SGX_DRIVER=$(basename $SGX_DRIVER_URL) + local OE_DIR=/opt/azure/containers/oe + mkdir -p ${OE_DIR} + + retrycmd_if_failure 120 5 25 curl -fsSL ${SGX_DRIVER_URL} -o ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + chmod a+x ${OE_DIR}/${SGX_DRIVER} + ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_START_FAIL +} + +installContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + installMoby + fi + + installContainerd + +} + +installMoby() { + CURRENT_VERSION=$(dockerd --version | grep "Docker version" | cut -d "," -f 1 | cut -d " " -f 3 | cut -d "+" -f 1) + if [[ "$CURRENT_VERSION" == "${MOBY_VERSION}" ]]; then + echo "dockerd $MOBY_VERSION is already installed, skipping Moby download" + else + removeMoby + getMobyPkg + MOBY_CLI=${MOBY_VERSION} + if [[ "${MOBY_CLI}" == "3.0.4" ]]; then + MOBY_CLI="3.0.3" + fi + apt_get_install 20 30 120 moby-engine=${MOBY_VERSION}* moby-cli=${MOBY_CLI}* --allow-downgrades || exit $ERR_MOBY_INSTALL_TIMEOUT + fi +} + + +# Note: currently hard-coding to install 1.3.4 until 1.4.x is available +# once we have updated moby-engine and moby-containerd we will update the vhd builder to install both +installContainerd() { + CURRENT_VERSION=$(containerd -version | cut -d " " -f 3 | sed 's|v||') + # we want at least 1.3.x - need to safeguard 
against aks-e setting this to < 1.3.x + if [[ ! "${CONTAINERD_VERSION}" =~ 1\.[3-9]\.[0-9].* ]]; then + echo "requested ${CONTAINERD_VERSION} is not supported, setting desired version to 1.3.4" + CONTAINERD_VERSION="1.3.4" + fi + if [[ "${CONTAINERD_VERSION}" == "${CURRENT_VERSION}" ]]; then + echo "containerd version ${CURRENT_VERSION} is already installed, skipping installContainerd" + else + apt_get_purge 20 30 120 moby-engine || exit $ERR_MOBY_INSTALL_TIMEOUT + retrycmd_if_failure 30 5 3600 apt-get install -y moby-containerd=${CONTAINERD_VERSION}* || exit $ERR_MOBY_INSTALL_TIMEOUT + rm -Rf $CONTAINERD_DOWNLOADS_DIR & + fi +} + + +getMobyPkg() { + retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/prod.list > /tmp/microsoft-prod.list || exit $ERR_MOBY_APT_LIST_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft-prod.list /etc/apt/sources.list.d/ || exit $ERR_MOBY_APT_LIST_TIMEOUT + retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /tmp/microsoft.gpg || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft.gpg /etc/apt/trusted.gpg.d/ || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT +} + +installNetworkPlugin() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + installAzureCNI + fi + installCNI + rm -rf $CNI_DOWNLOADS_DIR & +} + +downloadCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadAzureCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${VNET_CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadContainerd() { + CONTAINERD_DOWNLOAD_URL="${CONTAINERD_DOWNLOAD_URL_BASE}cri-containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz" + mkdir -p $CONTAINERD_DOWNLOADS_DIR + CONTAINERD_TGZ_TMP=${CONTAINERD_DOWNLOAD_URL##*/} + retrycmd_get_tarball 120 5 "$CONTAINERD_DOWNLOADS_DIR/${CONTAINERD_TGZ_TMP}" ${CONTAINERD_DOWNLOAD_URL} || exit $ERR_CONTAINERD_DOWNLOAD_TIMEOUT +} + +installCNI() { + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadCNI + fi + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR + chown -R root:root $CNI_BIN_DIR + chmod -R 755 $CNI_BIN_DIR +} + +installAzureCNI() { + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! 
-f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadAzureCNI + fi + mkdir -p $CNI_CONFIG_DIR + chown -R root:root $CNI_CONFIG_DIR + chmod 755 $CNI_CONFIG_DIR + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR +} + +installImg() { + img_filepath=/usr/local/bin/img + retrycmd_get_executable 120 5 $img_filepath "https://acs-mirror.azureedge.net/img/img-linux-amd64-v0.5.6" ls || exit $ERR_IMG_DOWNLOAD_TIMEOUT +} + +extractHyperkube() { + CLI_TOOL=$1 + path="/home/hyperkube-downloads/${KUBERNETES_VERSION}" + pullContainerImage $CLI_TOOL ${HYPERKUBE_URL} + if [[ "$CLI_TOOL" == "docker" ]]; then + mkdir -p "$path" + # Check if we can extract kubelet and kubectl directly from hyperkube's binary folder + if docker run --rm --entrypoint "" -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /usr/local/bin/{kubelet,kubectl} $path"; then + mv "$path/kubelet" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/kubectl" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + return + else + docker run --rm -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /hyperkube $path" + fi + else + img unpack -o "$path" ${HYPERKUBE_URL} + fi + + if [[ $OS == $COREOS_OS_NAME ]]; then + cp "$path/hyperkube" "/opt/kubelet" + mv "$path/hyperkube" "/opt/kubectl" + chmod a+x /opt/kubelet /opt/kubectl + else + cp "$path/hyperkube" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/hyperkube" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + fi +} + +installKubeletAndKubectl() { + if [[ ! -f "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" ]]; then + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + extractHyperkube "docker" + else + installImg + extractHyperkube "img" + fi + fi + mv "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" "/usr/local/bin/kubelet" + mv "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" "/usr/local/bin/kubectl" + chmod a+x /usr/local/bin/kubelet /usr/local/bin/kubectl + rm -rf /usr/local/bin/kubelet-* /usr/local/bin/kubectl-* /home/hyperkube-downloads & +} + +pullContainerImage() { + CLI_TOOL=$1 + DOCKER_IMAGE_URL=$2 + retrycmd_if_failure 60 1 1200 $CLI_TOOL pull $DOCKER_IMAGE_URL || exit $ERR_CONTAINER_IMG_PULL_TIMEOUT +} + +cleanUpContainerImages() { + function cleanUpHyperkubeImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'hyperkube') + local exit_code=$? + if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + function cleanUpControllerManagerImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'cloud-controller-manager') + local exit_code=$? 
+ if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + export -f cleanUpHyperkubeImagesRun + export -f cleanUpControllerManagerImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpHyperkubeImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpControllerManagerImagesRun +} + +cleanUpGPUDrivers() { + rm -Rf $GPU_DEST + rm -f /etc/apt/sources.list.d/nvidia-docker.list +} + +cleanUpContainerd() { + rm -Rf $CONTAINERD_DOWNLOADS_DIR +} + +overrideNetworkConfig() { + CONFIG_FILEPATH="/etc/cloud/cloud.cfg.d/80_azure_net_config.cfg" + touch ${CONFIG_FILEPATH} + cat << EOF >> ${CONFIG_FILEPATH} +datasource: + Azure: + apply_network_config: false +EOF +} +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line30.sh b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line30.sh new file mode 100644 index 00000000000..0e9a81a152e --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line30.sh @@ -0,0 +1,347 @@ +#!/bin/bash +NODE_INDEX=$(hostname | tail -c 2) +NODE_NAME=$(hostname) +if [[ $OS == $COREOS_OS_NAME ]]; then + PRIVATE_IP=$(ip a show eth0 | grep -Po 'inet \K[\d.]+') +else + PRIVATE_IP=$(hostname -I | cut -d' ' -f1) +fi +ETCD_PEER_URL="https://${PRIVATE_IP}:2380" +ETCD_CLIENT_URL="https://${PRIVATE_IP}:2379" + +configureAdminUser(){ + chage -E -1 -I -1 -m 0 -M 99999 "${ADMINUSER}" + chage -l "${ADMINUSER}" +} + +configureSecrets(){ + APISERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/apiserver.key" + touch "${APISERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${APISERVER_PRIVATE_KEY_PATH}" + chown root:root "${APISERVER_PRIVATE_KEY_PATH}" + + CA_PRIVATE_KEY_PATH="/etc/kubernetes/certs/ca.key" + touch "${CA_PRIVATE_KEY_PATH}" + chmod 0600 "${CA_PRIVATE_KEY_PATH}" + chown root:root "${CA_PRIVATE_KEY_PATH}" + + ETCD_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdserver.key" + touch "${ETCD_SERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_SERVER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_SERVER_PRIVATE_KEY_PATH}" + fi + + ETCD_CLIENT_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdclient.key" + touch "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chown root:root "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + + ETCD_PEER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.key" + touch "${ETCD_PEER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_PEER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_PEER_PRIVATE_KEY_PATH}" + fi + + ETCD_SERVER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdserver.crt" + touch "${ETCD_SERVER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_SERVER_CERTIFICATE_PATH}" + chown root:root "${ETCD_SERVER_CERTIFICATE_PATH}" + + ETCD_CLIENT_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdclient.crt" + touch "${ETCD_CLIENT_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_CLIENT_CERTIFICATE_PATH}" + chown root:root "${ETCD_CLIENT_CERTIFICATE_PATH}" + + ETCD_PEER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.crt" + touch "${ETCD_PEER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_PEER_CERTIFICATE_PATH}" + chown root:root "${ETCD_PEER_CERTIFICATE_PATH}" + + set +x + echo "${APISERVER_PRIVATE_KEY}" | base64 --decode > "${APISERVER_PRIVATE_KEY_PATH}" + echo "${CA_PRIVATE_KEY}" | base64 --decode > "${CA_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_PRIVATE_KEY}" | base64 --decode > "${ETCD_SERVER_PRIVATE_KEY_PATH}" + echo "${ETCD_CLIENT_PRIVATE_KEY}" | base64 --decode > 
"${ETCD_CLIENT_PRIVATE_KEY_PATH}" + echo "${ETCD_PEER_KEY}" | base64 --decode > "${ETCD_PEER_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_CERTIFICATE}" | base64 --decode > "${ETCD_SERVER_CERTIFICATE_PATH}" + echo "${ETCD_CLIENT_CERTIFICATE}" | base64 --decode > "${ETCD_CLIENT_CERTIFICATE_PATH}" + echo "${ETCD_PEER_CERT}" | base64 --decode > "${ETCD_PEER_CERTIFICATE_PATH}" +} + +ensureRPC() { + systemctlEnableAndStart rpcbind || exit $ERR_SYSTEMCTL_START_FAIL + systemctlEnableAndStart rpc-statd || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureAuditD() { + if [[ "${AUDITD_ENABLED}" == true ]]; then + systemctlEnableAndStart auditd || exit $ERR_SYSTEMCTL_START_FAIL + else + if apt list --installed | grep 'auditd'; then + apt_get_purge 20 30 120 auditd & + fi + fi +} + +configureKubeletServerCert() { + KUBELET_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/kubeletserver.key" + KUBELET_SERVER_CERT_PATH="/etc/kubernetes/certs/kubeletserver.crt" + + openssl genrsa -out $KUBELET_SERVER_PRIVATE_KEY_PATH 2048 + openssl req -new -x509 -days 7300 -key $KUBELET_SERVER_PRIVATE_KEY_PATH -out $KUBELET_SERVER_CERT_PATH -subj "/CN=${NODE_NAME}" +} + +configureK8s() { + KUBELET_PRIVATE_KEY_PATH="/etc/kubernetes/certs/client.key" + touch "${KUBELET_PRIVATE_KEY_PATH}" + chmod 0600 "${KUBELET_PRIVATE_KEY_PATH}" + chown root:root "${KUBELET_PRIVATE_KEY_PATH}" + + APISERVER_PUBLIC_KEY_PATH="/etc/kubernetes/certs/apiserver.crt" + touch "${APISERVER_PUBLIC_KEY_PATH}" + chmod 0644 "${APISERVER_PUBLIC_KEY_PATH}" + chown root:root "${APISERVER_PUBLIC_KEY_PATH}" + + AZURE_JSON_PATH="/etc/kubernetes/azure.json" + touch "${AZURE_JSON_PATH}" + chmod 0600 "${AZURE_JSON_PATH}" + chown root:root "${AZURE_JSON_PATH}" + + set +x + echo "${KUBELET_PRIVATE_KEY}" | base64 --decode > "${KUBELET_PRIVATE_KEY_PATH}" + echo "${APISERVER_PUBLIC_KEY}" | base64 --decode > "${APISERVER_PUBLIC_KEY_PATH}" + + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\\/\\\\} + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\"/\\\"} + cat << EOF > "${AZURE_JSON_PATH}" +{ + "cloud": "AzurePublicCloud", + "tenantId": "${TENANT_ID}", + "subscriptionId": "${SUBSCRIPTION_ID}", + "aadClientId": "${SERVICE_PRINCIPAL_CLIENT_ID}", + "aadClientSecret": "${SERVICE_PRINCIPAL_CLIENT_SECRET}", + "resourceGroup": "${RESOURCE_GROUP}", + "location": "${LOCATION}", + "vmType": "${VM_TYPE}", + "subnetName": "${SUBNET}", + "securityGroupName": "${NETWORK_SECURITY_GROUP}", + "vnetName": "${VIRTUAL_NETWORK}", + "vnetResourceGroup": "${VIRTUAL_NETWORK_RESOURCE_GROUP}", + "routeTableName": "${ROUTE_TABLE}", + "primaryAvailabilitySetName": "${PRIMARY_AVAILABILITY_SET}", + "primaryScaleSetName": "${PRIMARY_SCALE_SET}", + "cloudProviderBackoffMode": "${CLOUDPROVIDER_BACKOFF_MODE}", + "cloudProviderBackoff": ${CLOUDPROVIDER_BACKOFF}, + "cloudProviderBackoffRetries": ${CLOUDPROVIDER_BACKOFF_RETRIES}, + "cloudProviderBackoffExponent": ${CLOUDPROVIDER_BACKOFF_EXPONENT}, + "cloudProviderBackoffDuration": ${CLOUDPROVIDER_BACKOFF_DURATION}, + "cloudProviderBackoffJitter": ${CLOUDPROVIDER_BACKOFF_JITTER}, + "cloudProviderRateLimit": ${CLOUDPROVIDER_RATELIMIT}, + "cloudProviderRateLimitQPS": ${CLOUDPROVIDER_RATELIMIT_QPS}, + "cloudProviderRateLimitBucket": ${CLOUDPROVIDER_RATELIMIT_BUCKET}, + "cloudProviderRateLimitQPSWrite": ${CLOUDPROVIDER_RATELIMIT_QPS_WRITE}, + "cloudProviderRateLimitBucketWrite": ${CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE}, + "useManagedIdentityExtension": ${USE_MANAGED_IDENTITY_EXTENSION}, + "userAssignedIdentityID": 
"${USER_ASSIGNED_IDENTITY_ID}", + "useInstanceMetadata": ${USE_INSTANCE_METADATA}, + "loadBalancerSku": "${LOAD_BALANCER_SKU}", + "disableOutboundSNAT": ${LOAD_BALANCER_DISABLE_OUTBOUND_SNAT}, + "excludeMasterFromStandardLB": ${EXCLUDE_MASTER_FROM_STANDARD_LB}, + "providerVaultName": "${KMS_PROVIDER_VAULT_NAME}", + "maximumLoadBalancerRuleCount": ${MAXIMUM_LOADBALANCER_RULE_COUNT}, + "providerKeyName": "k8s", + "providerKeyVersion": "" +} +EOF + set -x + if [[ "${CLOUDPROVIDER_BACKOFF_MODE}" = "v2" ]]; then + sed -i "/cloudProviderBackoffExponent/d" /etc/kubernetes/azure.json + sed -i "/cloudProviderBackoffJitter/d" /etc/kubernetes/azure.json + fi + + configureKubeletServerCert +} + +configureCNI() { + + retrycmd_if_failure 120 5 25 modprobe br_netfilter || exit $ERR_MODPROBE_FAIL + echo -n "br_netfilter" > /etc/modules-load.d/br_netfilter.conf + configureCNIIPTables + +} + +configureCNIIPTables() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + mv $CNI_BIN_DIR/10-azure.conflist $CNI_CONFIG_DIR/ + chmod 600 $CNI_CONFIG_DIR/10-azure.conflist + if [[ "${NETWORK_POLICY}" == "calico" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + elif [[ "${NETWORK_POLICY}" == "" || "${NETWORK_POLICY}" == "none" ]] && [[ "${NETWORK_MODE}" == "transparent" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + fi + /sbin/ebtables -t nat --list + fi +} + +ensureContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + ensureDocker + fi + + ensureContainerd + +} + + +ensureContainerd() { + wait_for_file 1200 1 /etc/systemd/system/containerd.service.d/exec_start.conf || exit $ERR_FILE_WATCH_TIMEOUT + wait_for_file 1200 1 /etc/containerd/config.toml || exit $ERR_FILE_WATCH_TIMEOUT + + systemctl is-active --quiet docker && (systemctl_disable 20 30 120 docker || exit $ERR_SYSTEMD_DOCKER_STOP_FAIL) + systemctlEnableAndStart containerd || exit $ERR_SYSTEMCTL_START_FAIL +} + + +ensureDocker() { + DOCKER_SERVICE_EXEC_START_FILE=/etc/systemd/system/docker.service.d/exec_start.conf + wait_for_file 1200 1 $DOCKER_SERVICE_EXEC_START_FILE || exit $ERR_FILE_WATCH_TIMEOUT + usermod -aG docker ${ADMINUSER} + DOCKER_MOUNT_FLAGS_SYSTEMD_FILE=/etc/systemd/system/docker.service.d/clear_mount_propagation_flags.conf + if [[ $OS != $COREOS_OS_NAME ]]; then + wait_for_file 1200 1 $DOCKER_MOUNT_FLAGS_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + fi + DOCKER_JSON_FILE=/etc/docker/daemon.json + for i in $(seq 1 1200); do + if [ -s $DOCKER_JSON_FILE ]; then + jq '.' 
< $DOCKER_JSON_FILE && break + fi + if [ $i -eq 1200 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi + done + systemctlEnableAndStart docker || exit $ERR_DOCKER_START_FAIL + + DOCKER_MONITOR_SYSTEMD_TIMER_FILE=/etc/systemd/system/docker-monitor.timer + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_TIMER_FILE || exit $ERR_FILE_WATCH_TIMEOUT + DOCKER_MONITOR_SYSTEMD_FILE=/etc/systemd/system/docker-monitor.service + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart docker-monitor.timer || exit $ERR_SYSTEMCTL_START_FAIL +} + + + + + +ensureKubelet() { + KUBELET_DEFAULT_FILE=/etc/default/kubelet + wait_for_file 1200 1 $KUBELET_DEFAULT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBECONFIG_FILE=/var/lib/kubelet/kubeconfig + wait_for_file 1200 1 $KUBECONFIG_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBELET_RUNTIME_CONFIG_SCRIPT_FILE=/opt/azure/containers/kubelet.sh + wait_for_file 1200 1 $KUBELET_RUNTIME_CONFIG_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart kubelet || exit $ERR_KUBELET_START_FAIL + + + +} + +ensureLabelNodes() { + LABEL_NODES_SCRIPT_FILE=/opt/azure/containers/label-nodes.sh + wait_for_file 1200 1 $LABEL_NODES_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + LABEL_NODES_SYSTEMD_FILE=/etc/systemd/system/label-nodes.service + wait_for_file 1200 1 $LABEL_NODES_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart label-nodes || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureJournal() { + { + echo "Storage=persistent" + echo "SystemMaxUse=1G" + echo "RuntimeMaxUse=1G" + echo "ForwardToSyslog=yes" + } >> /etc/systemd/journald.conf + systemctlEnableAndStart systemd-journald || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureK8sControlPlane() { + if $REBOOTREQUIRED || [ "$NO_OUTBOUND" = "true" ]; then + return + fi + retrycmd_if_failure 120 5 25 $KUBECTL 2>/dev/null cluster-info || exit $ERR_K8S_RUNNING_TIMEOUT +} + +createKubeManifestDir() { + KUBEMANIFESTDIR=/etc/kubernetes/manifests + mkdir -p $KUBEMANIFESTDIR +} + +writeKubeConfig() { + KUBECONFIGDIR=/home/$ADMINUSER/.kube + KUBECONFIGFILE=$KUBECONFIGDIR/config + mkdir -p $KUBECONFIGDIR + touch $KUBECONFIGFILE + chown $ADMINUSER:$ADMINUSER $KUBECONFIGDIR + chown $ADMINUSER:$ADMINUSER $KUBECONFIGFILE + chmod 700 $KUBECONFIGDIR + chmod 600 $KUBECONFIGFILE + set +x + echo " +--- +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: \"$CA_CERTIFICATE\" + server: $KUBECONFIG_SERVER + name: \"$MASTER_FQDN\" +contexts: +- context: + cluster: \"$MASTER_FQDN\" + user: \"$MASTER_FQDN-admin\" + name: \"$MASTER_FQDN\" +current-context: \"$MASTER_FQDN\" +kind: Config +users: +- name: \"$MASTER_FQDN-admin\" + user: + client-certificate-data: \"$KUBECONFIG_CERTIFICATE\" + client-key-data: \"$KUBECONFIG_KEY\" +" > $KUBECONFIGFILE + set -x +} + +configClusterAutoscalerAddon() { + CLUSTER_AUTOSCALER_ADDON_FILE=/etc/kubernetes/addons/cluster-autoscaler-deployment.yaml + wait_for_file 1200 1 $CLUSTER_AUTOSCALER_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_SECRET | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SUBSCRIPTION_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $TENANT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $RESOURCE_GROUP | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE +} + +configACIConnectorAddon() { + ACI_CONNECTOR_CREDENTIALS=$(printf 
"{\"clientId\": \"%s\", \"clientSecret\": \"%s\", \"tenantId\": \"%s\", \"subscriptionId\": \"%s\", \"activeDirectoryEndpointUrl\": \"https://login.microsoftonline.com\",\"resourceManagerEndpointUrl\": \"https://management.azure.com/\", \"activeDirectoryGraphResourceId\": \"https://graph.windows.net/\", \"sqlManagementEndpointUrl\": \"https://management.core.windows.net:8443/\", \"galleryEndpointUrl\": \"https://gallery.azure.com/\", \"managementEndpointUrl\": \"https://management.core.windows.net/\"}" "$SERVICE_PRINCIPAL_CLIENT_ID" "$SERVICE_PRINCIPAL_CLIENT_SECRET" "$TENANT_ID" "$SUBSCRIPTION_ID" | base64 -w 0) + + openssl req -newkey rsa:4096 -new -nodes -x509 -days 3650 -keyout /etc/kubernetes/certs/aci-connector-key.pem -out /etc/kubernetes/certs/aci-connector-cert.pem -subj "/C=US/ST=CA/L=virtualkubelet/O=virtualkubelet/OU=virtualkubelet/CN=virtualkubelet" + ACI_CONNECTOR_KEY=$(base64 /etc/kubernetes/certs/aci-connector-key.pem -w0) + ACI_CONNECTOR_CERT=$(base64 /etc/kubernetes/certs/aci-connector-cert.pem -w0) + + ACI_CONNECTOR_ADDON_FILE=/etc/kubernetes/addons/aci-connector-deployment.yaml + wait_for_file 1200 1 $ACI_CONNECTOR_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$ACI_CONNECTOR_CREDENTIALS|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$RESOURCE_GROUP|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_CERT|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_KEY|g" $ACI_CONNECTOR_ADDON_FILE +} + +configAzurePolicyAddon() { + AZURE_POLICY_ADDON_FILE=/etc/kubernetes/addons/azure-policy-deployment.yaml + sed -i "s||/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP|g" $AZURE_POLICY_ADDON_FILE +} + + +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line43.sh b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line43.sh new file mode 100644 index 00000000000..8a2c830577f --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line43.sh @@ -0,0 +1,38 @@ +[Unit] +Description=Kubelet +ConditionPathExists=/usr/local/bin/kubelet + + +[Service] +Restart=always +EnvironmentFile=/etc/default/kubelet +SuccessExitStatus=143 +ExecStartPre=/bin/bash /opt/azure/containers/kubelet.sh +ExecStartPre=/bin/mkdir -p /var/lib/kubelet +ExecStartPre=/bin/mkdir -p /var/lib/cni +ExecStartPre=/bin/bash -c "if [ $(mount | grep \"/var/lib/kubelet\" | wc -l) -le 0 ] ; then /bin/mount --bind /var/lib/kubelet /var/lib/kubelet ; fi" +ExecStartPre=/bin/mount --make-shared /var/lib/kubelet + + +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_retries2=8 +ExecStartPre=/sbin/sysctl -w net.core.somaxconn=16384 +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_max_syn_backlog=16384 +ExecStartPre=/sbin/sysctl -w net.core.message_cost=40 +ExecStartPre=/sbin/sysctl -w net.core.message_burst=80 + +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh1=4096 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh2=8192 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh3=16384 + +ExecStartPre=-/sbin/ebtables -t nat --list +ExecStartPre=-/sbin/iptables -t nat --numeric --list +ExecStart=/usr/local/bin/kubelet \ + --enable-server \ + --node-labels="${KUBELET_NODE_LABELS}" \ + --v=2 --container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ + --volume-plugin-dir=/etc/kubernetes/volumeplugins \ + $KUBELET_FLAGS \ + $KUBELET_REGISTER_NODE $KUBELET_REGISTER_WITH_TAINTS + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git 
a/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line9.sh b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line9.sh new file mode 100644 index 00000000000..08cbc16e86d --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+TempDisk+Containerd/line9.sh @@ -0,0 +1,305 @@ +#!/bin/bash + +ERR_SYSTEMCTL_START_FAIL=4 +ERR_CLOUD_INIT_TIMEOUT=5 +ERR_FILE_WATCH_TIMEOUT=6 +ERR_HOLD_WALINUXAGENT=7 +ERR_RELEASE_HOLD_WALINUXAGENT=8 +ERR_APT_INSTALL_TIMEOUT=9 +ERR_NTP_INSTALL_TIMEOUT=10 +ERR_NTP_START_TIMEOUT=11 +ERR_STOP_SYSTEMD_TIMESYNCD_TIMEOUT=12 +ERR_DOCKER_INSTALL_TIMEOUT=20 +ERR_DOCKER_DOWNLOAD_TIMEOUT=21 +ERR_DOCKER_KEY_DOWNLOAD_TIMEOUT=22 +ERR_DOCKER_APT_KEY_TIMEOUT=23 +ERR_DOCKER_START_FAIL=24 +ERR_MOBY_APT_LIST_TIMEOUT=25 +ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT=26 +ERR_MOBY_INSTALL_TIMEOUT=27 +ERR_K8S_RUNNING_TIMEOUT=30 +ERR_K8S_DOWNLOAD_TIMEOUT=31 +ERR_KUBECTL_NOT_FOUND=32 +ERR_IMG_DOWNLOAD_TIMEOUT=33 +ERR_KUBELET_START_FAIL=34 +ERR_CONTAINER_IMG_PULL_TIMEOUT=35 +ERR_CNI_DOWNLOAD_TIMEOUT=41 +ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT=42 +ERR_MS_PROD_DEB_PKG_ADD_FAIL=43 + +ERR_SYSTEMD_INSTALL_FAIL=48 +ERR_MODPROBE_FAIL=49 +ERR_OUTBOUND_CONN_FAIL=50 +ERR_K8S_API_SERVER_CONN_FAIL=51 +ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL=52 +ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL=53 +ERR_KATA_KEY_DOWNLOAD_TIMEOUT=60 +ERR_KATA_APT_KEY_TIMEOUT=61 +ERR_KATA_INSTALL_TIMEOUT=62 +ERR_CONTAINERD_DOWNLOAD_TIMEOUT=70 +ERR_CUSTOM_SEARCH_DOMAINS_FAIL=80 +ERR_GPU_DRIVERS_START_FAIL=84 +ERR_GPU_DRIVERS_INSTALL_TIMEOUT=85 +ERR_GPU_DEVICE_PLUGIN_START_FAIL=86 +ERR_GPU_INFO_ROM_CORRUPTED=87 +ERR_SGX_DRIVERS_INSTALL_TIMEOUT=90 +ERR_SGX_DRIVERS_START_FAIL=91 +ERR_APT_DAILY_TIMEOUT=98 +ERR_APT_UPDATE_TIMEOUT=99 +ERR_CSE_PROVISION_SCRIPT_NOT_READY_TIMEOUT=100 +ERR_APT_DIST_UPGRADE_TIMEOUT=101 +ERR_APT_PURGE_FAIL=102 +ERR_SYSCTL_RELOAD=103 +ERR_CIS_ASSIGN_ROOT_PW=111 +ERR_CIS_ASSIGN_FILE_PERMISSION=112 +ERR_PACKER_COPY_FILE=113 +ERR_CIS_APPLY_PASSWORD_CONFIG=115 +ERR_SYSTEMD_DOCKER_STOP_FAIL=116 + +ERR_VHD_FILE_NOT_FOUND=124 +ERR_VHD_BUILD_ERROR=125 + + +ERR_AZURE_STACK_GET_ARM_TOKEN=120 +ERR_AZURE_STACK_GET_NETWORK_CONFIGURATION=121 +ERR_AZURE_STACK_GET_SUBNET_PREFIX=122 + +OS=$(sort -r /etc/*-release | gawk 'match($0, /^(ID_LIKE=(coreos)|ID=(.*))$/, a) { print toupper(a[2] a[3]); exit }') +UBUNTU_OS_NAME="UBUNTU" +RHEL_OS_NAME="RHEL" +COREOS_OS_NAME="COREOS" +KUBECTL=/usr/local/bin/kubectl +DOCKER=/usr/bin/docker +export GPU_DV=418.126.02 +export GPU_DEST=/usr/local/nvidia +NVIDIA_DOCKER_VERSION=2.0.3 +DOCKER_VERSION=1.13.1-1 +NVIDIA_CONTAINER_RUNTIME_VERSION=2.0.0 +NVIDIA_DOCKER_SUFFIX=docker18.09.2-1 + +aptmarkWALinuxAgent() { + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-mark $1 walinuxagent || \ + if [[ "$1" == "hold" ]]; then + exit $ERR_HOLD_WALINUXAGENT + elif [[ "$1" == "unhold" ]]; then + exit $ERR_RELEASE_HOLD_WALINUXAGENT + fi +} + +retrycmd_if_failure() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + echo Executed \"$@\" $i times; + return 1 + else + sleep $wait_sleep + fi + done + echo Executed \"$@\" $i times; +} +retrycmd_if_failure_no_stats() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +retrycmd_get_tarball() { + tar_retries=$1; wait_sleep=$2; tarball=$3; url=$4 + echo "${tar_retries} retries" + for i in $(seq 1 
$tar_retries); do + tar -tzf $tarball && break || \ + if [ $i -eq $tar_retries ]; then + return 1 + else + timeout 60 curl -fsSL $url -o $tarball + sleep $wait_sleep + fi + done +} +retrycmd_get_executable() { + retries=$1; wait_sleep=$2; filepath=$3; url=$4; validation_args=$5 + echo "${retries} retries" + for i in $(seq 1 $retries); do + $filepath $validation_args && break || \ + if [ $i -eq $retries ]; then + return 1 + else + timeout 30 curl -fsSL $url -o $filepath + chmod +x $filepath + sleep $wait_sleep + fi + done +} +wait_for_file() { + retries=$1; wait_sleep=$2; filepath=$3 + paved=/opt/azure/cloud-init-files.paved + grep -Fq "${filepath}" $paved && return 0 + for i in $(seq 1 $retries); do + grep -Fq '#EOF' $filepath && break + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + sed -i "/#EOF/d" $filepath + echo $filepath >> $paved +} +wait_for_apt_locks() { + while fuser /var/lib/dpkg/lock /var/lib/apt/lists/lock /var/cache/apt/archives/lock >/dev/null 2>&1; do + echo 'Waiting for release of apt locks' + sleep 3 + done +} +apt_get_update() { + retries=10 + apt_update_output=/tmp/apt-get-update.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + ! (apt-get update 2>&1 | tee $apt_update_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_update_output && break || \ + cat $apt_update_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get update $i times + wait_for_apt_locks +} +apt_get_install() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get install -o Dpkg::Options::="--force-confold" --no-install-recommends -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + apt_get_update + fi + done + echo Executed apt-get install --no-install-recommends -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_purge() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get purge -o Dpkg::Options::="--force-confold" -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + echo Executed apt-get purge -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_dist_upgrade() { + retries=10 + apt_dist_upgrade_output=/tmp/apt-get-dist-upgrade.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + apt-mark showhold + ! 
(apt-get dist-upgrade -y 2>&1 | tee $apt_dist_upgrade_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_dist_upgrade_output && break || \ + cat $apt_dist_upgrade_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get dist-upgrade $i times + wait_for_apt_locks +} +systemctl_restart() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl restart $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_stop() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl stop $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_disable() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl disable $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +sysctl_reload() { + retries=$1; wait_sleep=$2; timeout=$3 + for i in $(seq 1 $retries); do + timeout $timeout sysctl --system && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +version_gte() { + test "$(printf '%s\n' "$@" | sort -rV | head -n 1)" == "$1" +} + +systemctlEnableAndStart() { + systemctl_restart 100 5 30 $1 + RESTART_STATUS=$? + systemctl status $1 --no-pager -l > /var/log/azure/$1-status.log + if [ $RESTART_STATUS -ne 0 ]; then + echo "$1 could not be started" + return 1 + fi + if ! retrycmd_if_failure 120 5 25 systemctl enable $1; then + echo "$1 could not be enabled by systemctl" + return 1 + fi +} + +systemctlDisableAndStop() { + if [ systemctl list-units --full --all | grep -q "$1.service" ]; then + systemctl_stop 20 5 25 $1 || echo "$1 could not be stopped" + systemctl_disable 20 5 25 $1 || echo "$1 could not be disabled" + fi +} +#HELPERSEOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+TempDisk/CSECommand b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/CSECommand new file mode 100644 index 00000000000..f650d780f22 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/CSECommand @@ -0,0 +1 @@ +echo $(date),$(hostname); retrycmd_if_failure() { r=$1; w=$2; t=$3; shift && shift && shift; for i in $(seq 1 $r); do timeout $t ${@}; [ $? 
-eq 0 ] && break || if [ $i -eq $r ]; then return 1; else sleep $w; fi; done }; ERR_OUTBOUND_CONN_FAIL=50; retrycmd_if_failure 50 1 3 nc -vz mcr.microsoft.com 443 2>&1 || exit $ERR_OUTBOUND_CONN_FAIL; for i in $(seq 1 1200); do grep -Fq "EOF" /opt/azure/containers/provision.sh && break; if [ $i -eq 1200 ]; then exit 100; else sleep 1; fi; done; ADMINUSER=azureuser CONTAINERD_VERSION= MOBY_VERSION= TENANT_ID=tenantID KUBERNETES_VERSION=1.15.7 HYPERKUBE_URL=hyperkube-amd64:v1.15.7 APISERVER_PUBLIC_KEY= SUBSCRIPTION_ID=subID RESOURCE_GROUP=resourceGroupName LOCATION=southcentralus VM_TYPE=vmss SUBNET=subnet1 NETWORK_SECURITY_GROUP=aks-agentpool-36873793-nsg VIRTUAL_NETWORK=aks-vnet-07752737 VIRTUAL_NETWORK_RESOURCE_GROUP=MC_rg ROUTE_TABLE=aks-agentpool-36873793-routetable PRIMARY_AVAILABILITY_SET= PRIMARY_SCALE_SET=aks-agent2-36873793-vmss SERVICE_PRINCIPAL_CLIENT_ID=ClientID SERVICE_PRINCIPAL_CLIENT_SECRET='Secret' KUBELET_PRIVATE_KEY= NETWORK_PLUGIN= NETWORK_POLICY= VNET_CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/azure-cni/v1.1.3/binaries/azure-vnet-cni-linux-amd64-v1.1.3.tgz CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/cni/cni-plugins-amd64-v0.7.6.tgz CLOUDPROVIDER_BACKOFF= CLOUDPROVIDER_BACKOFF_MODE= CLOUDPROVIDER_BACKOFF_RETRIES=0 CLOUDPROVIDER_BACKOFF_EXPONENT=0 CLOUDPROVIDER_BACKOFF_DURATION=0 CLOUDPROVIDER_BACKOFF_JITTER=0 CLOUDPROVIDER_RATELIMIT= CLOUDPROVIDER_RATELIMIT_QPS=0 CLOUDPROVIDER_RATELIMIT_QPS_WRITE=0 CLOUDPROVIDER_RATELIMIT_BUCKET=0 CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE=0 LOAD_BALANCER_DISABLE_OUTBOUND_SNAT= USE_MANAGED_IDENTITY_EXTENSION=false USE_INSTANCE_METADATA=false LOAD_BALANCER_SKU= EXCLUDE_MASTER_FROM_STANDARD_LB=true MAXIMUM_LOADBALANCER_RULE_COUNT=0 CONTAINER_RUNTIME= CONTAINERD_DOWNLOAD_URL_BASE=https://storage.googleapis.com/cri-containerd-release/ NETWORK_MODE= KUBE_BINARY_URL= USER_ASSIGNED_IDENTITY_ID=userAssignedID API_SERVER_NAME= IS_VHD=true GPU_NODE=false SGX_NODE=false AUDITD_ENABLED=false CONFIG_GPU_DRIVER_IF_NEEDED=true ENABLE_GPU_DEVICE_PLUGIN_IF_NEEDED=true /usr/bin/nohup /bin/bash -c "/bin/bash /opt/azure/containers/provision.sh >> /var/log/azure/cluster-provision.log 2>&1; systemctl --no-pager -l status kubelet 2>&1 | head -n 100" \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+TempDisk/CustomData b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/CustomData new file mode 100644 index 00000000000..8ba5071f05b --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/CustomData @@ -0,0 +1,161 @@ +[base64(concat('#cloud-config + +write_files: +- path: /opt/azure/containers/provision_source.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9xZbVPbSBL+7l/R69UtkFvZlh1YEkq5FdbgqDCSSi+wXMJOCWlsqxCSI41JsoH/fjWjV7+QNdm9Ind8MupnerqffqanZf/4Q/c6jLvXXjZrtZBlYfvSdtDZ0Blj21EsB58o2lh+Cdw2HBuuijVdc7CjnSHDdeT93HKijRG+UJzh28pykFveGmMVXyhjTXd/U0ZId+RfcoOFxkix0QbAYQ5QTAdruu0o43Hl9FVu0h1zzST1alseemWRcovtGGaRn8qN9qU+VGtYP4epxvAUWWv++70ls2pc6GNDqZf3pSX7KbrcgFnegmXIcJV5sGRuFKBfVODMOL7ky8aaXSfYL6pwZuOROXpk64OGh7XcipqcHtrYcnVd00eVbdCrbWteB0XSp+4xYprRDQefGK6uyoMiVe1stGHZoF42Rk4z00GpNUN3FE1HuQfTbUQ7KNId6tq665dSxYVpGSpW0fEGUH8dZJ6OsKKqheAH0DwOasVYbj0suVRNyzhGxdNCnIbrHDMKWAp6btpvcKiYGraRdY6sJkDaCFB1G48N49Q1C1h/I0z5t2uhdXBJsuIomzVx0GsgVsV4IDWMq4I56K9USV33/kvhfejajnGGbaRYw7dYNc4UTbfzEA8LyMh0sWpp58iym1o4fLluXo3kcL+BQefaEGFz7I40fcnRQQ3S9BMDW8YZHhqW5ZoOUuXDQv726LdH93nVW8c0dngl1U1LVbRxzeOrRjtzTVVxUG0qFDO0EdPhuWZrho7toaWZDj9KFlLUy0aH6zU2YQ3ANUeWoqIGohGG6VqjQppSr6iXfWmzY2ohVilZ6hUSGWo2VmxbG+nYMgwHmxeyVHbNho13eRNZZ5rNIpWlsmWaCm9YQ8O85CBZkpqeTXN8iU3Fti8Mix+LE20kS9I+LB2xqusZhYAl6aA4hedv1Xzzur9IZUtktmNXG6sYWZZhyRLrhvmy/FzYjjI8xSPkYMU6w45xinRZKrv5KkRHzoVhnRZBupbi8ETL5r4Kt91jHTnYtNCJ9psssf7eMmxZ2M2SlIKYQpdQv/tCTElEvIzAPUy9jzewc+tRf7Yr9H6G7u+7morH2imSd/0kJUm2d6+p8m7nxd6e0P0ZvD34AvM0jCnQZDGfk3TXe9e/Au/d4GrvCMinkMLDzl7LPXZ1x8WGjXXlDMnt/P92y3qLxvVT9l+7NTQsZNj10/z/dqvo4nJ3kaXdKPG9iA8HN4tr4tOolRcot7LnQeLfkLRFPs1ZsvwAnssvpcOO1D/o9PpLBmQ7TbfxXRiEXks/11RNKSvPzhRju9/pdQatlYdSRxp0JFEq19T3g+XqTP9Ly3srrm33hBUoj1g67PRedfqi1Gp5c3rrpTcXyjiMF5+UKYnp7h58aQEAfPRCiidJir05xVHi32T8cUpo+tm/DXA4wRMvjBYpASanfejvgzenInMIggQfvYg59ZhTuL+H93x5OIF376AtSG2QZWjPkihow9XVEdAZiTmC/fGyChvnJ44h0YqfRfynnh4duDh2ErYeWq0NyVV8MFtIMlmQjnJusoiQuSz0j4CGtyRZUFkYHEE2CycUfvpp5UO+SZJCCGEMwm5GPoAEQuF07wiCpIq5cAdC9eHLrw/M03VKvJuay5JPEEIQyYfKG6ySwInwZwmgT8RfUBLA+7bw6/s2W8j2yI6WoCmhizQGqSYxysgSgqcOQk1DZZ2E/GOQxPmKP9n2YRPlOE5wRj2a/b9w/3cR2mBrSiimXnrtRVHFEvVS/DWmcjhnapFGsvCyrlBb+NJY/VAS3n6EugZ2hT4vBZH+MeEQtts21DW8fSN9ZbEOeuAv0gjESWaPQeAfkyqWv8444UL2rqOt2sIkjMjco7MG40dw50Vh4NEwibGXTjNZ2F+qwrYV2Mi+UO4Iwso2/30FlyUYbC5BGdnSGn92mwTwz0+PmLctUXVTMS9PLAyHzr07EsjdZE673h+LlHT9KFkEYhiHVGTYrMMRHDtNyRzEkw+sWKWfhzYIHMFoLqjqPaVwldOdH5FxstMoZFm352s6HEkCEENod1l43aC9UjAu3jrmN28KOpq1qaaIesCYhRGBySIjKXTvvLQbhdfdYH4zZUPSTf3Im9NuFGY0azz3PX9GuMVL/Vl4Rwrjm25A7rrxIoqg/+YnaYlkHuXOhRfSMJ7yypRjaTJhkwvw8HZay/QMmkpjSbA+sJgHHl2XmpQXnaFyBE4WdL6gcpfezlm04pRQMTd1ksWTLqZHxjGeWD5oquhYU3R8Yhm6g3RVjpM4jClJPZ+Gd3XlGcMgin4ST8Ipm95ED0RxkqQ+4Q8DMqmwRcggTkD8DGGc0WYb/QF2S0CeE+cc7oESAsIaC2z65zpH0P59990FunrdebF3v/uOoKs07bzYE9p7TO91e/I9usnP5l62Gfw3nZtCDPtbzjkrtJTzzmNzdS2sguJnGHyeVV9F2uymUOc309evjTm7urLXr+V2cy2f8UUxTsRihZgSP7m9JXGQMYU+97BWnv66RzxRMBURj+e4cXT/qqjmi3T6HO8xzyopnvSWgnp+4WynjiKnb9FAEGZMktPUC0opLN1aDNfEbLy5GEAsAMX9tYUUHpHB9hLYtvyP31bV9xLZLPk4S6J8lGvcX83M2OLVi2wDNU+7zqrbaZOndeF9Dd7aUpVLely7xLjiCr1tVtsSJ40rbKPOss8ZJbc+jXBKMuql9Gn9BrI7P/ZuSflS+q1v61UYEHjkNonFlESJF2yzoIgbhCKU76UdNLnNaDL/nyOWBf1dsxqE2bav9N8VsUXc3ye3eStgqTyN179KJiNGFHOSvh9C7kiahUmMp9VbIyUZhbawy3/bmMDOP7L38Q60hV/bcA/FzyfncA8z4gUgxiDt5d90C1K79dCq1YtipgElDuylrrvWj0Hq9WAfBj0Q8nwslP9+ZzuK49qy8K/ldZBRjy4yEKR8GJ17U5KCGMGb4s08mRZflgiSmGM7UTJtXE7LG4AYE+it0Vx87yWBnyyiAOKEwjUBHjEJ2q2NlSioDSfww9d/jqiTIXF+UqStts/RAVx/rl08HkuzGmp+JHk5Gq2aE1JHE/GbNQ5pxuaYBZ/32dRfThQfWEidjKR3oU/aa5wt3wdQZitITOaPEZrM501C17rfVm4KbOGH5/7jWzQ2kWUj46T1nwAAAP//FTwo004jAAA= + +- path: /opt/azure/containers/provision.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/6xXe3PaOhb/35/ilDJ59NYYSG/abIfuUHAaNhSztsnd3d6Ox1jHoImRXEnO47b57juSeZiE3iad+o9MkI7O+Z3XT0fPnzlTypxpLOeW6/vR6WDoRn90w95ZFA4+ut4k7ByDJVGBfWPRFD6BnYLDc+XEfxUCnYQzFVOGQjq54FdUUs4aCV/kGSqEz29BzZFZYD5M5hxq3UxgTG5BxAwUB1kkCUoJeEMVZbNGo1Fbid9QBU0rpZZlTtYPSKzw8GX9YM6lYvECD1+CVLFQSSEVX8hE0Fy9e2fQLSwr5QIoUAb1A4lfoAVHx83m4Vsg3BgonZE/ciaSvBAJNuT8njcAM4E52KdfYP/5mTscu37geqf7T1C4twdTgfGl0ZjSDaw6BRu/GMAPrJqw1HfnyghhJnEtLTPEHForC4QztCQSsCnUnApsh9QeD9wq/3vCAes6pipKuYhSmmHpWOuH5ymTKs4yqWP17dvfe/5ISBWVPwkq4Syls1+JaaPRMp32243lhr1+NHZdP+q5ftipH5Qd8HV7fXA66HVDN7iDb5AUCmyy/2lfN2h7s/DZLLQ2Cy/NQv3goP515PXdaDDqu/+5+611eHhYMXvu/neX1bE/uOiGrt7+dVaX7GLo5RPUvQA6Haj3PN/1gsgLolH3owufq31QMklvHrMZZTMgmMZFpuCymGKiMphSBhlPYkU5K9nkfPLe7YXDjknFUswwy5rSrmLhiII5AqecK1vgl4IKJFvt57vvPS/03X9PBr7b7yhRoLXutnubaaw3tIkyvYXALllQNpEorCTDmE3y3qoaiLXyvlb/+mE8iXSM7mrwrAM1baW27f7y+IfxpC/oFQpp7Fyc9aOh9yEwlTjuhmedSuFdzYm9rP01Pa+drz84Cg+jTVBhopDAjGcEGdBFPEPIBa70lpHGm5wLpbUKVOI2WZCIplEa06wQeE/ifhgGWqWsCum8+SM3dIPowvWDgTeqIDLcr+8QoweKHNbtVaKTJSR9uYGdfMcc7Bmh08lwGA1GQdgdDqN7WVzneJ2jQRBdnPXvatABnZ/t9GwQTqQuz4uzPhAqleAwLRQYstkRcsYVpLxgpLaD67W4YZiRF0an3mTUr14au8Gb+lwV+aaxJu8no3BSaSx9D2mvdmrRLq5qsOLjMud9zOUmPKXTHyr18RbkJc3zsklzZARZQlGuji879BEYq+XIpG6mglDV10eXutZp9Qum6AJ1S5U7I1TXXFyOs2JG2Xr1vJhihqrLyPmSDyoYnv2IgAwGf9wrO1xgrFCr+RgzmqJUfSosqxR6iGtNCOdvZOVXbzSwrNWpJbrlr3/xQrC4RLg7TdXUPDqS+tuu2DYhrQRbr+3m6xO0XzWPEnt69Hvbjlsn7RZiu/kaEd6BI2+lMy2kc7XQf0nJQs78KioUzZyCTSkjmxlkOW+0juifv9zKn6wGDqrEEUlDc3626ouUWmJhqF3vxpoJc9XQ0W4Q5+QkR0E5ockTCi/OVTRDFeWFmCG0m3DUhFa7CXEeJ3Ns2xqVphNdEtZFdzjod8OBN4pc3+80re54EAWuf+H6UX8URL4b+gM36LSbKwQVAWO704EXjVzQq1hhRtll40UVznfVmYnZd4NO/WAHA0P96+6Td3pABiYzzi+LfFtMw7k7tIIw6NT/uYIbhKZNmtsxWm76rgnni1rr+E3j+KjRap80Wse1F/CwAO8FyrDd+Zsgqtjv/m/iuwbs0PPOJ+PotDsYPpx1H6Fpl46UbhisItrzRqN1WH9vVr17WqbK73uaW81mlcl/mLLq4WXOErCv/tqRMXj16kjPqI+Ii9FqImKK1/DM9kBzfx7YLwclWA1KL6Fc0FTPOEH98GrBgrJC4b45tn5o6ru4JueFIvyagS2gBXu1n6CuOFeLWFz+0R1SVtx0Z8gUFGzOM7K806uZfZpqp5DCyeh0zRvyVipckAaJaXa7VP9YDOsXbK1nHqpQvlQhpYzKOZKGLg6QKK5Q6BGGYaLvRUjmmFxCwgn+owb17Sz+zZMYGdn5IF5cEirAzne/R/QQoHiRzB/9urdyCXFxk15fa67eHFpL2rlsZHwGe5ZVDjL3XLCeu96p9f8AAAD//xsNyOSCEAAA + +- path: /opt/azure/containers/provision_installs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9waaXPaSPY7v+JFVm2uaQmcxJtNhuxikD2UMbg4MplNpVSN1IgeJLWm1XJMCP99q3ULhI9kNrs1qQqG7tev331JR4/0OfX1OQ6XjUa3a06M8ft+1zD7Q3N6edXWWSB0/CXiRLeYLzD1CQ91y0IBZzdrLST8mlpEo358dtS9MKb3PcqsFRHxyWHf7I6GZ/1zs9cftxWdCEu3fKr7RGi2Eu+f9ofppsQqN+fUT7Z6o1+Hg1GnN9kBsNln32XYDpVGdzScdvpDY9yrh87Is8uHLl5PaqFX0ZxwnwgSlqFnp7PhdGaOjYHRmRht9Ykbzk1OXIJDAogDCp82Gpx47Jpcsvn6yVPYNAAAcCCQQwQEEXcIoDV4bL5GxHeoT5Lvlksb2+xsNyf1DgwFT/Kw5RLsR0FxuueeUZeEORLuAeILUA9JSiKhfiiw6/ZIUDpHBF9bnm3ShbnA1I04MX1mhgKLEFrHTXgFx6/AirgLaBFOBrAUIgjf6HqArRV2SKh51OIsZAuhWcyTqlhQR4/mkS8iXd1UxbrNj6H8mDQnW7PJHN6BLrzgVpCvX4HcUAGqMR6blxPzajzqmT3jNGfWnPYvjdFseog5OJEstZpgBysHEP2OK68uzs1Or2eedfqDTJEe5qtfOwPqRzcdh/gClsy1s03TIcKMAhsLUsXauZqas6teZ2pUyM+O2DSU5xyO7ZqDvf5Enj4fd3rV4wvGYxQpc0B9wAG2luQYRYK6YWx4gmM/DBgXKFYszF02X0Qhabe0ltYCCyOLcEEX1MKChGCRYIks5nnMB8vhLAqQSwUBiy7CFKvFfF9wbK3Akp8unSMe+YJ6BMhc4LlLQiBiKRhzQd4EDhXguFEoCF+E0ltiuQkWAF3Enz4VKFyHgnhoSdyA8BAoi3cCziJBjoEGIRFAgxT973+AS+cB9lDw+Y8Iu1Ss44XsB5KXh+CxyBfgy0sTjgLqfIGQWVhAuA6lD4DkhMS3gGQPuVK1cPMlZfYLDd6CzWJ5A9AFPMq1lnobvGhCC06aTVBLyngLYkn89BjA7yziPnYt4QJCPkMBdggHFFXO5NBV/feHk2lnMKioHmBB4y8285NzdAEfP4KibjqzXn/aM41h53Rg9LYKtNsgeETg06cKUbfzgiObCvu+XCTQ30L/gpYC1/nVrMfpNeFF+PJWNuWAAlDPr2Zmz5hMpT9/X2Tzr6lNseZQsYzmGmXpArJlwuO6EzgrsoZ31St1HIjsYOBUvTSGG/ffG+NJLbefMRXmgnFTittl1io8GL1yuqXvSjKwbf8vCPlTJJkkif0cUYHSXBqKPWnXgGSc/ghWsbibnncQ10E4EHrIIm6RMF7X7O8nvppLDhrLC0nui9hd0xIj82Mk46Ef3aAlwTbhIVKfRD72ZKHzFBzLAg+vCNgrL3yYAR2k4qSZ2cdgkptHFGpZAaalpiJLCEFCF+uJfN/rw/f9Xr+D4pSKbl6fmCcvkbpJNrcaj3xADNIFYzItrCeJFAXswzgRXtCjvL0fVuK4+CSNUjLrICIpCGhAJMfphmXLWJsg2Srp4v10lIkEKmZy3FbUTSILsyfL9LEpae+PhtvnuxuT2dlZ/8NWeRjHTyvR/P7nKiF6cv5hN0QTa8lA6Sf71Hdgcv4BEu0kgnGZhV1IuYlX0u9t9YnDSQCywhn3T7MAkfjVM5QW51/BigSgBRwDskFpK09jHJas29UUE9CEL6X1Wmu+TCHkv8n5h5Q5czYetJXMNHO7bLY0xh2d+oK4KHRudNvCAWppx3rsQPHPLD3K9iiJaRPCrwmPL9ND58aMYc2bk5dmwrjZ0lrHptVqNZutYy3uhDKK3r5NKT35kZSefAelz0pUJsp+T3hImV8Sfwg+ExBGgSw1ia1Ua4HWLk4SYqtRMo6rTveic25M2kocmGSEksFJeWj63Pc2tM6DoprdAlXHKSS/7wAlGguwRlVfbfXJHIckjq9qVY1PS+dHRtyj1nfdjOyUO5sEfNu4u1QoJWZ1U71/mwTPFJde3t7eXwixuy09ZgN+fnMIXQz14Ksm0854mnRYRZDJu+Bx0lfkoSYrcvMm2BzPhpLKuMpVkkiq7Fa6kFmA7OsbpeKzdGWl5e/OxmNjODWLMJVgtgGh69T0v0IcupRevAPpsgJptLJB+UmRQatVWoF45UVp5XkC87TKXfX6hDd1czk6/S3PCTVMJq6ZUaqWwaV/YpcTbOfeQOyfIFzRIJARW3Kf56XE6YgbFg1JMRbJlxwi5O+rlZMvxTd2B/32Dq2FGrIeJYNMuhPlhdbUXtZwVEEaQ70oAkuqw3K5lDn6cTNuZI6b5VnNLlXP8uFNu0TQM0AIuy77jKQ44o58p0SKQQ/nyUajkMy3DGG+d/zCmZ2Xp7Jw3Rl4VIrRgh/Zpw36k+mdxV4rna5YwWH0h8ri77j3G6W1IuuwIFHDoSX9NnAAIZtg7jG+J6b9pupyYp5fnZsXxm/3H0IdFFOMPheQ4FEoiC0X9+Vzx7UPnDcVoW5IxGfGV1du5FB/L7Ruhsb019H4wrwazM77MtC0QYnT1eG42pHb3WG/HFuzUJ6u5uPL3Xkw/E2SlsWe7rBf0/rvHUqi9LBvTs//HY+y1Y38lZA8kYnv6OiZvoUjmIUE5jhcwjyirqA+HB2BYGlAAxksrCXmITyRVQ5EgdwUSwIL6mMXFF2pqlgKXGA+lwcTS1T2qdMTalLatgrsUbeTEMsYygrLpJLJ9xtF835oTM3/b/nUkfhAIdUM3mtm5UltrW4ObJmnMopanJbG86gCnaUPLemvsWefvNQE5przRdnVzaFR/Q5tJSOupypW1j0EfeA+vYK4Ypb19+1KvgZqP6yU7fMHumYStx7JMuo+prYXwkqRpxy+qh6WPtxKBgeYA7r5cr/rUHcfgbVkn31AY+CMiTfyow5GVttoDH9/9aq6Wwh8Lyr8aK//s0Rflz6q8i8ePt4qwT0wKcRcgju7/yUNF/rpe0UBSD3HXFCXBFgs23oUcj3uCeMnu9Rz9n2b3BArip92pO6tllFAPh3AVog8yjnjWpykie0QzSdCYpX/USlMoeum9ko7UcDdKWn7l+e1nk1uBMeW+GUdEL6K5kUf1h30zeloNGirSWcfs6XoS+YRfZlBo/zhq65uLmanxnhoTI1J0cIkR6NSv9f3sENAzdCDuvnltytjLA/HcanaJqVQd/R+uZ4VVZJZNBBH0F0SayXxfSZgYR9SfkFS7xIB2Lfj75ZwwaZc/l3DgjMPciYfhzCnPuZrWDDXJrzc6yQkQTzCRLIAQsQXfB0w6gtQFEDXEJP0Jv7cYxby5/6ALFBkKVk1nE1K508pjdsEnVLTRXnXKft6ekYBZQdbuoFu0VU9Nku49dgs4d4HGyci4qU2ttx4JmFiR4wPFluuLdgxgTTYVK6kngORLxsKQCwzmnpLXNBGySLV0URaotodjY3RxBxNzGHn0tg3SCszxYIuKb/szQWpnMa+pGthpegLvPmEpowLysD73B4g5ptM4wCxDzaMysD5Irm749sXycmdjiVJPw+6pKa
P+ca5UizOnSCZgx+26CJB3IGJes6esWYJ8vqBajoErtyG7hYhHgBXdqaF9ZfWLWf2mbaKB5h7duCo3DiUf5Iecz/THMxn6XOW/mXnPHb3tnp8sNU/aUJL5uhmKW/Jq0DdxXKgqo7T79WsNE3K3geaBVV6i2cui8i3BGU+pHC5zSRw46ho7JOYJldNwUxbCpHkE810BxBaMO5hAY83G21MAhZSwfh6u32z2WhT7Gy3j7OJJ7o2QKm1CfVr7bL2sYn+8en5gV1Uv2wq2X2Pi1xbPIZIJupSmKbFbNJW/7njy2q+B4/a0DzkulIZOWTJYfNpyK7ktorEpxyIBVmq8ijsH/34r0/bXWfe1mpTap0z1yX8EvvYybT/l9Kq5bLIjrvshFPkJaz+ZZVMbgLG4+eYB322Hu6wNdwxfjxuQlYG3X7lQ1DcQk0Rtmpe5JFRfbwo3qrIFxcPeI2iLjLae1fc9ookuyacU5ukQ9BuPE4vD4xki3jWHxhXnekv2Zuu0lSTT81aOJqtv26acbdl+kRalcQhd5LcJ1hkLZPhShlZYgUWFvDzz2CMzuDduzogGwucyOFNfCBujt+UHnUE7lpeK6lPr34DCyxLDGN01tg2juSf/wQAAP//M+NLdjUsAAA= + +- path: /opt/azure/containers/provision_configs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/7xae3OjRrb/X5+il+iOx7XBeB5JJt5oqjC0vcQSKIAmMzdOUS1oyR3zUJrGtnbs736rm4dAAiTV3Nr8oXHgnN95dJ9XN9/9Q5mTWJmj9G5gWjr0DFOHn0fD13dJymIUYfAMGCIhkH3w9jQnMdUJrFGcDsgC/PEHGFoOGI3AULNsaDme5QhC8Oef/wLsDscDAACY2sYn1YWeMR0NX5MVQCC9Sx4BZnfn4BksKV4BeZqAExJjBm5v/rgNzv7858npAIcp3gWodJQN8Az8jAE5OAEnQF68OR0syAC6mu5NIbS9mT0eSXeMrdILRRl+3aC8XLx99+Fcykm1sQFNt5/4p5+lwcBP4gVZZhSrQUTiWYrp69OvQj//Di0xkCGQ33Ct+G8EzoE8AT/z/4A0/KrqE8OcOdB+keos4fa7l5ocB/sUs7SUok4NB9qfoO2Vyt3AL95Udf89khTMfOU+m2MaY4ZTxceUpQpakRTTB0zP7vE6l8uSzL8TQjvRKg2jJADnP56fH0iePMaAJgm74D97eQSTph5si49ajGjhb9W+j25H7XZiQS02zJGLgJkfdK5CH2CrJQcw5IEp/0fYYjkTy/FmtvEiNYOyNJ2rd8F/DkRfkJoritg5xhV+SHDMulzRAdjtin0MO6u7h2tjm0ghx1i2wpgOv27y6UuXka3I3Sb2kX/rWvdhN1a62BQatF3jytA4+YGb3qesZ9NvA2654f37gxnaV7qba2cXH2NbsYs7bOsA7LZtH0PvLu61TSzwMZbt7uIOI1uRu03sJ283sItHMKWYgX8+iT+xf5d0FpwXCTyDOUrxj++BLAfYTwIMPh5U00rcZkXoBuwuMyVSR4LthjwgIzewdzPbHuz+BNrAFsuxH7Evp7T5obbAh/mhYxe1+eFw7P4Q3PUDJzzEES2AL4MBjtOMYnuqvT4FeXeXrlOGI5+FMEbzEKtx4DBEGaArf07iADw/A/xEGBhC2/acL44LJ5o79hxXtV3vSjXG+1DklCF2CE6lnpoFhOmFhnml4VEz0w1X96CpXo6h/iLx9p/RDDdLTpciiGMeZk3V/pMFQCsGQpIyIMskThkKQxyUs8NJjnnSKHhoxbwlZt4qo0sM3p6Dd+fgzdvzUv6rssKJn3rTfZPNcYiZI4qXhimrVuhmdgnH0D22/7vPAbdbwC00vk2OgBE5WeAkKxynaQiWOKYpAnKSMTDcoyp4e/7+Q4Ob4r+BHONHID/9cP4zkAO0TsFP787PgXyP1/sBW8VWNgE5zeZ/AUnRzFFRW/iUuDXu3HxId3x98HTQ1VV2AbW2W3uJd8pUD8f2zDa7HBvaESPbbtntBGutuvup+wa2HZbcnP+d2dD71bHMDiPQfzKKz/5Kk3hb9yZn+4zZQbOr5w5hV0fQsj7dSXvP8rd0GZWXDmoyWpdB/HAKQ4NcsqkZU3VcliMHajZ0R8OveygU5fZWub29vX35f8OTOJ6U4/mIgV9+AdC6KozaWYA8bCU/TLJAugCSyjfCNJuHxNfEs+9zAoZjFDND0Ay/utBUTdcz9JfyfZrNU5+SFSNJXFI5s0tHs42pa1hmnRahQBNxXxF2GdXGlJ+u7GHMvVExU5wmGfXxNU2yVc5qQ8ea2Rr0rm1rNq0ow8RH3IacaGxpKte+ev0QuesVzl9+mnjulymsuyDGzEQRrsw3azqk2M8oYWuhw4bKhO7vln3DNZ7ZhvtlS5+HBuQnw3Zn6tgrmBpU9q6NW+Reh800yRh2ebnfSLKtmQs9l/cKFd2KkgjRtfqASIjmJCRs7dS1m9rGRLW/eOon1Rirl8aYm+PUXFAAOD4KcSuno6lj2GARG3NKkwcSYHqJ/PtksZgkQcGnja2ZPrWtT4YObe9S1W6sqytvYumwF0C6AB28Lz1cNmaU4LSb2bOhaxvQ6QOBT6skxjHrQYGfp5YJTbcPRs9ouU27YPSZne/dHphfCWOY9oD8argutFshbMTwmESkzRRbdeHYmBjtNlScv02dPmbvt2m7LyuAy8y/x70KeJcz7Qbu1+N3Shjep4z3u2248BCV9sPlejURsxRPUIyWODACHDPC1vCJ4TgtF3rmQG+imuo11D1Dh6bLAwx+dqHp1BY6SzFV05Qs4w2OoecBM3Og7amOY1ybdYxans1SbPBOPfbxBDMUIIYq2YbpuKqpQW8CXVVXXfWlypoouEQhZ6LOfVYmT1X3LtUx57A952ZWyQhIyrONlbF5ksWBY6qukNHk0A2Hpx/PmrmX1szUPU5XSsRPfpgFeIJShukVTSKHoThANBhfCij4WRvPdO4ux4W2d2VbEz6lmLpq69748qXKSPnyfUJZWMtHNxPHq9bskzobu0XjW7BF6IlEWTSumW1nIdaSrIjsifrZmMwmHreoMsiejaGnWbNNaJfib/C6FH7/IZV2337CtNgFEm++oXVVdU7yU+1EcU9OBCMgPbzdOWXkQAGQCZCUvpylBBLobhwPw8ozzgFI5Slm94DXGEI006iGEPFDMaNrPwo8svAWiIQZxWKQ/AG8/QFESbCiyRyDOfVizBYkZJg2h9uJxZ14CTczuugj5RhIdSYJfMwtiZIgC3Eq81A4C5Q6zRnXsmmMZhrGVJTcNFd5y5byZWVTtbxlLZ+OZ9eGmS+pcF3LqkYPYKiZhndpmJ5u2Mqbczn3MhclxnLxWrPMK+NaUFSseYfPG/xtkh2QimdXR2tsaF/yowbJRyHxkxYtqx1zkn4nRaK2S3NKgiWWqv9nFMXpClFeOb9bnhyhFA
771ZL4sne9jJNY+BW8erWFUcbTCDR0+29bJ05C8v+UdE5iBc+Z2DhAZiBGDMhyRZ8fmORHRFoSM0RinrZiRiK8s880y3RVwxRZy3SNCcxtDRL/nm/6HTNzWF28HtQ04yJLofnbSpRuaTe8MBQ9PPwMtfIgyRjDkYiq/DgqKP5VcvFnfNImPj4LFPyEfS9liLJNkD0iwrxFQr0FCUXMn4M3YNgvrRn7/In3u+pq//a46dbMFcC8sPKokNE1yDUB9UvgulETnuW9q7F67RTHZPoRVvkhRtSLeDXxVjRZoaVo9rxFiJbpxtDNTf4/9tzk73VLl74H+aVY6gJKTJcbW3PjlADhKIlr6T2hgAASg+HrFP8N3giFTv8FgqSRToCcVjpWwGA3xgD4629wcnYCfmkhf/UKzClG921RI4QMCZC5Etwnbdh7PZAnmuLYs4r7EOMVeLMtNUhi3HvUWmythufL3ds8NW7uONNwLbtaPa6evW/PyVESE5bQM54D6P7o6RZy0EbpQDlUySJAjlfzYAX7l6TprcNO5DfZr+hfdo5KdXglGsxayOAFb0fLo+Mee9swDjKUMxYFJpf7gKgSknkpU/ybNyR7xNdQDpbMVS7KSlnm8oOiQplkxfJ2UPHLOlUdpJ+ldwc4pBv9m/ZBoUMTozo9bwnOqgbme2CM5jg0k6DW2I3VSzj2TEuHzgFOCDmAHHOEfkd0wB5kfYN3X4w2NNoboF3Q37QqNRWOuSb7NclojMJqJb5uUrk4MXZYQtESj1Z89koZb862KYRCE/Q0S/HozfX266K36nx/ldBHRAM3cdZpmCxHa5zmJC/gYzFVlM7+K9c12BT/LmcUHHLJcYxHbj6kvDGkSTgNUdxoCoc2vLQs14a/zQwb6hyVt4mmVY3nYhJhNOMN81YNpZhlNK73Cr3TWZ5X3DF4+1EJ8IMSZ2EI/DDjo75M4kWyFX8fHB7vpmFeV9uFj1MUIyay7gTFZIFTphPayL4T1TSuoOPqhj3ankajgicfz6L7gFAgr3LVanxc0CMluRxN5MuGiDz/CAF3SYSVYdUuKmdc2hahiLJhg1GpZeGmGhVJ7cZm2MSqXcRsJF9s/mwDOpC8Bs974p/4pNgGVo2RLaw7Fz8DWZYHaEWK444L8PBmUCx8ejGQy01wkWNjysiC+IhhGWXsLqGEreUAMXQBbqWhptbv8G+lQiJ9wPSiUbrya54BADGKsGAtD41+081bic/lDD+xXIH870KBQptdFlBMC9uvZBREJBYEXcIyysdAuRS0S3FP4uAC5HttwIUIxdrgatKEMoXSRODXnFe5rOaUXdcVjPd43cpwA7/cSgMJfOxcaflpc8qh5a5TM5akPgoxVYMgiavQ0cYzYYU6cy1xJ2B7qq435or6yRHnTZUyQ6AKVA7wKkzWEY7Z2RpFYU9h6pV4WHUqjr3S519yVxn6x+fha7Gxhz03XNUF5OnzUtqjSLsgB/sHSMqvxL5FWprNGzY1r/e+Bbm4XayBVzeM3wJLlxvA5t3XUajVrlU1Q0viGPss2dqwqiYObEyo8blDs6E4WVfHzmj4ekVJzBZA+norFRsjuJV4+PxPeit9D8qn+b1m801569p82rxrbb5DPiMPWCdUKLmGcbBKSMxmNMzpyi/lw2RJ4rOI+DRJkwVL4pDE+MxPolvp+9vqvjS/jaCdKJF4L+KrPKCKlFZFrila3ZVXlKXSJcySvzx7JHGQPKZnMWYFRvp3OKkkHKKEn1Bcx7n48P79uwJsicIQ93ikeL9jSPRNGii30osEpL4E0Ps6j1pOUkWEoG/GXv0rhkdwftr8xKj8SOgerwFN0cX7859/LL4aypvm+sdD7378If94KMnYzhl98a2LT3h9yuOAk56tcJR/SnQIA3+Wc5RfF41mjuK4I01VxqMHQlmGwnIOtXYezLafaObWE6klJG/gl9HwdeGjo8x6PD9tC3Fou8cBbsx+LBeoCbm/wjUBDy9uXXKOrmsUB+nH52FnthPJtEvabnY2UYQ/Pm+l5qMwuFN3FYK2exTKPV7vgNzAL/0Ym6ogPplJQuKvt4qC+Nomv1A4ZHU5jrwSQK2LW/ddlUk/Piv1epAq27lBaXz8kiqt7m7XVBxgfQetq8H/BQAA//+oI6qBejYAAA== + + + + + + + +- path: /etc/systemd/system/kubelet.service + permissions: "0644" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/6SUz07jPBTF93kKq2LxfQsnbalQEfIChsBUVAwiRSxKFTnOJbHq2JHvTWlnmHcf9Q9oSosGhJfX53fOkWXd8Z3VNAnOAZXXNWlnxVWTgQEKvjmb6+XkRlIZzzUSiqhBHxmnpIkybaPpRhoE4wT8TCuYBLeAJD0JaZ7kAoPYzrR3tgJLF9qAiIBUlMOjbAy98kmjFCDGc00JSWpQdHqHQTwHlSy9bjyIVV4msWSRqymSPxsPkXKWpLbg8cUqxHIPV01z7RmvWTSTPjI6e03+iFZZ/V4XrlhLP7IxO/ivco0l9swKDzV7aL1NemixZ/akGDf/M26AtdmEnTAqwbJ17ArnPNM236m5Ozhhj7q1r/3GppJT4FhKD7tuQfAGxCWJC1RkGH9iFijU9awXkqpTD+Q1YFf0/w0p5yFEV8m5ctaKztFhv/eJqErOU1zYNJNqalzxUX6VWgGiLCBVDkn02p+kssYjiX77ow9jQRdluPnFYaFSKj1g2RG99vHR1zy6ot857n7N43DzdNsufG0DGcnMADJOzMrlXzEaaa9U12+ltqnAa7UDvbMY2EPANodzsEszjuBn4LdurMuBG5mBQdE6+HV1dxYP41F6/eM8ToenZ/Ew+d3aAmaiy7YHzjQV8No0hbY81369Z5Y1vAUCjNaKtQD/Yg9e0i6Gp5fJvovb+HKQjOLbVZ894/vB6Hs6Oh1cj5IgGA8skjRmEtxLS5CfLUTVGNK8QfAhSV8A/QkAAP//lvMgPnQFAAA= + + + +- path: /etc/apt/apt.conf.d/99periodic + permissions: "0644" + owner: root + content: | + APT::Periodic::Update-Package-Lists "0"; + APT::Periodic::Download-Upgradeable-Packages "0"; + APT::Periodic::AutocleanInterval "0"; + APT::Periodic::Unattended-Upgrade "0"; + + + + + + + + +- path: /etc/systemd/system/docker.service.d/exec_start.conf + permissions: "0644" + owner: root + content: | + [Service] + ExecStart= + ExecStart=/usr/bin/dockerd -H fd:// --storage-driver=overlay2 
--bip= + ExecStartPost=/sbin/iptables -P FORWARD ACCEPT + #EOF + +- path: /etc/docker/daemon.json + permissions: "0644" + owner: root + content: | + { + "live-restore": true, + "log-driver": "json-file", + "log-opts": { + "max-size": "50m", + "max-file": "5" + }, + "data-root": "/mnt/containers" + } + + + + + + + + +- path: /etc/kubernetes/certs/ca.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + +- path: /etc/kubernetes/certs/client.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + + + +- path: /var/lib/kubelet/kubeconfig + permissions: "0644" + owner: root + content: | + apiVersion: v1 + kind: Config + clusters: + - name: localcluster + cluster: + certificate-authority: /etc/kubernetes/certs/ca.crt + server: https://:443 + users: + - name: client + user: + client-certificate: /etc/kubernetes/certs/client.crt + client-key: /etc/kubernetes/certs/client.key + contexts: + - context: + cluster: localcluster + user: client + name: localclustercontext + current-context: localclustercontext + #EOF + +- path: /etc/default/kubelet + permissions: "0644" + owner: root + content: | + KUBELET_FLAGS=--address=0.0.0.0 --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroups-per-qos=true --client-ca-file=/etc/kubernetes/certs/ca.crt --cluster-dns=10.0.0.10 --cluster-domain=cluster.local --enforce-node-allocatable=pods --event-qps=0 --eviction-hard=memory.available<750Mi,nodefs.available<10%,nodefs.inodesFree<5% --feature-gates=RotateKubeletServerCertificate=true,a=b,PodPriority=true,x=y --image-gc-high-threshold=85 --image-gc-low-threshold=80 --kube-reserved=cpu=100m,memory=1638Mi --max-pods=110 --node-status-update-frequency=10s --pod-manifest-path=/etc/kubernetes/manifests --pod-max-pids=-1 --protect-kernel-defaults=true --read-only-port=10255 --resolv-conf=/etc/resolv.conf --rotate-certificates=true --streaming-connection-idle-timeout=4h0m0s --system-reserved=cpu=2,memory=1Gi --tls-cert-file=/etc/kubernetes/certs/kubeletserver.crt --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --tls-private-key-file=/etc/kubernetes/certs/kubeletserver.key + KUBELET_REGISTER_SCHEDULABLE=true + KUBELET_IMAGE=hyperkube-amd64:v1.15.7 + + + KUBELET_NODE_LABELS=kubernetes.azure.com/role=agent,node-role.kubernetes.io/agent=,kubernetes.io/role=agent,agentpool=agent2,storageprofile=managed,storagetier=Premium_LRS,kubernetes.azure.com/cluster=',variables('labelResourceGroup'),' + + #EOF + +- path: /opt/azure/containers/kubelet.sh + permissions: "0755" + owner: root + content: | + #!/bin/bash + + + + #EOF + +runcmd: +- set -x +- . /opt/azure/containers/provision_source.sh +- aptmarkWALinuxAgent hold +'))] \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line16.sh b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line16.sh new file mode 100644 index 00000000000..e51aeb3fea9 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line16.sh @@ -0,0 +1,160 @@ +#!/bin/bash +ERR_FILE_WATCH_TIMEOUT=6 +set -x +if [ -f /opt/azure/containers/provision.complete ]; then + echo "Already ran to success exiting..." 
+ exit 0 +fi + +echo $(date),$(hostname), startcustomscript>>/opt/m + +for i in $(seq 1 3600); do + if [ -s /opt/azure/containers/provision_source.sh ]; then + grep -Fq '#HELPERSEOF' /opt/azure/containers/provision_source.sh && break + fi + if [ $i -eq 3600 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi +done +sed -i "/#HELPERSEOF/d" /opt/azure/containers/provision_source.sh +source /opt/azure/containers/provision_source.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_installs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_installs.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_configs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_configs.sh + +set +x +ETCD_PEER_CERT=$(echo ${ETCD_PEER_CERTIFICATES} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +ETCD_PEER_KEY=$(echo ${ETCD_PEER_PRIVATE_KEYS} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +set -x + +if [[ $OS == $COREOS_OS_NAME ]]; then + echo "Changing default kubectl bin location" + KUBECTL=/opt/kubectl +fi + +if [ -f /var/run/reboot-required ]; then + REBOOTREQUIRED=true +else + REBOOTREQUIRED=false +fi + +configureAdminUser +cleanUpContainerd + + +if [[ "${GPU_NODE}" != "true" ]]; then + cleanUpGPUDrivers +fi + +VHD_LOGS_FILEPATH=/opt/azure/vhd-install.complete +if [ -f $VHD_LOGS_FILEPATH ]; then + echo "detected golden image pre-install" + export -f retrycmd_if_failure + export -f cleanUpContainerImages + export KUBERNETES_VERSION + echo "start to clean up container images" + bash -c cleanUpContainerImages & + FULL_INSTALL_REQUIRED=false +else + if [[ "${IS_VHD}" = true ]]; then + echo "Using VHD distro but file $VHD_LOGS_FILEPATH not found" + exit $ERR_VHD_FILE_NOT_FOUND + fi + FULL_INSTALL_REQUIRED=true +fi + +if [[ $OS == $UBUNTU_OS_NAME ]] && [ "$FULL_INSTALL_REQUIRED" = "true" ]; then + installDeps +else + echo "Golden image; skipping dependencies installation" +fi + +if [[ $OS == $UBUNTU_OS_NAME ]]; then + ensureAuditD +fi +installContainerRuntime + + +installNetworkPlugin + +installKubeletAndKubectl + +if [[ $OS != $COREOS_OS_NAME ]]; then + ensureRPC +fi + +createKubeManifestDir + +ensureContainerRuntime + +configureK8s + +configureCNI + + + +ensureKubelet +ensureJournal + +if $FULL_INSTALL_REQUIRED; then + if [[ $OS == $UBUNTU_OS_NAME ]]; then + + echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind + sed -i "13i\echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind\n" /etc/rc.local + fi +fi +rm -f /etc/apt/apt.conf.d/99periodic +if [[ $OS == $UBUNTU_OS_NAME ]]; then + apt_get_purge 20 30 120 apache2-utils & +fi + + +VALIDATION_ERR=0 +API_SERVER_DNS_RETRIES=20 +if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_DNS_RETRIES=200 +fi +RES=$(retrycmd_if_failure ${API_SERVER_DNS_RETRIES} 1 3 nslookup ${API_SERVER_NAME}) +STS=$? 
+if [[ $STS != 0 ]]; then + if [[ $RES == *"168.63.129.16"* ]]; then + VALIDATION_ERR=$ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL + else + VALIDATION_ERR=$ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL + fi +else + API_SERVER_CONN_RETRIES=50 + if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_CONN_RETRIES=100 + fi + retrycmd_if_failure ${API_SERVER_CONN_RETRIES} 1 3 nc -vz ${API_SERVER_NAME} 443 || VALIDATION_ERR=$ERR_K8S_API_SERVER_CONN_FAIL +fi + + + +if $REBOOTREQUIRED; then + echo 'reboot required, rebooting node in 1 minute' + /bin/bash -c "shutdown -r 1 &" + if [[ $OS == $UBUNTU_OS_NAME ]]; then + aptmarkWALinuxAgent unhold & + fi +else + if [[ $OS == $UBUNTU_OS_NAME ]]; then + /usr/lib/apt/apt.systemd.daily & + aptmarkWALinuxAgent unhold & + fi +fi + +echo "Custom script finished. API server connection check code:" $VALIDATION_ERR +echo $(date),$(hostname), endcustomscript>>/opt/m +mkdir -p /opt/azure/containers && touch /opt/azure/containers/provision.complete +ps auxfww > /opt/azure/provision-ps.log & + +exit $VALIDATION_ERR + +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line23.sh b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line23.sh new file mode 100644 index 00000000000..1f074afdfa1 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line23.sh @@ -0,0 +1,276 @@ +#!/bin/bash + +CC_SERVICE_IN_TMP=/opt/azure/containers/cc-proxy.service.in +CC_SOCKET_IN_TMP=/opt/azure/containers/cc-proxy.socket.in +CNI_CONFIG_DIR="/etc/cni/net.d" +CNI_BIN_DIR="/opt/cni/bin" +CNI_DOWNLOADS_DIR="/opt/cni/downloads" +CONTAINERD_DOWNLOADS_DIR="/opt/containerd/downloads" +K8S_DOWNLOADS_DIR="/opt/kubernetes/downloads" +UBUNTU_RELEASE=$(lsb_release -r -s) + +removeMoby() { + apt-get purge -y moby-engine moby-cli +} + +removeContainerd() { + apt-get purge -y moby-containerd +} + +cleanupContainerdDlFiles() { + rm -rf $CONTAINERD_DOWNLOADS_DIR +} + +installDeps() { + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/packages-microsoft-prod.deb > /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT + retrycmd_if_failure 60 5 10 dpkg -i /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_PKG_ADD_FAIL + aptmarkWALinuxAgent hold + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT + apt_get_dist_upgrade || exit $ERR_APT_DIST_UPGRADE_TIMEOUT + for apt_package in apache2-utils apt-transport-https blobfuse=1.1.1 ca-certificates ceph-common cgroup-lite cifs-utils conntrack cracklib-runtime ebtables ethtool fuse git glusterfs-client htop iftop init-system-helpers iotop iproute2 ipset iptables jq libpam-pwquality libpwquality-tools mount nfs-common pigz socat sysstat traceroute util-linux xz-utils zip; do + if ! apt_get_install 30 1 600 $apt_package; then + journalctl --no-pager -u $apt_package + exit $ERR_APT_INSTALL_TIMEOUT + fi + done + if [[ "${AUDITD_ENABLED}" == true ]]; then + if ! 
apt_get_install 30 1 600 auditd; then + journalctl --no-pager -u auditd + exit $ERR_APT_INSTALL_TIMEOUT + fi + fi +} + +installGPUDrivers() { + mkdir -p $GPU_DEST/tmp + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/gpgkey > $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-key add $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/ubuntu${UBUNTU_RELEASE}/nvidia-docker.list > $GPU_DEST/tmp/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 cat $GPU_DEST/tmp/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + apt_get_update + retrycmd_if_failure 30 5 3600 apt-get install -y linux-headers-$(uname -r) gcc make dkms || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + retrycmd_if_failure 30 5 60 curl -fLS https://us.download.nvidia.com/tesla/$GPU_DV/NVIDIA-Linux-x86_64-${GPU_DV}.run -o ${GPU_DEST}/nvidia-drivers-${GPU_DV} || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + tmpDir=$GPU_DEST/tmp + if ! ( + set -e -o pipefail + cd "${tmpDir}" + retrycmd_if_failure 30 5 3600 apt-get download nvidia-docker2="${NVIDIA_DOCKER_VERSION}+${NVIDIA_DOCKER_SUFFIX}" || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + ); then + exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + fi +} + +installSGXDrivers() { + echo "Installing SGX driver" + local VERSION + VERSION=$(grep DISTRIB_RELEASE /etc/*-release| cut -f 2 -d "=") + case $VERSION in + "18.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer18.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "16.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer16.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "*") + echo "Version $VERSION is not supported" + exit 1 + ;; + esac + + local PACKAGES="make gcc dkms" + wait_for_apt_locks + retrycmd_if_failure 30 5 3600 apt-get -y install $PACKAGES || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + + local SGX_DRIVER + SGX_DRIVER=$(basename $SGX_DRIVER_URL) + local OE_DIR=/opt/azure/containers/oe + mkdir -p ${OE_DIR} + + retrycmd_if_failure 120 5 25 curl -fsSL ${SGX_DRIVER_URL} -o ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + chmod a+x ${OE_DIR}/${SGX_DRIVER} + ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_START_FAIL +} + +installContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + installMoby + fi + +} + +installMoby() { + CURRENT_VERSION=$(dockerd --version | grep "Docker version" | cut -d "," -f 1 | cut -d " " -f 3 | cut -d "+" -f 1) + if [[ "$CURRENT_VERSION" == "${MOBY_VERSION}" ]]; then + echo "dockerd $MOBY_VERSION is already installed, skipping Moby download" + else + removeMoby + getMobyPkg + MOBY_CLI=${MOBY_VERSION} + if [[ "${MOBY_CLI}" == "3.0.4" ]]; then + MOBY_CLI="3.0.3" + fi + apt_get_install 20 30 120 moby-engine=${MOBY_VERSION}* moby-cli=${MOBY_CLI}* --allow-downgrades || exit $ERR_MOBY_INSTALL_TIMEOUT + fi +} + + + +getMobyPkg() { + retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/prod.list > /tmp/microsoft-prod.list || exit $ERR_MOBY_APT_LIST_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft-prod.list /etc/apt/sources.list.d/ || exit $ERR_MOBY_APT_LIST_TIMEOUT + 
retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /tmp/microsoft.gpg || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft.gpg /etc/apt/trusted.gpg.d/ || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT +} + +installNetworkPlugin() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + installAzureCNI + fi + installCNI + rm -rf $CNI_DOWNLOADS_DIR & +} + +downloadCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadAzureCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${VNET_CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadContainerd() { + CONTAINERD_DOWNLOAD_URL="${CONTAINERD_DOWNLOAD_URL_BASE}cri-containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz" + mkdir -p $CONTAINERD_DOWNLOADS_DIR + CONTAINERD_TGZ_TMP=${CONTAINERD_DOWNLOAD_URL##*/} + retrycmd_get_tarball 120 5 "$CONTAINERD_DOWNLOADS_DIR/${CONTAINERD_TGZ_TMP}" ${CONTAINERD_DOWNLOAD_URL} || exit $ERR_CONTAINERD_DOWNLOAD_TIMEOUT +} + +installCNI() { + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadCNI + fi + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR + chown -R root:root $CNI_BIN_DIR + chmod -R 755 $CNI_BIN_DIR +} + +installAzureCNI() { + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadAzureCNI + fi + mkdir -p $CNI_CONFIG_DIR + chown -R root:root $CNI_CONFIG_DIR + chmod 755 $CNI_CONFIG_DIR + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR +} + +installImg() { + img_filepath=/usr/local/bin/img + retrycmd_get_executable 120 5 $img_filepath "https://acs-mirror.azureedge.net/img/img-linux-amd64-v0.5.6" ls || exit $ERR_IMG_DOWNLOAD_TIMEOUT +} + +extractHyperkube() { + CLI_TOOL=$1 + path="/home/hyperkube-downloads/${KUBERNETES_VERSION}" + pullContainerImage $CLI_TOOL ${HYPERKUBE_URL} + if [[ "$CLI_TOOL" == "docker" ]]; then + mkdir -p "$path" + # Check if we can extract kubelet and kubectl directly from hyperkube's binary folder + if docker run --rm --entrypoint "" -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /usr/local/bin/{kubelet,kubectl} $path"; then + mv "$path/kubelet" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/kubectl" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + return + else + docker run --rm -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /hyperkube $path" + fi + else + img unpack -o "$path" ${HYPERKUBE_URL} + fi + + if [[ $OS == $COREOS_OS_NAME ]]; then + cp "$path/hyperkube" "/opt/kubelet" + mv "$path/hyperkube" "/opt/kubectl" + chmod a+x /opt/kubelet /opt/kubectl + else + cp "$path/hyperkube" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/hyperkube" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + fi +} + +installKubeletAndKubectl() { + if [[ ! 
-f "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" ]]; then + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + extractHyperkube "docker" + else + installImg + extractHyperkube "img" + fi + fi + mv "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" "/usr/local/bin/kubelet" + mv "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" "/usr/local/bin/kubectl" + chmod a+x /usr/local/bin/kubelet /usr/local/bin/kubectl + rm -rf /usr/local/bin/kubelet-* /usr/local/bin/kubectl-* /home/hyperkube-downloads & +} + +pullContainerImage() { + CLI_TOOL=$1 + DOCKER_IMAGE_URL=$2 + retrycmd_if_failure 60 1 1200 $CLI_TOOL pull $DOCKER_IMAGE_URL || exit $ERR_CONTAINER_IMG_PULL_TIMEOUT +} + +cleanUpContainerImages() { + function cleanUpHyperkubeImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'hyperkube') + local exit_code=$? + if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + function cleanUpControllerManagerImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'cloud-controller-manager') + local exit_code=$? + if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + export -f cleanUpHyperkubeImagesRun + export -f cleanUpControllerManagerImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpHyperkubeImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpControllerManagerImagesRun +} + +cleanUpGPUDrivers() { + rm -Rf $GPU_DEST + rm -f /etc/apt/sources.list.d/nvidia-docker.list +} + +cleanUpContainerd() { + rm -Rf $CONTAINERD_DOWNLOADS_DIR +} + +overrideNetworkConfig() { + CONFIG_FILEPATH="/etc/cloud/cloud.cfg.d/80_azure_net_config.cfg" + touch ${CONFIG_FILEPATH} + cat << EOF >> ${CONFIG_FILEPATH} +datasource: + Azure: + apply_network_config: false +EOF +} +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line30.sh b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line30.sh new file mode 100644 index 00000000000..ce857cb431e --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line30.sh @@ -0,0 +1,337 @@ +#!/bin/bash +NODE_INDEX=$(hostname | tail -c 2) +NODE_NAME=$(hostname) +if [[ $OS == $COREOS_OS_NAME ]]; then + PRIVATE_IP=$(ip a show eth0 | grep -Po 'inet \K[\d.]+') +else + PRIVATE_IP=$(hostname -I | cut -d' ' -f1) +fi +ETCD_PEER_URL="https://${PRIVATE_IP}:2380" +ETCD_CLIENT_URL="https://${PRIVATE_IP}:2379" + +configureAdminUser(){ + chage -E -1 -I -1 -m 0 -M 99999 "${ADMINUSER}" + chage -l "${ADMINUSER}" +} + +configureSecrets(){ + APISERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/apiserver.key" + touch "${APISERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${APISERVER_PRIVATE_KEY_PATH}" + chown root:root "${APISERVER_PRIVATE_KEY_PATH}" + + CA_PRIVATE_KEY_PATH="/etc/kubernetes/certs/ca.key" + touch "${CA_PRIVATE_KEY_PATH}" + chmod 0600 "${CA_PRIVATE_KEY_PATH}" + chown root:root "${CA_PRIVATE_KEY_PATH}" + + ETCD_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdserver.key" + touch "${ETCD_SERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_SERVER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_SERVER_PRIVATE_KEY_PATH}" + fi + + ETCD_CLIENT_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdclient.key" + touch 
"${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chown root:root "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + + ETCD_PEER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.key" + touch "${ETCD_PEER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_PEER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_PEER_PRIVATE_KEY_PATH}" + fi + + ETCD_SERVER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdserver.crt" + touch "${ETCD_SERVER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_SERVER_CERTIFICATE_PATH}" + chown root:root "${ETCD_SERVER_CERTIFICATE_PATH}" + + ETCD_CLIENT_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdclient.crt" + touch "${ETCD_CLIENT_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_CLIENT_CERTIFICATE_PATH}" + chown root:root "${ETCD_CLIENT_CERTIFICATE_PATH}" + + ETCD_PEER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.crt" + touch "${ETCD_PEER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_PEER_CERTIFICATE_PATH}" + chown root:root "${ETCD_PEER_CERTIFICATE_PATH}" + + set +x + echo "${APISERVER_PRIVATE_KEY}" | base64 --decode > "${APISERVER_PRIVATE_KEY_PATH}" + echo "${CA_PRIVATE_KEY}" | base64 --decode > "${CA_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_PRIVATE_KEY}" | base64 --decode > "${ETCD_SERVER_PRIVATE_KEY_PATH}" + echo "${ETCD_CLIENT_PRIVATE_KEY}" | base64 --decode > "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + echo "${ETCD_PEER_KEY}" | base64 --decode > "${ETCD_PEER_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_CERTIFICATE}" | base64 --decode > "${ETCD_SERVER_CERTIFICATE_PATH}" + echo "${ETCD_CLIENT_CERTIFICATE}" | base64 --decode > "${ETCD_CLIENT_CERTIFICATE_PATH}" + echo "${ETCD_PEER_CERT}" | base64 --decode > "${ETCD_PEER_CERTIFICATE_PATH}" +} + +ensureRPC() { + systemctlEnableAndStart rpcbind || exit $ERR_SYSTEMCTL_START_FAIL + systemctlEnableAndStart rpc-statd || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureAuditD() { + if [[ "${AUDITD_ENABLED}" == true ]]; then + systemctlEnableAndStart auditd || exit $ERR_SYSTEMCTL_START_FAIL + else + if apt list --installed | grep 'auditd'; then + apt_get_purge 20 30 120 auditd & + fi + fi +} + +configureKubeletServerCert() { + KUBELET_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/kubeletserver.key" + KUBELET_SERVER_CERT_PATH="/etc/kubernetes/certs/kubeletserver.crt" + + openssl genrsa -out $KUBELET_SERVER_PRIVATE_KEY_PATH 2048 + openssl req -new -x509 -days 7300 -key $KUBELET_SERVER_PRIVATE_KEY_PATH -out $KUBELET_SERVER_CERT_PATH -subj "/CN=${NODE_NAME}" +} + +configureK8s() { + KUBELET_PRIVATE_KEY_PATH="/etc/kubernetes/certs/client.key" + touch "${KUBELET_PRIVATE_KEY_PATH}" + chmod 0600 "${KUBELET_PRIVATE_KEY_PATH}" + chown root:root "${KUBELET_PRIVATE_KEY_PATH}" + + APISERVER_PUBLIC_KEY_PATH="/etc/kubernetes/certs/apiserver.crt" + touch "${APISERVER_PUBLIC_KEY_PATH}" + chmod 0644 "${APISERVER_PUBLIC_KEY_PATH}" + chown root:root "${APISERVER_PUBLIC_KEY_PATH}" + + AZURE_JSON_PATH="/etc/kubernetes/azure.json" + touch "${AZURE_JSON_PATH}" + chmod 0600 "${AZURE_JSON_PATH}" + chown root:root "${AZURE_JSON_PATH}" + + set +x + echo "${KUBELET_PRIVATE_KEY}" | base64 --decode > "${KUBELET_PRIVATE_KEY_PATH}" + echo "${APISERVER_PUBLIC_KEY}" | base64 --decode > "${APISERVER_PUBLIC_KEY_PATH}" + + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\\/\\\\} + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\"/\\\"} + cat << EOF > "${AZURE_JSON_PATH}" +{ + "cloud": "AzurePublicCloud", + "tenantId": "${TENANT_ID}", + "subscriptionId": "${SUBSCRIPTION_ID}", + "aadClientId": 
"${SERVICE_PRINCIPAL_CLIENT_ID}", + "aadClientSecret": "${SERVICE_PRINCIPAL_CLIENT_SECRET}", + "resourceGroup": "${RESOURCE_GROUP}", + "location": "${LOCATION}", + "vmType": "${VM_TYPE}", + "subnetName": "${SUBNET}", + "securityGroupName": "${NETWORK_SECURITY_GROUP}", + "vnetName": "${VIRTUAL_NETWORK}", + "vnetResourceGroup": "${VIRTUAL_NETWORK_RESOURCE_GROUP}", + "routeTableName": "${ROUTE_TABLE}", + "primaryAvailabilitySetName": "${PRIMARY_AVAILABILITY_SET}", + "primaryScaleSetName": "${PRIMARY_SCALE_SET}", + "cloudProviderBackoffMode": "${CLOUDPROVIDER_BACKOFF_MODE}", + "cloudProviderBackoff": ${CLOUDPROVIDER_BACKOFF}, + "cloudProviderBackoffRetries": ${CLOUDPROVIDER_BACKOFF_RETRIES}, + "cloudProviderBackoffExponent": ${CLOUDPROVIDER_BACKOFF_EXPONENT}, + "cloudProviderBackoffDuration": ${CLOUDPROVIDER_BACKOFF_DURATION}, + "cloudProviderBackoffJitter": ${CLOUDPROVIDER_BACKOFF_JITTER}, + "cloudProviderRateLimit": ${CLOUDPROVIDER_RATELIMIT}, + "cloudProviderRateLimitQPS": ${CLOUDPROVIDER_RATELIMIT_QPS}, + "cloudProviderRateLimitBucket": ${CLOUDPROVIDER_RATELIMIT_BUCKET}, + "cloudProviderRateLimitQPSWrite": ${CLOUDPROVIDER_RATELIMIT_QPS_WRITE}, + "cloudProviderRateLimitBucketWrite": ${CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE}, + "useManagedIdentityExtension": ${USE_MANAGED_IDENTITY_EXTENSION}, + "userAssignedIdentityID": "${USER_ASSIGNED_IDENTITY_ID}", + "useInstanceMetadata": ${USE_INSTANCE_METADATA}, + "loadBalancerSku": "${LOAD_BALANCER_SKU}", + "disableOutboundSNAT": ${LOAD_BALANCER_DISABLE_OUTBOUND_SNAT}, + "excludeMasterFromStandardLB": ${EXCLUDE_MASTER_FROM_STANDARD_LB}, + "providerVaultName": "${KMS_PROVIDER_VAULT_NAME}", + "maximumLoadBalancerRuleCount": ${MAXIMUM_LOADBALANCER_RULE_COUNT}, + "providerKeyName": "k8s", + "providerKeyVersion": "" +} +EOF + set -x + if [[ "${CLOUDPROVIDER_BACKOFF_MODE}" = "v2" ]]; then + sed -i "/cloudProviderBackoffExponent/d" /etc/kubernetes/azure.json + sed -i "/cloudProviderBackoffJitter/d" /etc/kubernetes/azure.json + fi + + configureKubeletServerCert +} + +configureCNI() { + + retrycmd_if_failure 120 5 25 modprobe br_netfilter || exit $ERR_MODPROBE_FAIL + echo -n "br_netfilter" > /etc/modules-load.d/br_netfilter.conf + configureCNIIPTables + +} + +configureCNIIPTables() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + mv $CNI_BIN_DIR/10-azure.conflist $CNI_CONFIG_DIR/ + chmod 600 $CNI_CONFIG_DIR/10-azure.conflist + if [[ "${NETWORK_POLICY}" == "calico" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + elif [[ "${NETWORK_POLICY}" == "" || "${NETWORK_POLICY}" == "none" ]] && [[ "${NETWORK_MODE}" == "transparent" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + fi + /sbin/ebtables -t nat --list + fi +} + +ensureContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + ensureDocker + fi + +} + + + +ensureDocker() { + DOCKER_SERVICE_EXEC_START_FILE=/etc/systemd/system/docker.service.d/exec_start.conf + wait_for_file 1200 1 $DOCKER_SERVICE_EXEC_START_FILE || exit $ERR_FILE_WATCH_TIMEOUT + usermod -aG docker ${ADMINUSER} + DOCKER_MOUNT_FLAGS_SYSTEMD_FILE=/etc/systemd/system/docker.service.d/clear_mount_propagation_flags.conf + if [[ $OS != $COREOS_OS_NAME ]]; then + wait_for_file 1200 1 $DOCKER_MOUNT_FLAGS_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + fi + DOCKER_JSON_FILE=/etc/docker/daemon.json + for i in $(seq 1 1200); do + if [ -s $DOCKER_JSON_FILE ]; then + jq '.' 
< $DOCKER_JSON_FILE && break + fi + if [ $i -eq 1200 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi + done + systemctlEnableAndStart docker || exit $ERR_DOCKER_START_FAIL + + DOCKER_MONITOR_SYSTEMD_TIMER_FILE=/etc/systemd/system/docker-monitor.timer + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_TIMER_FILE || exit $ERR_FILE_WATCH_TIMEOUT + DOCKER_MONITOR_SYSTEMD_FILE=/etc/systemd/system/docker-monitor.service + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart docker-monitor.timer || exit $ERR_SYSTEMCTL_START_FAIL +} + + + + + +ensureKubelet() { + KUBELET_DEFAULT_FILE=/etc/default/kubelet + wait_for_file 1200 1 $KUBELET_DEFAULT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBECONFIG_FILE=/var/lib/kubelet/kubeconfig + wait_for_file 1200 1 $KUBECONFIG_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBELET_RUNTIME_CONFIG_SCRIPT_FILE=/opt/azure/containers/kubelet.sh + wait_for_file 1200 1 $KUBELET_RUNTIME_CONFIG_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart kubelet || exit $ERR_KUBELET_START_FAIL + + + +} + +ensureLabelNodes() { + LABEL_NODES_SCRIPT_FILE=/opt/azure/containers/label-nodes.sh + wait_for_file 1200 1 $LABEL_NODES_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + LABEL_NODES_SYSTEMD_FILE=/etc/systemd/system/label-nodes.service + wait_for_file 1200 1 $LABEL_NODES_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart label-nodes || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureJournal() { + { + echo "Storage=persistent" + echo "SystemMaxUse=1G" + echo "RuntimeMaxUse=1G" + echo "ForwardToSyslog=yes" + } >> /etc/systemd/journald.conf + systemctlEnableAndStart systemd-journald || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureK8sControlPlane() { + if $REBOOTREQUIRED || [ "$NO_OUTBOUND" = "true" ]; then + return + fi + retrycmd_if_failure 120 5 25 $KUBECTL 2>/dev/null cluster-info || exit $ERR_K8S_RUNNING_TIMEOUT +} + +createKubeManifestDir() { + KUBEMANIFESTDIR=/etc/kubernetes/manifests + mkdir -p $KUBEMANIFESTDIR +} + +writeKubeConfig() { + KUBECONFIGDIR=/home/$ADMINUSER/.kube + KUBECONFIGFILE=$KUBECONFIGDIR/config + mkdir -p $KUBECONFIGDIR + touch $KUBECONFIGFILE + chown $ADMINUSER:$ADMINUSER $KUBECONFIGDIR + chown $ADMINUSER:$ADMINUSER $KUBECONFIGFILE + chmod 700 $KUBECONFIGDIR + chmod 600 $KUBECONFIGFILE + set +x + echo " +--- +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: \"$CA_CERTIFICATE\" + server: $KUBECONFIG_SERVER + name: \"$MASTER_FQDN\" +contexts: +- context: + cluster: \"$MASTER_FQDN\" + user: \"$MASTER_FQDN-admin\" + name: \"$MASTER_FQDN\" +current-context: \"$MASTER_FQDN\" +kind: Config +users: +- name: \"$MASTER_FQDN-admin\" + user: + client-certificate-data: \"$KUBECONFIG_CERTIFICATE\" + client-key-data: \"$KUBECONFIG_KEY\" +" > $KUBECONFIGFILE + set -x +} + +configClusterAutoscalerAddon() { + CLUSTER_AUTOSCALER_ADDON_FILE=/etc/kubernetes/addons/cluster-autoscaler-deployment.yaml + wait_for_file 1200 1 $CLUSTER_AUTOSCALER_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_SECRET | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SUBSCRIPTION_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $TENANT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $RESOURCE_GROUP | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE +} + +configACIConnectorAddon() { + ACI_CONNECTOR_CREDENTIALS=$(printf 
"{\"clientId\": \"%s\", \"clientSecret\": \"%s\", \"tenantId\": \"%s\", \"subscriptionId\": \"%s\", \"activeDirectoryEndpointUrl\": \"https://login.microsoftonline.com\",\"resourceManagerEndpointUrl\": \"https://management.azure.com/\", \"activeDirectoryGraphResourceId\": \"https://graph.windows.net/\", \"sqlManagementEndpointUrl\": \"https://management.core.windows.net:8443/\", \"galleryEndpointUrl\": \"https://gallery.azure.com/\", \"managementEndpointUrl\": \"https://management.core.windows.net/\"}" "$SERVICE_PRINCIPAL_CLIENT_ID" "$SERVICE_PRINCIPAL_CLIENT_SECRET" "$TENANT_ID" "$SUBSCRIPTION_ID" | base64 -w 0) + + openssl req -newkey rsa:4096 -new -nodes -x509 -days 3650 -keyout /etc/kubernetes/certs/aci-connector-key.pem -out /etc/kubernetes/certs/aci-connector-cert.pem -subj "/C=US/ST=CA/L=virtualkubelet/O=virtualkubelet/OU=virtualkubelet/CN=virtualkubelet" + ACI_CONNECTOR_KEY=$(base64 /etc/kubernetes/certs/aci-connector-key.pem -w0) + ACI_CONNECTOR_CERT=$(base64 /etc/kubernetes/certs/aci-connector-cert.pem -w0) + + ACI_CONNECTOR_ADDON_FILE=/etc/kubernetes/addons/aci-connector-deployment.yaml + wait_for_file 1200 1 $ACI_CONNECTOR_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$ACI_CONNECTOR_CREDENTIALS|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$RESOURCE_GROUP|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_CERT|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_KEY|g" $ACI_CONNECTOR_ADDON_FILE +} + +configAzurePolicyAddon() { + AZURE_POLICY_ADDON_FILE=/etc/kubernetes/addons/azure-policy-deployment.yaml + sed -i "s||/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP|g" $AZURE_POLICY_ADDON_FILE +} + + +#EOF diff --git a/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line43.sh b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line43.sh new file mode 100644 index 00000000000..e708f006a14 --- /dev/null +++ b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line43.sh @@ -0,0 +1,38 @@ +[Unit] +Description=Kubelet +ConditionPathExists=/usr/local/bin/kubelet + + +[Service] +Restart=always +EnvironmentFile=/etc/default/kubelet +SuccessExitStatus=143 +ExecStartPre=/bin/bash /opt/azure/containers/kubelet.sh +ExecStartPre=/bin/mkdir -p /var/lib/kubelet +ExecStartPre=/bin/mkdir -p /var/lib/cni +ExecStartPre=/bin/bash -c "if [ $(mount | grep \"/var/lib/kubelet\" | wc -l) -le 0 ] ; then /bin/mount --bind /var/lib/kubelet /var/lib/kubelet ; fi" +ExecStartPre=/bin/mount --make-shared /var/lib/kubelet + + +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_retries2=8 +ExecStartPre=/sbin/sysctl -w net.core.somaxconn=16384 +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_max_syn_backlog=16384 +ExecStartPre=/sbin/sysctl -w net.core.message_cost=40 +ExecStartPre=/sbin/sysctl -w net.core.message_burst=80 + +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh1=4096 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh2=8192 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh3=16384 + +ExecStartPre=-/sbin/ebtables -t nat --list +ExecStartPre=-/sbin/iptables -t nat --numeric --list +ExecStart=/usr/local/bin/kubelet \ + --enable-server \ + --node-labels="${KUBELET_NODE_LABELS}" \ + --v=2 \ + --volume-plugin-dir=/etc/kubernetes/volumeplugins \ + $KUBELET_FLAGS \ + $KUBELET_REGISTER_NODE $KUBELET_REGISTER_WITH_TAINTS + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line9.sh b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line9.sh new file mode 100644 index 00000000000..08cbc16e86d --- /dev/null +++ 
b/pkg/agent/testdata/AKSUbuntu1604+TempDisk/line9.sh @@ -0,0 +1,305 @@ +#!/bin/bash + +ERR_SYSTEMCTL_START_FAIL=4 +ERR_CLOUD_INIT_TIMEOUT=5 +ERR_FILE_WATCH_TIMEOUT=6 +ERR_HOLD_WALINUXAGENT=7 +ERR_RELEASE_HOLD_WALINUXAGENT=8 +ERR_APT_INSTALL_TIMEOUT=9 +ERR_NTP_INSTALL_TIMEOUT=10 +ERR_NTP_START_TIMEOUT=11 +ERR_STOP_SYSTEMD_TIMESYNCD_TIMEOUT=12 +ERR_DOCKER_INSTALL_TIMEOUT=20 +ERR_DOCKER_DOWNLOAD_TIMEOUT=21 +ERR_DOCKER_KEY_DOWNLOAD_TIMEOUT=22 +ERR_DOCKER_APT_KEY_TIMEOUT=23 +ERR_DOCKER_START_FAIL=24 +ERR_MOBY_APT_LIST_TIMEOUT=25 +ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT=26 +ERR_MOBY_INSTALL_TIMEOUT=27 +ERR_K8S_RUNNING_TIMEOUT=30 +ERR_K8S_DOWNLOAD_TIMEOUT=31 +ERR_KUBECTL_NOT_FOUND=32 +ERR_IMG_DOWNLOAD_TIMEOUT=33 +ERR_KUBELET_START_FAIL=34 +ERR_CONTAINER_IMG_PULL_TIMEOUT=35 +ERR_CNI_DOWNLOAD_TIMEOUT=41 +ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT=42 +ERR_MS_PROD_DEB_PKG_ADD_FAIL=43 + +ERR_SYSTEMD_INSTALL_FAIL=48 +ERR_MODPROBE_FAIL=49 +ERR_OUTBOUND_CONN_FAIL=50 +ERR_K8S_API_SERVER_CONN_FAIL=51 +ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL=52 +ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL=53 +ERR_KATA_KEY_DOWNLOAD_TIMEOUT=60 +ERR_KATA_APT_KEY_TIMEOUT=61 +ERR_KATA_INSTALL_TIMEOUT=62 +ERR_CONTAINERD_DOWNLOAD_TIMEOUT=70 +ERR_CUSTOM_SEARCH_DOMAINS_FAIL=80 +ERR_GPU_DRIVERS_START_FAIL=84 +ERR_GPU_DRIVERS_INSTALL_TIMEOUT=85 +ERR_GPU_DEVICE_PLUGIN_START_FAIL=86 +ERR_GPU_INFO_ROM_CORRUPTED=87 +ERR_SGX_DRIVERS_INSTALL_TIMEOUT=90 +ERR_SGX_DRIVERS_START_FAIL=91 +ERR_APT_DAILY_TIMEOUT=98 +ERR_APT_UPDATE_TIMEOUT=99 +ERR_CSE_PROVISION_SCRIPT_NOT_READY_TIMEOUT=100 +ERR_APT_DIST_UPGRADE_TIMEOUT=101 +ERR_APT_PURGE_FAIL=102 +ERR_SYSCTL_RELOAD=103 +ERR_CIS_ASSIGN_ROOT_PW=111 +ERR_CIS_ASSIGN_FILE_PERMISSION=112 +ERR_PACKER_COPY_FILE=113 +ERR_CIS_APPLY_PASSWORD_CONFIG=115 +ERR_SYSTEMD_DOCKER_STOP_FAIL=116 + +ERR_VHD_FILE_NOT_FOUND=124 +ERR_VHD_BUILD_ERROR=125 + + +ERR_AZURE_STACK_GET_ARM_TOKEN=120 +ERR_AZURE_STACK_GET_NETWORK_CONFIGURATION=121 +ERR_AZURE_STACK_GET_SUBNET_PREFIX=122 + +OS=$(sort -r /etc/*-release | gawk 'match($0, /^(ID_LIKE=(coreos)|ID=(.*))$/, a) { print toupper(a[2] a[3]); exit }') +UBUNTU_OS_NAME="UBUNTU" +RHEL_OS_NAME="RHEL" +COREOS_OS_NAME="COREOS" +KUBECTL=/usr/local/bin/kubectl +DOCKER=/usr/bin/docker +export GPU_DV=418.126.02 +export GPU_DEST=/usr/local/nvidia +NVIDIA_DOCKER_VERSION=2.0.3 +DOCKER_VERSION=1.13.1-1 +NVIDIA_CONTAINER_RUNTIME_VERSION=2.0.0 +NVIDIA_DOCKER_SUFFIX=docker18.09.2-1 + +aptmarkWALinuxAgent() { + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-mark $1 walinuxagent || \ + if [[ "$1" == "hold" ]]; then + exit $ERR_HOLD_WALINUXAGENT + elif [[ "$1" == "unhold" ]]; then + exit $ERR_RELEASE_HOLD_WALINUXAGENT + fi +} + +retrycmd_if_failure() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + echo Executed \"$@\" $i times; + return 1 + else + sleep $wait_sleep + fi + done + echo Executed \"$@\" $i times; +} +retrycmd_if_failure_no_stats() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +retrycmd_get_tarball() { + tar_retries=$1; wait_sleep=$2; tarball=$3; url=$4 + echo "${tar_retries} retries" + for i in $(seq 1 $tar_retries); do + tar -tzf $tarball && break || \ + if [ $i -eq $tar_retries ]; then + return 1 + else + timeout 60 curl -fsSL $url -o $tarball + sleep $wait_sleep + fi + done +} +retrycmd_get_executable() { + 
retries=$1; wait_sleep=$2; filepath=$3; url=$4; validation_args=$5 + echo "${retries} retries" + for i in $(seq 1 $retries); do + $filepath $validation_args && break || \ + if [ $i -eq $retries ]; then + return 1 + else + timeout 30 curl -fsSL $url -o $filepath + chmod +x $filepath + sleep $wait_sleep + fi + done +} +wait_for_file() { + retries=$1; wait_sleep=$2; filepath=$3 + paved=/opt/azure/cloud-init-files.paved + grep -Fq "${filepath}" $paved && return 0 + for i in $(seq 1 $retries); do + grep -Fq '#EOF' $filepath && break + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + sed -i "/#EOF/d" $filepath + echo $filepath >> $paved +} +wait_for_apt_locks() { + while fuser /var/lib/dpkg/lock /var/lib/apt/lists/lock /var/cache/apt/archives/lock >/dev/null 2>&1; do + echo 'Waiting for release of apt locks' + sleep 3 + done +} +apt_get_update() { + retries=10 + apt_update_output=/tmp/apt-get-update.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + ! (apt-get update 2>&1 | tee $apt_update_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_update_output && break || \ + cat $apt_update_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get update $i times + wait_for_apt_locks +} +apt_get_install() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get install -o Dpkg::Options::="--force-confold" --no-install-recommends -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + apt_get_update + fi + done + echo Executed apt-get install --no-install-recommends -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_purge() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get purge -o Dpkg::Options::="--force-confold" -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + echo Executed apt-get purge -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_dist_upgrade() { + retries=10 + apt_dist_upgrade_output=/tmp/apt-get-dist-upgrade.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + apt-mark showhold + ! 
(apt-get dist-upgrade -y 2>&1 | tee $apt_dist_upgrade_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_dist_upgrade_output && break || \ + cat $apt_dist_upgrade_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get dist-upgrade $i times + wait_for_apt_locks +} +systemctl_restart() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl restart $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_stop() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl stop $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_disable() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl disable $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +sysctl_reload() { + retries=$1; wait_sleep=$2; timeout=$3 + for i in $(seq 1 $retries); do + timeout $timeout sysctl --system && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +version_gte() { + test "$(printf '%s\n' "$@" | sort -rV | head -n 1)" == "$1" +} + +systemctlEnableAndStart() { + systemctl_restart 100 5 30 $1 + RESTART_STATUS=$? + systemctl status $1 --no-pager -l > /var/log/azure/$1-status.log + if [ $RESTART_STATUS -ne 0 ]; then + echo "$1 could not be started" + return 1 + fi + if ! retrycmd_if_failure 120 5 25 systemctl enable $1; then + echo "$1 could not be enabled by systemctl" + return 1 + fi +} + +systemctlDisableAndStop() { + if [ systemctl list-units --full --all | grep -q "$1.service" ]; then + systemctl_stop 20 5 25 $1 || echo "$1 could not be stopped" + systemctl_disable 20 5 25 $1 || echo "$1 could not be disabled" + fi +} +#HELPERSEOF diff --git a/pkg/agent/testdata/RawUbuntu/CSECommand b/pkg/agent/testdata/RawUbuntu/CSECommand new file mode 100644 index 00000000000..e9161e49c96 --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/CSECommand @@ -0,0 +1 @@ +echo $(date),$(hostname); retrycmd_if_failure() { r=$1; w=$2; t=$3; shift && shift && shift; for i in $(seq 1 $r); do timeout $t ${@}; [ $? 
-eq 0 ] && break || if [ $i -eq $r ]; then return 1; else sleep $w; fi; done }; ERR_OUTBOUND_CONN_FAIL=50; retrycmd_if_failure 50 1 3 nc -vz mcr.microsoft.com 443 2>&1 || exit $ERR_OUTBOUND_CONN_FAIL; for i in $(seq 1 1200); do grep -Fq "EOF" /opt/azure/containers/provision.sh && break; if [ $i -eq 1200 ]; then exit 100; else sleep 1; fi; done; ADMINUSER=azureuser CONTAINERD_VERSION= MOBY_VERSION= TENANT_ID=tenantID KUBERNETES_VERSION=1.15.7 HYPERKUBE_URL=hyperkube-amd64:v1.15.7 APISERVER_PUBLIC_KEY= SUBSCRIPTION_ID=subID RESOURCE_GROUP=resourceGroupName LOCATION=southcentralus VM_TYPE=vmss SUBNET=subnet1 NETWORK_SECURITY_GROUP=aks-agentpool-36873793-nsg VIRTUAL_NETWORK=aks-vnet-07752737 VIRTUAL_NETWORK_RESOURCE_GROUP=MC_rg ROUTE_TABLE=aks-agentpool-36873793-routetable PRIMARY_AVAILABILITY_SET= PRIMARY_SCALE_SET=aks-agent2-36873793-vmss SERVICE_PRINCIPAL_CLIENT_ID=ClientID SERVICE_PRINCIPAL_CLIENT_SECRET='Secret' KUBELET_PRIVATE_KEY= NETWORK_PLUGIN= NETWORK_POLICY= VNET_CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/azure-cni/v1.1.3/binaries/azure-vnet-cni-linux-amd64-v1.1.3.tgz CNI_PLUGINS_URL=https://acs-mirror.azureedge.net/cni/cni-plugins-amd64-v0.7.6.tgz CLOUDPROVIDER_BACKOFF= CLOUDPROVIDER_BACKOFF_MODE= CLOUDPROVIDER_BACKOFF_RETRIES=0 CLOUDPROVIDER_BACKOFF_EXPONENT=0 CLOUDPROVIDER_BACKOFF_DURATION=0 CLOUDPROVIDER_BACKOFF_JITTER=0 CLOUDPROVIDER_RATELIMIT= CLOUDPROVIDER_RATELIMIT_QPS=0 CLOUDPROVIDER_RATELIMIT_QPS_WRITE=0 CLOUDPROVIDER_RATELIMIT_BUCKET=0 CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE=0 LOAD_BALANCER_DISABLE_OUTBOUND_SNAT= USE_MANAGED_IDENTITY_EXTENSION=false USE_INSTANCE_METADATA=false LOAD_BALANCER_SKU= EXCLUDE_MASTER_FROM_STANDARD_LB=true MAXIMUM_LOADBALANCER_RULE_COUNT=0 CONTAINER_RUNTIME= CONTAINERD_DOWNLOAD_URL_BASE=https://storage.googleapis.com/cri-containerd-release/ NETWORK_MODE= KUBE_BINARY_URL= USER_ASSIGNED_IDENTITY_ID=userAssignedID API_SERVER_NAME= IS_VHD=false GPU_NODE=false SGX_NODE=false AUDITD_ENABLED=false CONFIG_GPU_DRIVER_IF_NEEDED=true ENABLE_GPU_DEVICE_PLUGIN_IF_NEEDED=true /usr/bin/nohup /bin/bash -c "/bin/bash /opt/azure/containers/provision.sh >> /var/log/azure/cluster-provision.log 2>&1; systemctl --no-pager -l status kubelet 2>&1 | head -n 100" \ No newline at end of file diff --git a/pkg/agent/testdata/RawUbuntu/CustomData b/pkg/agent/testdata/RawUbuntu/CustomData new file mode 100644 index 00000000000..8ae2dbe2e66 --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/CustomData @@ -0,0 +1,220 @@ +[base64(concat('#cloud-config + +write_files: +- path: /opt/azure/containers/provision_source.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9xZbVPbSBL+7l/R69UtkFvZlh1YEkq5FdbgqDCSSi+wXMJOCWlsqxCSI41JsoH/fjWjV7+QNdm9Ind8MupnerqffqanZf/4Q/c6jLvXXjZrtZBlYfvSdtDZ0Blj21EsB58o2lh+Cdw2HBuuijVdc7CjnSHDdeT93HKijRG+UJzh28pykFveGmMVXyhjTXd/U0ZId+RfcoOFxkix0QbAYQ5QTAdruu0o43Hl9FVu0h1zzST1alseemWRcovtGGaRn8qN9qU+VGtYP4epxvAUWWv++70ls2pc6GNDqZf3pSX7KbrcgFnegmXIcJV5sGRuFKBfVODMOL7ky8aaXSfYL6pwZuOROXpk64OGh7XcipqcHtrYcnVd00eVbdCrbWteB0XSp+4xYprRDQefGK6uyoMiVe1stGHZoF42Rk4z00GpNUN3FE1HuQfTbUQ7KNId6tq665dSxYVpGSpW0fEGUH8dZJ6OsKKqheAH0DwOasVYbj0suVRNyzhGxdNCnIbrHDMKWAp6btpvcKiYGraRdY6sJkDaCFB1G48N49Q1C1h/I0z5t2uhdXBJsuIomzVx0GsgVsV4IDWMq4I56K9USV33/kvhfejajnGGbaRYw7dYNc4UTbfzEA8LyMh0sWpp58iym1o4fLluXo3kcL+BQefaEGFz7I40fcnRQQ3S9BMDW8YZHhqW5ZoOUuXDQv726LdH93nVW8c0dngl1U1LVbRxzeOrRjtzTVVxUG0qFDO0EdPhuWZrho7toaWZDj9KFlLUy0aH6zU2YQ3ANUeWoqIGohGG6VqjQppSr6iXfWmzY2ohVilZ6hUSGWo2VmxbG+nYMgwHmxeyVHbNho13eRNZZ5rNIpWlsmWaCm9YQ8O85CBZkpqeTXN8iU3Fti8Mix+LE20kS9I+LB2xqusZhYAl6aA4hedv1Xzzur9IZUtktmNXG6sYWZZhyRLrhvmy/FzYjjI8xSPkYMU6w45xinRZKrv5KkRHzoVhnRZBupbi8ETL5r4Kt91jHTnYtNCJ9psssf7eMmxZ2M2SlIKYQpdQv/tCTElEvIzAPUy9jzewc+tRf7Yr9H6G7u+7morH2imSd/0kJUm2d6+p8m7nxd6e0P0ZvD34AvM0jCnQZDGfk3TXe9e/Au/d4GrvCMinkMLDzl7LPXZ1x8WGjXXlDMnt/P92y3qLxvVT9l+7NTQsZNj10/z/dqvo4nJ3kaXdKPG9iA8HN4tr4tOolRcot7LnQeLfkLRFPs1ZsvwAnssvpcOO1D/o9PpLBmQ7TbfxXRiEXks/11RNKSvPzhRju9/pdQatlYdSRxp0JFEq19T3g+XqTP9Ly3srrm33hBUoj1g67PRedfqi1Gp5c3rrpTcXyjiMF5+UKYnp7h58aQEAfPRCiidJir05xVHi32T8cUpo+tm/DXA4wRMvjBYpASanfejvgzenInMIggQfvYg59ZhTuL+H93x5OIF376AtSG2QZWjPkihow9XVEdAZiTmC/fGyChvnJ44h0YqfRfynnh4duDh2ErYeWq0NyVV8MFtIMlmQjnJusoiQuSz0j4CGtyRZUFkYHEE2CycUfvpp5UO+SZJCCGEMwm5GPoAEQuF07wiCpIq5cAdC9eHLrw/M03VKvJuay5JPEEIQyYfKG6ySwInwZwmgT8RfUBLA+7bw6/s2W8j2yI6WoCmhizQGqSYxysgSgqcOQk1DZZ2E/GOQxPmKP9n2YRPlOE5wRj2a/b9w/3cR2mBrSiimXnrtRVHFEvVS/DWmcjhnapFGsvCyrlBb+NJY/VAS3n6EugZ2hT4vBZH+MeEQtts21DW8fSN9ZbEOeuAv0gjESWaPQeAfkyqWv8444UL2rqOt2sIkjMjco7MG40dw50Vh4NEwibGXTjNZ2F+qwrYV2Mi+UO4Iwso2/30FlyUYbC5BGdnSGn92mwTwz0+PmLctUXVTMS9PLAyHzr07EsjdZE673h+LlHT9KFkEYhiHVGTYrMMRHDtNyRzEkw+sWKWfhzYIHMFoLqjqPaVwldOdH5FxstMoZFm352s6HEkCEENod1l43aC9UjAu3jrmN28KOpq1qaaIesCYhRGBySIjKXTvvLQbhdfdYH4zZUPSTf3Im9NuFGY0azz3PX9GuMVL/Vl4Rwrjm25A7rrxIoqg/+YnaYlkHuXOhRfSMJ7yypRjaTJhkwvw8HZay/QMmkpjSbA+sJgHHl2XmpQXnaFyBE4WdL6gcpfezlm04pRQMTd1ksWTLqZHxjGeWD5oquhYU3R8Yhm6g3RVjpM4jClJPZ+Gd3XlGcMgin4ST8Ipm95ED0RxkqQ+4Q8DMqmwRcggTkD8DGGc0WYb/QF2S0CeE+cc7oESAsIaC2z65zpH0P59990FunrdebF3v/uOoKs07bzYE9p7TO91e/I9usnP5l62Gfw3nZtCDPtbzjkrtJTzzmNzdS2sguJnGHyeVV9F2uymUOc309evjTm7urLXr+V2cy2f8UUxTsRihZgSP7m9JXGQMYU+97BWnv66RzxRMBURj+e4cXT/qqjmi3T6HO8xzyopnvSWgnp+4WynjiKnb9FAEGZMktPUC0opLN1aDNfEbLy5GEAsAMX9tYUUHpHB9hLYtvyP31bV9xLZLPk4S6J8lGvcX83M2OLVi2wDNU+7zqrbaZOndeF9Dd7aUpVLely7xLjiCr1tVtsSJ40rbKPOss8ZJbc+jXBKMuql9Gn9BrI7P/ZuSflS+q1v61UYEHjkNonFlESJF2yzoIgbhCKU76UdNLnNaDL/nyOWBf1dsxqE2bav9N8VsUXc3ye3eStgqTyN179KJiNGFHOSvh9C7kiahUmMp9VbIyUZhbawy3/bmMDOP7L38Q60hV/bcA/FzyfncA8z4gUgxiDt5d90C1K79dCq1YtipgElDuylrrvWj0Hq9WAfBj0Q8nwslP9+ZzuK49qy8K/ldZBRjy4yEKR8GJ17U5KCGMGb4s08mRZflgiSmGM7UTJtXE7LG4AYE+it0Vx87yWBnyyiAOKEwjUBHjEJ2q2NlSioDSfww9d/jqiTIXF+UqStts/RAVx/rl08HkuzGmp+JHk5Gq2aE1JHE/GbNQ5pxuaYBZ/32dRfThQfWEidjKR3oU/aa5wt3wdQZitITOaPEZrM501C17rfVm4KbOGH5/7jWzQ2kWUj46T1nwAAAP//FTwo004jAAA= + +- path: /opt/azure/containers/provision.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/6xXe3PaOhb/35/ilDJ59NYYSG/abIfuUHAaNhSztsnd3d6Ox1jHoImRXEnO47b57juSeZiE3iad+o9MkI7O+Z3XT0fPnzlTypxpLOeW6/vR6WDoRn90w95ZFA4+ut4k7ByDJVGBfWPRFD6BnYLDc+XEfxUCnYQzFVOGQjq54FdUUs4aCV/kGSqEz29BzZFZYD5M5hxq3UxgTG5BxAwUB1kkCUoJeEMVZbNGo1Fbid9QBU0rpZZlTtYPSKzw8GX9YM6lYvECD1+CVLFQSSEVX8hE0Fy9e2fQLSwr5QIoUAb1A4lfoAVHx83m4Vsg3BgonZE/ciaSvBAJNuT8njcAM4E52KdfYP/5mTscu37geqf7T1C4twdTgfGl0ZjSDaw6BRu/GMAPrJqw1HfnyghhJnEtLTPEHForC4QztCQSsCnUnApsh9QeD9wq/3vCAes6pipKuYhSmmHpWOuH5ymTKs4yqWP17dvfe/5ISBWVPwkq4Syls1+JaaPRMp32243lhr1+NHZdP+q5ftipH5Qd8HV7fXA66HVDN7iDb5AUCmyy/2lfN2h7s/DZLLQ2Cy/NQv3goP515PXdaDDqu/+5+611eHhYMXvu/neX1bE/uOiGrt7+dVaX7GLo5RPUvQA6Haj3PN/1gsgLolH3owufq31QMklvHrMZZTMgmMZFpuCymGKiMphSBhlPYkU5K9nkfPLe7YXDjknFUswwy5rSrmLhiII5AqecK1vgl4IKJFvt57vvPS/03X9PBr7b7yhRoLXutnubaaw3tIkyvYXALllQNpEorCTDmE3y3qoaiLXyvlb/+mE8iXSM7mrwrAM1baW27f7y+IfxpC/oFQpp7Fyc9aOh9yEwlTjuhmedSuFdzYm9rP01Pa+drz84Cg+jTVBhopDAjGcEGdBFPEPIBa70lpHGm5wLpbUKVOI2WZCIplEa06wQeE/ifhgGWqWsCum8+SM3dIPowvWDgTeqIDLcr+8QoweKHNbtVaKTJSR9uYGdfMcc7Bmh08lwGA1GQdgdDqN7WVzneJ2jQRBdnPXvatABnZ/t9GwQTqQuz4uzPhAqleAwLRQYstkRcsYVpLxgpLaD67W4YZiRF0an3mTUr14au8Gb+lwV+aaxJu8no3BSaSx9D2mvdmrRLq5qsOLjMud9zOUmPKXTHyr18RbkJc3zsklzZARZQlGuji879BEYq+XIpG6mglDV10eXutZp9Qum6AJ1S5U7I1TXXFyOs2JG2Xr1vJhihqrLyPmSDyoYnv2IgAwGf9wrO1xgrFCr+RgzmqJUfSosqxR6iGtNCOdvZOVXbzSwrNWpJbrlr3/xQrC4RLg7TdXUPDqS+tuu2DYhrQRbr+3m6xO0XzWPEnt69Hvbjlsn7RZiu/kaEd6BI2+lMy2kc7XQf0nJQs78KioUzZyCTSkjmxlkOW+0juifv9zKn6wGDqrEEUlDc3626ouUWmJhqF3vxpoJc9XQ0W4Q5+QkR0E5ockTCi/OVTRDFeWFmCG0m3DUhFa7CXEeJ3Ns2xqVphNdEtZFdzjod8OBN4pc3+80re54EAWuf+H6UX8URL4b+gM36LSbKwQVAWO704EXjVzQq1hhRtll40UVznfVmYnZd4NO/WAHA0P96+6Td3pABiYzzi+LfFtMw7k7tIIw6NT/uYIbhKZNmtsxWm76rgnni1rr+E3j+KjRap80Wse1F/CwAO8FyrDd+Zsgqtjv/m/iuwbs0PPOJ+PotDsYPpx1H6Fpl46UbhisItrzRqN1WH9vVr17WqbK73uaW81mlcl/mLLq4WXOErCv/tqRMXj16kjPqI+Ii9FqImKK1/DM9kBzfx7YLwclWA1KL6Fc0FTPOEH98GrBgrJC4b45tn5o6ru4JueFIvyagS2gBXu1n6CuOFeLWFz+0R1SVtx0Z8gUFGzOM7K806uZfZpqp5DCyeh0zRvyVipckAaJaXa7VP9YDOsXbK1nHqpQvlQhpYzKOZKGLg6QKK5Q6BGGYaLvRUjmmFxCwgn+owb17Sz+zZMYGdn5IF5cEirAzne/R/QQoHiRzB/9urdyCXFxk15fa67eHFpL2rlsZHwGe5ZVDjL3XLCeu96p9f8AAAD//xsNyOSCEAAA + +- path: /opt/azure/containers/provision_installs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/9waaXPaSPY7v+JFVm2uaQmcxJtNhuxikD2UMbg4MplNpVSN1IgeJLWm1XJMCP99q3ULhI9kNrs1qQqG7tev331JR4/0OfX1OQ6XjUa3a06M8ft+1zD7Q3N6edXWWSB0/CXiRLeYLzD1CQ91y0IBZzdrLST8mlpEo358dtS9MKb3PcqsFRHxyWHf7I6GZ/1zs9cftxWdCEu3fKr7RGi2Eu+f9ofppsQqN+fUT7Z6o1+Hg1GnN9kBsNln32XYDpVGdzScdvpDY9yrh87Is8uHLl5PaqFX0ZxwnwgSlqFnp7PhdGaOjYHRmRht9Ykbzk1OXIJDAogDCp82Gpx47Jpcsvn6yVPYNAAAcCCQQwQEEXcIoDV4bL5GxHeoT5Lvlksb2+xsNyf1DgwFT/Kw5RLsR0FxuueeUZeEORLuAeILUA9JSiKhfiiw6/ZIUDpHBF9bnm3ShbnA1I04MX1mhgKLEFrHTXgFx6/AirgLaBFOBrAUIgjf6HqArRV2SKh51OIsZAuhWcyTqlhQR4/mkS8iXd1UxbrNj6H8mDQnW7PJHN6BLrzgVpCvX4HcUAGqMR6blxPzajzqmT3jNGfWnPYvjdFseog5OJEstZpgBysHEP2OK68uzs1Or2eedfqDTJEe5qtfOwPqRzcdh/gClsy1s03TIcKMAhsLUsXauZqas6teZ2pUyM+O2DSU5xyO7ZqDvf5Enj4fd3rV4wvGYxQpc0B9wAG2luQYRYK6YWx4gmM/DBgXKFYszF02X0Qhabe0ltYCCyOLcEEX1MKChGCRYIks5nnMB8vhLAqQSwUBiy7CFKvFfF9wbK3Akp8unSMe+YJ6BMhc4LlLQiBiKRhzQd4EDhXguFEoCF+E0ltiuQkWAF3Enz4VKFyHgnhoSdyA8BAoi3cCziJBjoEGIRFAgxT973+AS+cB9lDw+Y8Iu1Ss44XsB5KXh+CxyBfgy0sTjgLqfIGQWVhAuA6lD4DkhMS3gGQPuVK1cPMlZfYLDd6CzWJ5A9AFPMq1lnobvGhCC06aTVBLyngLYkn89BjA7yziPnYt4QJCPkMBdggHFFXO5NBV/feHk2lnMKioHmBB4y8285NzdAEfP4KibjqzXn/aM41h53Rg9LYKtNsgeETg06cKUbfzgiObCvu+XCTQ30L/gpYC1/nVrMfpNeFF+PJWNuWAAlDPr2Zmz5hMpT9/X2Tzr6lNseZQsYzmGmXpArJlwuO6EzgrsoZ31St1HIjsYOBUvTSGG/ffG+NJLbefMRXmgnFTittl1io8GL1yuqXvSjKwbf8vCPlTJJkkif0cUYHSXBqKPWnXgGSc/ghWsbibnncQ10E4EHrIIm6RMF7X7O8nvppLDhrLC0nui9hd0xIj82Mk46Ef3aAlwTbhIVKfRD72ZKHzFBzLAg+vCNgrL3yYAR2k4qSZ2cdgkptHFGpZAaalpiJLCEFCF+uJfN/rw/f9Xr+D4pSKbl6fmCcvkbpJNrcaj3xADNIFYzItrCeJFAXswzgRXtCjvL0fVuK4+CSNUjLrICIpCGhAJMfphmXLWJsg2Srp4v10lIkEKmZy3FbUTSILsyfL9LEpae+PhtvnuxuT2dlZ/8NWeRjHTyvR/P7nKiF6cv5hN0QTa8lA6Sf71Hdgcv4BEu0kgnGZhV1IuYlX0u9t9YnDSQCywhn3T7MAkfjVM5QW51/BigSgBRwDskFpK09jHJas29UUE9CEL6X1Wmu+TCHkv8n5h5Q5czYetJXMNHO7bLY0xh2d+oK4KHRudNvCAWppx3rsQPHPLD3K9iiJaRPCrwmPL9ND58aMYc2bk5dmwrjZ0lrHptVqNZutYy3uhDKK3r5NKT35kZSefAelz0pUJsp+T3hImV8Sfwg+ExBGgSw1ia1Ua4HWLk4SYqtRMo6rTveic25M2kocmGSEksFJeWj63Pc2tM6DoprdAlXHKSS/7wAlGguwRlVfbfXJHIckjq9qVY1PS+dHRtyj1nfdjOyUO5sEfNu4u1QoJWZ1U71/mwTPFJde3t7eXwixuy09ZgN+fnMIXQz14Ksm0854mnRYRZDJu+Bx0lfkoSYrcvMm2BzPhpLKuMpVkkiq7Fa6kFmA7OsbpeKzdGWl5e/OxmNjODWLMJVgtgGh69T0v0IcupRevAPpsgJptLJB+UmRQatVWoF45UVp5XkC87TKXfX6hDd1czk6/S3PCTVMJq6ZUaqWwaV/YpcTbOfeQOyfIFzRIJARW3Kf56XE6YgbFg1JMRbJlxwi5O+rlZMvxTd2B/32Dq2FGrIeJYNMuhPlhdbUXtZwVEEaQ70oAkuqw3K5lDn6cTNuZI6b5VnNLlXP8uFNu0TQM0AIuy77jKQ44o58p0SKQQ/nyUajkMy3DGG+d/zCmZ2Xp7Jw3Rl4VIrRgh/Zpw36k+mdxV4rna5YwWH0h8ri77j3G6W1IuuwIFHDoSX9NnAAIZtg7jG+J6b9pupyYp5fnZsXxm/3H0IdFFOMPheQ4FEoiC0X9+Vzx7UPnDcVoW5IxGfGV1du5FB/L7Ruhsb019H4wrwazM77MtC0QYnT1eG42pHb3WG/HFuzUJ6u5uPL3Xkw/E2SlsWe7rBf0/rvHUqi9LBvTs//HY+y1Y38lZA8kYnv6OiZvoUjmIUE5jhcwjyirqA+HB2BYGlAAxksrCXmITyRVQ5EgdwUSwIL6mMXFF2pqlgKXGA+lwcTS1T2qdMTalLatgrsUbeTEMsYygrLpJLJ9xtF835oTM3/b/nUkfhAIdUM3mtm5UltrW4ObJmnMopanJbG86gCnaUPLemvsWefvNQE5przRdnVzaFR/Q5tJSOupypW1j0EfeA+vYK4Ypb19+1KvgZqP6yU7fMHumYStx7JMuo+prYXwkqRpxy+qh6WPtxKBgeYA7r5cr/rUHcfgbVkn31AY+CMiTfyow5GVttoDH9/9aq6Wwh8Lyr8aK//s0Rflz6q8i8ePt4qwT0wKcRcgju7/yUNF/rpe0UBSD3HXFCXBFgs23oUcj3uCeMnu9Rz9n2b3BArip92pO6tllFAPh3AVog8yjnjWpykie0QzSdCYpX/USlMoeum9ko7UcDdKWn7l+e1nk1uBMeW+GUdEL6K5kUf1h30zeloNGirSWcfs6XoS+YRfZlBo/zhq65uLmanxnhoTI1J0cIkR6NSv9f3sENAzdCDuvnltytjLA/HcanaJqVQd/R+uZ4VVZJZNBBH0F0SayXxfSZgYR9SfkFS7xIB2Lfj75ZwwaZc/l3DgjMPciYfhzCnPuZrWDDXJrzc6yQkQTzCRLIAQsQXfB0w6gtQFEDXEJP0Jv7cYxby5/6ALFBkKVk1nE1K508pjdsEnVLTRXnXKft6ekYBZQdbuoFu0VU9Nku49dgs4d4HGyci4qU2ttx4JmFiR4wPFluuLdgxgTTYVK6kngORLxsKQCwzmnpLXNBGySLV0URaotodjY3RxBxNzGHn0tg3SCszxYIuKb/szQWpnMa+pGthpegLvPmEpowLysD73B4g5ptM4wCxDzaMysD5Irm749sXycmdjiVJPw+6pKa
P+ca5UizOnSCZgx+26CJB3IGJes6esWYJ8vqBajoErtyG7hYhHgBXdqaF9ZfWLWf2mbaKB5h7duCo3DiUf5Iecz/THMxn6XOW/mXnPHb3tnp8sNU/aUJL5uhmKW/Jq0DdxXKgqo7T79WsNE3K3geaBVV6i2cui8i3BGU+pHC5zSRw46ho7JOYJldNwUxbCpHkE810BxBaMO5hAY83G21MAhZSwfh6u32z2WhT7Gy3j7OJJ7o2QKm1CfVr7bL2sYn+8en5gV1Uv2wq2X2Pi1xbPIZIJupSmKbFbNJW/7njy2q+B4/a0DzkulIZOWTJYfNpyK7ktorEpxyIBVmq8ijsH/34r0/bXWfe1mpTap0z1yX8EvvYybT/l9Kq5bLIjrvshFPkJaz+ZZVMbgLG4+eYB322Hu6wNdwxfjxuQlYG3X7lQ1DcQk0Rtmpe5JFRfbwo3qrIFxcPeI2iLjLae1fc9ookuyacU5ukQ9BuPE4vD4xki3jWHxhXnekv2Zuu0lSTT81aOJqtv26acbdl+kRalcQhd5LcJ1hkLZPhShlZYgUWFvDzz2CMzuDduzogGwucyOFNfCBujt+UHnUE7lpeK6lPr34DCyxLDGN01tg2juSf/wQAAP//M+NLdjUsAAA= + +- path: /opt/azure/containers/provision_configs.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/7xae3OjRrb/X5+il+iOx7XBeB5JJt5oqjC0vcQSKIAmMzdOUS1oyR3zUJrGtnbs736rm4dAAiTV3Nr8oXHgnN95dJ9XN9/9Q5mTWJmj9G5gWjr0DFOHn0fD13dJymIUYfAMGCIhkH3w9jQnMdUJrFGcDsgC/PEHGFoOGI3AULNsaDme5QhC8Oef/wLsDscDAACY2sYn1YWeMR0NX5MVQCC9Sx4BZnfn4BksKV4BeZqAExJjBm5v/rgNzv7858npAIcp3gWodJQN8Az8jAE5OAEnQF68OR0syAC6mu5NIbS9mT0eSXeMrdILRRl+3aC8XLx99+Fcykm1sQFNt5/4p5+lwcBP4gVZZhSrQUTiWYrp69OvQj//Di0xkCGQ33Ct+G8EzoE8AT/z/4A0/KrqE8OcOdB+keos4fa7l5ocB/sUs7SUok4NB9qfoO2Vyt3AL95Udf89khTMfOU+m2MaY4ZTxceUpQpakRTTB0zP7vE6l8uSzL8TQjvRKg2jJADnP56fH0iePMaAJgm74D97eQSTph5si49ajGjhb9W+j25H7XZiQS02zJGLgJkfdK5CH2CrJQcw5IEp/0fYYjkTy/FmtvEiNYOyNJ2rd8F/DkRfkJoritg5xhV+SHDMulzRAdjtin0MO6u7h2tjm0ghx1i2wpgOv27y6UuXka3I3Sb2kX/rWvdhN1a62BQatF3jytA4+YGb3qesZ9NvA2654f37gxnaV7qba2cXH2NbsYs7bOsA7LZtH0PvLu61TSzwMZbt7uIOI1uRu03sJ283sItHMKWYgX8+iT+xf5d0FpwXCTyDOUrxj++BLAfYTwIMPh5U00rcZkXoBuwuMyVSR4LthjwgIzewdzPbHuz+BNrAFsuxH7Evp7T5obbAh/mhYxe1+eFw7P4Q3PUDJzzEES2AL4MBjtOMYnuqvT4FeXeXrlOGI5+FMEbzEKtx4DBEGaArf07iADw/A/xEGBhC2/acL44LJ5o79hxXtV3vSjXG+1DklCF2CE6lnpoFhOmFhnml4VEz0w1X96CpXo6h/iLx9p/RDDdLTpciiGMeZk3V/pMFQCsGQpIyIMskThkKQxyUs8NJjnnSKHhoxbwlZt4qo0sM3p6Dd+fgzdvzUv6rssKJn3rTfZPNcYiZI4qXhimrVuhmdgnH0D22/7vPAbdbwC00vk2OgBE5WeAkKxynaQiWOKYpAnKSMTDcoyp4e/7+Q4Ob4r+BHONHID/9cP4zkAO0TsFP787PgXyP1/sBW8VWNgE5zeZ/AUnRzFFRW/iUuDXu3HxId3x98HTQ1VV2AbW2W3uJd8pUD8f2zDa7HBvaESPbbtntBGutuvup+wa2HZbcnP+d2dD71bHMDiPQfzKKz/5Kk3hb9yZn+4zZQbOr5w5hV0fQsj7dSXvP8rd0GZWXDmoyWpdB/HAKQ4NcsqkZU3VcliMHajZ0R8OveygU5fZWub29vX35f8OTOJ6U4/mIgV9+AdC6KozaWYA8bCU/TLJAugCSyjfCNJuHxNfEs+9zAoZjFDND0Ay/utBUTdcz9JfyfZrNU5+SFSNJXFI5s0tHs42pa1hmnRahQBNxXxF2GdXGlJ+u7GHMvVExU5wmGfXxNU2yVc5qQ8ea2Rr0rm1rNq0ow8RH3IacaGxpKte+ev0QuesVzl9+mnjulymsuyDGzEQRrsw3azqk2M8oYWuhw4bKhO7vln3DNZ7ZhvtlS5+HBuQnw3Zn6tgrmBpU9q6NW+Reh800yRh2ebnfSLKtmQs9l/cKFd2KkgjRtfqASIjmJCRs7dS1m9rGRLW/eOon1Rirl8aYm+PUXFAAOD4KcSuno6lj2GARG3NKkwcSYHqJ/PtksZgkQcGnja2ZPrWtT4YObe9S1W6sqytvYumwF0C6AB28Lz1cNmaU4LSb2bOhaxvQ6QOBT6skxjHrQYGfp5YJTbcPRs9ouU27YPSZne/dHphfCWOY9oD8argutFshbMTwmESkzRRbdeHYmBjtNlScv02dPmbvt2m7LyuAy8y/x70KeJcz7Qbu1+N3Shjep4z3u2248BCV9sPlejURsxRPUIyWODACHDPC1vCJ4TgtF3rmQG+imuo11D1Dh6bLAwx+dqHp1BY6SzFV05Qs4w2OoecBM3Og7amOY1ybdYxans1SbPBOPfbxBDMUIIYq2YbpuKqpQW8CXVVXXfWlypoouEQhZ6LOfVYmT1X3LtUx57A952ZWyQhIyrONlbF5ksWBY6qukNHk0A2Hpx/PmrmX1szUPU5XSsRPfpgFeIJShukVTSKHoThANBhfCij4WRvPdO4ux4W2d2VbEz6lmLpq69748qXKSPnyfUJZWMtHNxPHq9bskzobu0XjW7BF6IlEWTSumW1nIdaSrIjsifrZmMwmHreoMsiejaGnWbNNaJfib/C6FH7/IZV2337CtNgFEm++oXVVdU7yU+1EcU9OBCMgPbzdOWXkQAGQCZCUvpylBBLobhwPw8ozzgFI5Slm94DXGEI006iGEPFDMaNrPwo8svAWiIQZxWKQ/AG8/QFESbCiyRyDOfVizBYkZJg2h9uJxZ14CTczuugj5RhIdSYJfMwtiZIgC3Eq81A4C5Q6zRnXsmmMZhrGVJTcNFd5y5byZWVTtbxlLZ+OZ9eGmS+pcF3LqkYPYKiZhndpmJ5u2Mqbczn3MhclxnLxWrPMK+NaUFSseYfPG/xtkh2QimdXR2tsaF/yowbJRyHxkxYtqx1zkn4nRaK2S3NKgiWWqv9nFMXpClFeOb9bnhyhFA
771ZL4sne9jJNY+BW8erWFUcbTCDR0+29bJ05C8v+UdE5iBc+Z2DhAZiBGDMhyRZ8fmORHRFoSM0RinrZiRiK8s880y3RVwxRZy3SNCcxtDRL/nm/6HTNzWF28HtQ04yJLofnbSpRuaTe8MBQ9PPwMtfIgyRjDkYiq/DgqKP5VcvFnfNImPj4LFPyEfS9liLJNkD0iwrxFQr0FCUXMn4M3YNgvrRn7/In3u+pq//a46dbMFcC8sPKokNE1yDUB9UvgulETnuW9q7F67RTHZPoRVvkhRtSLeDXxVjRZoaVo9rxFiJbpxtDNTf4/9tzk73VLl74H+aVY6gJKTJcbW3PjlADhKIlr6T2hgAASg+HrFP8N3giFTv8FgqSRToCcVjpWwGA3xgD4629wcnYCfmkhf/UKzClG921RI4QMCZC5Etwnbdh7PZAnmuLYs4r7EOMVeLMtNUhi3HvUWmythufL3ds8NW7uONNwLbtaPa6evW/PyVESE5bQM54D6P7o6RZy0EbpQDlUySJAjlfzYAX7l6TprcNO5DfZr+hfdo5KdXglGsxayOAFb0fLo+Mee9swDjKUMxYFJpf7gKgSknkpU/ybNyR7xNdQDpbMVS7KSlnm8oOiQplkxfJ2UPHLOlUdpJ+ldwc4pBv9m/ZBoUMTozo9bwnOqgbme2CM5jg0k6DW2I3VSzj2TEuHzgFOCDmAHHOEfkd0wB5kfYN3X4w2NNoboF3Q37QqNRWOuSb7NclojMJqJb5uUrk4MXZYQtESj1Z89koZb862KYRCE/Q0S/HozfX266K36nx/ldBHRAM3cdZpmCxHa5zmJC/gYzFVlM7+K9c12BT/LmcUHHLJcYxHbj6kvDGkSTgNUdxoCoc2vLQs14a/zQwb6hyVt4mmVY3nYhJhNOMN81YNpZhlNK73Cr3TWZ5X3DF4+1EJ8IMSZ2EI/DDjo75M4kWyFX8fHB7vpmFeV9uFj1MUIyay7gTFZIFTphPayL4T1TSuoOPqhj3ankajgicfz6L7gFAgr3LVanxc0CMluRxN5MuGiDz/CAF3SYSVYdUuKmdc2hahiLJhg1GpZeGmGhVJ7cZm2MSqXcRsJF9s/mwDOpC8Bs974p/4pNgGVo2RLaw7Fz8DWZYHaEWK444L8PBmUCx8ejGQy01wkWNjysiC+IhhGWXsLqGEreUAMXQBbqWhptbv8G+lQiJ9wPSiUbrya54BADGKsGAtD41+081bic/lDD+xXIH870KBQptdFlBMC9uvZBREJBYEXcIyysdAuRS0S3FP4uAC5HttwIUIxdrgatKEMoXSRODXnFe5rOaUXdcVjPd43cpwA7/cSgMJfOxcaflpc8qh5a5TM5akPgoxVYMgiavQ0cYzYYU6cy1xJ2B7qq435or6yRHnTZUyQ6AKVA7wKkzWEY7Z2RpFYU9h6pV4WHUqjr3S519yVxn6x+fha7Gxhz03XNUF5OnzUtqjSLsgB/sHSMqvxL5FWprNGzY1r/e+Bbm4XayBVzeM3wJLlxvA5t3XUajVrlU1Q0viGPss2dqwqiYObEyo8blDs6E4WVfHzmj4ekVJzBZA+norFRsjuJV4+PxPeit9D8qn+b1m801569p82rxrbb5DPiMPWCdUKLmGcbBKSMxmNMzpyi/lw2RJ4rOI+DRJkwVL4pDE+MxPolvp+9vqvjS/jaCdKJF4L+KrPKCKlFZFrila3ZVXlKXSJcySvzx7JHGQPKZnMWYFRvp3OKkkHKKEn1Bcx7n48P79uwJsicIQ93ikeL9jSPRNGii30osEpL4E0Ps6j1pOUkWEoG/GXv0rhkdwftr8xKj8SOgerwFN0cX7859/LL4aypvm+sdD7378If94KMnYzhl98a2LT3h9yuOAk56tcJR/SnQIA3+Wc5RfF41mjuK4I01VxqMHQlmGwnIOtXYezLafaObWE6klJG/gl9HwdeGjo8x6PD9tC3Fou8cBbsx+LBeoCbm/wjUBDy9uXXKOrmsUB+nH52FnthPJtEvabnY2UYQ/Pm+l5qMwuFN3FYK2exTKPV7vgNzAL/0Ym6ogPplJQuKvt4qC+Nomv1A4ZHU5jrwSQK2LW/ddlUk/Piv1epAq27lBaXz8kiqt7m7XVBxgfQetq8H/BQAA//+oI6qBejYAAA== + + +- path: /opt/azure/containers/provision_cis.sh + permissions: "0744" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/6yWbW/iRhDH3/tTzHHWOWnjGOnSnHoRJyEgF0t5QDhqempatHjH9ir27mp3zUMD370yBmNSIoUcvECeh/3t3zNjez9+8EaMeyOiE8siWrOYD4Qw/YejY3i2AFgEsUIJzj9KCPP1rw+//P3VAQ9N6OmEUDG5AJMgtwAAgvb1fcs+EhK51ikowim4I6Lx/Ax+Oy5Tep1B79Wkz1/KrM5Nt9VgmRTKQKhm0pxAjEYSrU9ATugFSMX4KnS6/D9y7BLtnIDzaJ8/2nah5tF2jhtL5FU7uGrZR3JmEsHBDaFhd266jWNrGcUwEeAs79Cxi1SYg5dr5emiOGFSbD2h4CLM54BTZsDuDQbDjh8M20Hgf78dXvrXvWG/N7jxg8C/u7UAImYt1iW9ZCn2UWVMaya4XtUWoFgVtEqFJDfJaSri0kgNKk4MG6OunGEqcuoyzswOlytyI/NNhBLMBN+Y8imujCdUm0hKtFlfTwiJkW8geqbXlzknxiCnSN1cxopQ1N4O3+me+e6WMPJvrtAdczQukyTb5TeYYoZGzV4EQ84ywZkRaseqzZ2PXarYGDdJo1SMolzjq/4oxelYpC7j2pA0raWkhFMdEomenmnGI1FFyo5GQi07DIyD/bzs9eICqFgG1+3vt++vWg1vTJSXitgr8xaNKqfrD1r2EWWKkwyhUcaLRYvGcZWUPVGmwJVgP3f9wWKPKS1/RuRhAjX23oQwyQSF87PmuylUcCzLxjiFdUHANTOJEIErUWXgeOJX5YCLUwxXezqxO5meCFdNpg48L+DxwqoJajbLl1X5BLt7KXqBKN93P4WIlcjluwhnZyWBYkTy1Hixykd7cYphjFiKkpikGMglLVSCGzLaGKeJyFU6qzkoYVv2BPFpy5EJbpLtJbUhL+U3iwrY1fZ7j8XCsjSa/kNvKpkihgm+eoVqpOAyaOh5vx0Ew5v2n8Nu+0cwn8eNUlAqYsZPKUZ6x679/vWPYbHw4W7QHXbubi/97xasPnhbQOd/tE+f3kh7KdG/PbDEFfAgEv3bdufe/6PX2shbT1yuURFK99VYEZ3duDeLLL/RW12B35sOfPt20E4vme8G1kWu+gJfDqexhvxJiVVbPtdKeKhWf26+0uw3Aotzk5TprOMH1XO+9fRbAPWzamW+OGfVQZb1sXd3af0XAAD//0UaPRLwCgAA + + + + + + + + +- path: /etc/systemd/system/kubelet.service + 
permissions: "0644" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/6SUz07jPBTF93kKq2LxfQsnbalQEfIChsBUVAwiRSxKFTnOJbHq2JHvTWlnmHcf9Q9oSosGhJfX53fOkWXd8Z3VNAnOAZXXNWlnxVWTgQEKvjmb6+XkRlIZzzUSiqhBHxmnpIkybaPpRhoE4wT8TCuYBLeAJD0JaZ7kAoPYzrR3tgJLF9qAiIBUlMOjbAy98kmjFCDGc00JSWpQdHqHQTwHlSy9bjyIVV4msWSRqymSPxsPkXKWpLbg8cUqxHIPV01z7RmvWTSTPjI6e03+iFZZ/V4XrlhLP7IxO/ivco0l9swKDzV7aL1NemixZ/akGDf/M26AtdmEnTAqwbJ17ArnPNM236m5Ozhhj7q1r/3GppJT4FhKD7tuQfAGxCWJC1RkGH9iFijU9awXkqpTD+Q1YFf0/w0p5yFEV8m5ctaKztFhv/eJqErOU1zYNJNqalzxUX6VWgGiLCBVDkn02p+kssYjiX77ow9jQRdluPnFYaFSKj1g2RG99vHR1zy6ot857n7N43DzdNsufG0DGcnMADJOzMrlXzEaaa9U12+ltqnAa7UDvbMY2EPANodzsEszjuBn4LdurMuBG5mBQdE6+HV1dxYP41F6/eM8ToenZ/Ew+d3aAmaiy7YHzjQV8No0hbY81369Z5Y1vAUCjNaKtQD/Yg9e0i6Gp5fJvovb+HKQjOLbVZ894/vB6Hs6Oh1cj5IgGA8skjRmEtxLS5CfLUTVGNK8QfAhSV8A/QkAAP//lvMgPnQFAAA= + + + +- path: /usr/local/bin/health-monitor.sh + + permissions: "0544" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/6RVXW/bNhR91684VoTBaSErMtAOSKcCXWasQReniJPuoS0MmrqyCEukQFJpMsP/fZBEf6h2CwzziyXxfp1zz708G0S10dFCyIjkIxbM5J53hvtcGBiuRWWhtFgKySylYBa5tZW5jKKlsHm9GHFVRqt6QVqSJXP4uCjUIiqZsaQjXtTt/5JTtOQiyokVNg9LJYVVemRy7wxMpsiZwYJIolSpyASlyJQGW5mQ5FJIGnmeIYtQQapaGrLb10pUlDFReB5X0jIhSc91La0oae6yCLkcnmPtAYXirECoUbKnObOWysqa5NXuxH1K4kNbrgW3ReIH6w8Pv0/m729vJpuWtu7A79ke1SBZSY3v1e30/t31dHI3v3uY3l/fTObTdzeTyzBVfEV6s4/SMcRz4qs5V2XJZJr4nRUq09iJDJ8/41TMfTgMEjgvH1+/voHNSXoATscP1h2YDSqVtkky4XlAA6JAA0TVFq8vEKxP+G/wFlFKj5Gsi+INUtUmEhmGwy2lSJIe6Tg/P6gJIJ4r+DfsaecQrA/tN9DEeE7pAB+14kSpkEtYBdfjPfNwzDugQpIxI9+lWWhiq/Y5E94+b7BNKqSwYi8EfPFPA/7iD3Cvn5sa2JIJCSGxC2KIK5ma0cilNQVRBT8YDjHGixfb4C9f4vy8sUiVJA/4louCYHVNhxQO/iv7R6ReHTETrE/rdINmkigdbOnaKu20ud809bTIul+1EkWBcHb958PsLkZnmToD1wDAPBtLJbcFOvOw+Qu/5SopG2b9H1a7rbLjNx5fdB0tDPUO/GA9+2sy+TifTa5up3/MnF+bv6V+43nN9irIHm+MjsK/mbDtQhqjFLK2ZNo359XIcEHIasmtUJK1K+GwqN7acepI4v2Jqm1V28T3f6ICZxMMea0LhGUD6yDcxkeYITQIZ+2ivoyiePzr6GJ0MYov44vxq1du9f6D8dtf4hPDF3QZetL54AAKg1p2/s87cXzXN0dGj/rX/6MlW+2d+QglIT4UWFfdg2FLusTRlYLfdoIJnWAiV93bJhE9CYvYazbcbqMnfnsbtv1olrvfHU2mnxI/IsujlDJWF7YNFJJ89F2BCAnbq2Ey/bTpDYJRtebfnbd5e+gbKXBVVkqStEkQex28mWXaYn+vOqDYa7QVYTMeznfj72k7/NrO6REnvUp/dnl6VPwwquO1F+t4mDyngA7Y+5M4dpF7iBrpSWVh6qpS2ra7KRPevwEAAP//kxgmoL0IAAA= + +- path: /etc/systemd/system/kubelet-monitor.service + permissions: "0644" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/zSOQWrDMBBF9zrFXKBRewAtCs0FYroKWUzkbzRYlopm3Nq3L1jx7g3M+7z7dxF7uC9obPJjUktg6kyW2CgmxFlpXp/IMErgbIm4jNSgxs2UZKICjBjd52Ro4fV7UbRfiXD3ocPD3boSOP/xruc5IIaPd3fDwlKOiesmFnaou26Iw6H4VZvPNXL2Tym+d7wttYjVdtF0Fv4HAAD//zPpD7jRAAAA + +- path: /etc/systemd/system/docker-monitor.timer + permissions: "0644" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/xTMsYrCQBAG4H6fYuDqHAdXpwkqWKVQsQgpxmQSB3dnZPZPkbcXX+AbbqYY00HqFPqGurVM0CJBeDJolsx7pdmnl0RT3BQetIQXquCA2kpwp+puxAsk6OGONFy/xph669xxkan9/ytqaThbBec8pjsbZO72tmwZ2mxV4hccqyD9HPtT+gQAAP//XmnFIZoAAAA= + +- path: /etc/systemd/system/docker-monitor.service + permissions: "0644" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/zSOsWoDQQxE+/0KQWr7kg+4IpBLG/CRyrhQdGNW+E4bJDmx/z7gJd0bmDfM8dM0T+UNIa7fqc1Gps6UlZOkQi5BS5MLnCp4zUpsCzki2TNIz2TAgqW8nhM+9uo+4D8qKMe5w6kcujHy+sv3+I8zZHx5LgdsrPZYmG6a4x1RphtkfijDNXxYm/A6fKkN/cZua6bZfB+VpFmyGnznV0vdUJ6mj/fyFwAA//+bGk4y3wAAAA== + +- path: /etc/systemd/system/kms.service + permissions: "0644" + encoding: gzip + owner: root + content: !!binary | + 
H4sIAAAAAAAA/4SQPW7jMBBGe55CF5DoclcAi13sFmltBykcFzT92RmIP8rMUI5y+sB20ggB0hB8Dw8zwOweM+ne/IMEplGpZOffK2NIYtZ4rcQQdyxhAHcCnijA/Dkp2GXopfDQlhwpo1PPZ6gxu8292pvtPMIJpTHCrCHqWZ2PFz+L2VJCqbq5ug2CW5n/bwg3dLYK2wNle9/acM3Ns2mats1Q91JEP3EqsSY4W0btr89CQ4Md6gGcoZB+wd/EItEGsIoNvr1+6ETBK6QLrP3PyWLk5NlGOtiL92dk7ZfilqfAXaLARcpJu1CSHX6JHZLYAfPka9R+WnWr7rcxu4cs6mPcmyefFce/s0s1KrVVwF/n/wgAAP//Goll9c8BAAA= + +- path: /etc/apt/preferences + permissions: "0644" + encoding: gzip + owner: root + content: !!binary | + H4sIAAAAAAAA/wEAAP//AAAAAAAAAAA= + + +- path: /etc/apt/apt.conf.d/99periodic + permissions: "0644" + owner: root + content: | + APT::Periodic::Update-Package-Lists "0"; + APT::Periodic::Download-Upgradeable-Packages "0"; + APT::Periodic::AutocleanInterval "0"; + APT::Periodic::Unattended-Upgrade "0"; + + + + + + +- path: /etc/systemd/system/docker.service.d/clear_mount_propagation_flags.conf + permissions: "0644" + encoding: gzip + owner: "root" + content: !!binary | + H4sIAAAAAAAA/4oOTi0qy0xOjeXyzS/NK3HLSUwvti3OSCxKTeFSdvV34wIEAAD//6XumLkhAAAA + + + +- path: /etc/systemd/system/docker.service.d/exec_start.conf + permissions: "0644" + owner: root + content: | + [Service] + ExecStart= + ExecStart=/usr/bin/dockerd -H fd:// --storage-driver=overlay2 --bip= + ExecStartPost=/sbin/iptables -P FORWARD ACCEPT + #EOF + +- path: /etc/docker/daemon.json + permissions: "0644" + owner: root + content: | + { + "live-restore": true, + "log-driver": "json-file", + "log-opts": { + "max-size": "50m", + "max-file": "5" + } + } + + + + + + + + +- path: /etc/kubernetes/certs/ca.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + +- path: /etc/kubernetes/certs/client.crt + permissions: "0644" + encoding: base64 + owner: root + content: | + + + + +- path: /var/lib/kubelet/kubeconfig + permissions: "0644" + owner: root + content: | + apiVersion: v1 + kind: Config + clusters: + - name: localcluster + cluster: + certificate-authority: /etc/kubernetes/certs/ca.crt + server: https://:443 + users: + - name: client + user: + client-certificate: /etc/kubernetes/certs/client.crt + client-key: /etc/kubernetes/certs/client.key + contexts: + - context: + cluster: localcluster + user: client + name: localclustercontext + current-context: localclustercontext + #EOF + +- path: /etc/default/kubelet + permissions: "0644" + owner: root + content: | + KUBELET_FLAGS=--address=0.0.0.0 --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroups-per-qos=true --client-ca-file=/etc/kubernetes/certs/ca.crt --cluster-dns=10.0.0.10 --cluster-domain=cluster.local --enforce-node-allocatable=pods --event-qps=0 --eviction-hard=memory.available<750Mi,nodefs.available<10%,nodefs.inodesFree<5% --feature-gates=RotateKubeletServerCertificate=true,a=b,PodPriority=true,x=y --image-gc-high-threshold=85 --image-gc-low-threshold=80 --kube-reserved=cpu=100m,memory=1638Mi --max-pods=110 --node-status-update-frequency=10s --pod-manifest-path=/etc/kubernetes/manifests --pod-max-pids=-1 --protect-kernel-defaults=true --read-only-port=10255 --resolv-conf=/etc/resolv.conf --rotate-certificates=true --streaming-connection-idle-timeout=4h0m0s --system-reserved=cpu=2,memory=1Gi --tls-cert-file=/etc/kubernetes/certs/kubeletserver.crt 
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --tls-private-key-file=/etc/kubernetes/certs/kubeletserver.key + KUBELET_REGISTER_SCHEDULABLE=true + KUBELET_IMAGE=hyperkube-amd64:v1.15.7 + + + KUBELET_NODE_LABELS=kubernetes.azure.com/role=agent,node-role.kubernetes.io/agent=,kubernetes.io/role=agent,agentpool=agent2,storageprofile=managed,storagetier=Premium_LRS,kubernetes.azure.com/cluster=',variables('labelResourceGroup'),' + + #EOF + +- path: /opt/azure/containers/kubelet.sh + permissions: "0755" + owner: root + content: | + #!/bin/bash + + + + #EOF + +runcmd: +- set -x +- . /opt/azure/containers/provision_source.sh +- aptmarkWALinuxAgent hold +'))] \ No newline at end of file diff --git a/pkg/agent/testdata/RawUbuntu/line119.sh b/pkg/agent/testdata/RawUbuntu/line119.sh new file mode 100644 index 00000000000..cb69dc2848f --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/line119.sh @@ -0,0 +1,3 @@ +[Service] +MountFlags=shared +#EOF diff --git a/pkg/agent/testdata/RawUbuntu/line16.sh b/pkg/agent/testdata/RawUbuntu/line16.sh new file mode 100644 index 00000000000..e51aeb3fea9 --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/line16.sh @@ -0,0 +1,160 @@ +#!/bin/bash +ERR_FILE_WATCH_TIMEOUT=6 +set -x +if [ -f /opt/azure/containers/provision.complete ]; then + echo "Already ran to success exiting..." + exit 0 +fi + +echo $(date),$(hostname), startcustomscript>>/opt/m + +for i in $(seq 1 3600); do + if [ -s /opt/azure/containers/provision_source.sh ]; then + grep -Fq '#HELPERSEOF' /opt/azure/containers/provision_source.sh && break + fi + if [ $i -eq 3600 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi +done +sed -i "/#HELPERSEOF/d" /opt/azure/containers/provision_source.sh +source /opt/azure/containers/provision_source.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_installs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_installs.sh + +wait_for_file 3600 1 /opt/azure/containers/provision_configs.sh || exit $ERR_FILE_WATCH_TIMEOUT +source /opt/azure/containers/provision_configs.sh + +set +x +ETCD_PEER_CERT=$(echo ${ETCD_PEER_CERTIFICATES} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +ETCD_PEER_KEY=$(echo ${ETCD_PEER_PRIVATE_KEYS} | cut -d'[' -f 2 | cut -d']' -f 1 | cut -d',' -f $((${NODE_INDEX}+1))) +set -x + +if [[ $OS == $COREOS_OS_NAME ]]; then + echo "Changing default kubectl bin location" + KUBECTL=/opt/kubectl +fi + +if [ -f /var/run/reboot-required ]; then + REBOOTREQUIRED=true +else + REBOOTREQUIRED=false +fi + +configureAdminUser +cleanUpContainerd + + +if [[ "${GPU_NODE}" != "true" ]]; then + cleanUpGPUDrivers +fi + +VHD_LOGS_FILEPATH=/opt/azure/vhd-install.complete +if [ -f $VHD_LOGS_FILEPATH ]; then + echo "detected golden image pre-install" + export -f retrycmd_if_failure + export -f cleanUpContainerImages + export KUBERNETES_VERSION + echo "start to clean up container images" + bash -c cleanUpContainerImages & + FULL_INSTALL_REQUIRED=false +else + if [[ "${IS_VHD}" = true ]]; then + echo "Using VHD distro but file $VHD_LOGS_FILEPATH not found" + exit $ERR_VHD_FILE_NOT_FOUND + fi + FULL_INSTALL_REQUIRED=true +fi + +if [[ $OS == $UBUNTU_OS_NAME ]] && [ "$FULL_INSTALL_REQUIRED" = "true" ]; then + installDeps +else + echo "Golden image; skipping 
dependencies installation" +fi + +if [[ $OS == $UBUNTU_OS_NAME ]]; then + ensureAuditD +fi +installContainerRuntime + + +installNetworkPlugin + +installKubeletAndKubectl + +if [[ $OS != $COREOS_OS_NAME ]]; then + ensureRPC +fi + +createKubeManifestDir + +ensureContainerRuntime + +configureK8s + +configureCNI + + + +ensureKubelet +ensureJournal + +if $FULL_INSTALL_REQUIRED; then + if [[ $OS == $UBUNTU_OS_NAME ]]; then + + echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind + sed -i "13i\echo 2dd1ce17-079e-403c-b352-a1921ee207ee > /sys/bus/vmbus/drivers/hv_util/unbind\n" /etc/rc.local + fi +fi +rm -f /etc/apt/apt.conf.d/99periodic +if [[ $OS == $UBUNTU_OS_NAME ]]; then + apt_get_purge 20 30 120 apache2-utils & +fi + + +VALIDATION_ERR=0 +API_SERVER_DNS_RETRIES=20 +if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_DNS_RETRIES=200 +fi +RES=$(retrycmd_if_failure ${API_SERVER_DNS_RETRIES} 1 3 nslookup ${API_SERVER_NAME}) +STS=$? +if [[ $STS != 0 ]]; then + if [[ $RES == *"168.63.129.16"* ]]; then + VALIDATION_ERR=$ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL + else + VALIDATION_ERR=$ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL + fi +else + API_SERVER_CONN_RETRIES=50 + if [[ $API_SERVER_NAME == *.privatelink.* ]]; then + API_SERVER_CONN_RETRIES=100 + fi + retrycmd_if_failure ${API_SERVER_CONN_RETRIES} 1 3 nc -vz ${API_SERVER_NAME} 443 || VALIDATION_ERR=$ERR_K8S_API_SERVER_CONN_FAIL +fi + + + +if $REBOOTREQUIRED; then + echo 'reboot required, rebooting node in 1 minute' + /bin/bash -c "shutdown -r 1 &" + if [[ $OS == $UBUNTU_OS_NAME ]]; then + aptmarkWALinuxAgent unhold & + fi +else + if [[ $OS == $UBUNTU_OS_NAME ]]; then + /usr/lib/apt/apt.systemd.daily & + aptmarkWALinuxAgent unhold & + fi +fi + +echo "Custom script finished. 
API server connection check code:" $VALIDATION_ERR +echo $(date),$(hostname), endcustomscript>>/opt/m +mkdir -p /opt/azure/containers && touch /opt/azure/containers/provision.complete +ps auxfww > /opt/azure/provision-ps.log & + +exit $VALIDATION_ERR + +#EOF diff --git a/pkg/agent/testdata/RawUbuntu/line23.sh b/pkg/agent/testdata/RawUbuntu/line23.sh new file mode 100644 index 00000000000..1f074afdfa1 --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/line23.sh @@ -0,0 +1,276 @@ +#!/bin/bash + +CC_SERVICE_IN_TMP=/opt/azure/containers/cc-proxy.service.in +CC_SOCKET_IN_TMP=/opt/azure/containers/cc-proxy.socket.in +CNI_CONFIG_DIR="/etc/cni/net.d" +CNI_BIN_DIR="/opt/cni/bin" +CNI_DOWNLOADS_DIR="/opt/cni/downloads" +CONTAINERD_DOWNLOADS_DIR="/opt/containerd/downloads" +K8S_DOWNLOADS_DIR="/opt/kubernetes/downloads" +UBUNTU_RELEASE=$(lsb_release -r -s) + +removeMoby() { + apt-get purge -y moby-engine moby-cli +} + +removeContainerd() { + apt-get purge -y moby-containerd +} + +cleanupContainerdDlFiles() { + rm -rf $CONTAINERD_DOWNLOADS_DIR +} + +installDeps() { + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/packages-microsoft-prod.deb > /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT + retrycmd_if_failure 60 5 10 dpkg -i /tmp/packages-microsoft-prod.deb || exit $ERR_MS_PROD_DEB_PKG_ADD_FAIL + aptmarkWALinuxAgent hold + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT + apt_get_dist_upgrade || exit $ERR_APT_DIST_UPGRADE_TIMEOUT + for apt_package in apache2-utils apt-transport-https blobfuse=1.1.1 ca-certificates ceph-common cgroup-lite cifs-utils conntrack cracklib-runtime ebtables ethtool fuse git glusterfs-client htop iftop init-system-helpers iotop iproute2 ipset iptables jq libpam-pwquality libpwquality-tools mount nfs-common pigz socat sysstat traceroute util-linux xz-utils zip; do + if ! apt_get_install 30 1 600 $apt_package; then + journalctl --no-pager -u $apt_package + exit $ERR_APT_INSTALL_TIMEOUT + fi + done + if [[ "${AUDITD_ENABLED}" == true ]]; then + if ! apt_get_install 30 1 600 auditd; then + journalctl --no-pager -u auditd + exit $ERR_APT_INSTALL_TIMEOUT + fi + fi +} + +installGPUDrivers() { + mkdir -p $GPU_DEST/tmp + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/gpgkey > $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-key add $GPU_DEST/tmp/aptnvidia.gpg || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 curl -fsSL https://nvidia.github.io/nvidia-docker/ubuntu${UBUNTU_RELEASE}/nvidia-docker.list > $GPU_DEST/tmp/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + wait_for_apt_locks + retrycmd_if_failure_no_stats 120 5 25 cat $GPU_DEST/tmp/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + apt_get_update + retrycmd_if_failure 30 5 3600 apt-get install -y linux-headers-$(uname -r) gcc make dkms || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + retrycmd_if_failure 30 5 60 curl -fLS https://us.download.nvidia.com/tesla/$GPU_DV/NVIDIA-Linux-x86_64-${GPU_DV}.run -o ${GPU_DEST}/nvidia-drivers-${GPU_DV} || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + tmpDir=$GPU_DEST/tmp + if ! 
( + set -e -o pipefail + cd "${tmpDir}" + retrycmd_if_failure 30 5 3600 apt-get download nvidia-docker2="${NVIDIA_DOCKER_VERSION}+${NVIDIA_DOCKER_SUFFIX}" || exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + ); then + exit $ERR_GPU_DRIVERS_INSTALL_TIMEOUT + fi +} + +installSGXDrivers() { + echo "Installing SGX driver" + local VERSION + VERSION=$(grep DISTRIB_RELEASE /etc/*-release| cut -f 2 -d "=") + case $VERSION in + "18.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer18.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "16.04") + SGX_DRIVER_URL="https://download.01.org/intel-sgx/dcap-1.2/linux/dcap_installers/ubuntuServer16.04/sgx_linux_x64_driver_1.12_c110012.bin" + ;; + "*") + echo "Version $VERSION is not supported" + exit 1 + ;; + esac + + local PACKAGES="make gcc dkms" + wait_for_apt_locks + retrycmd_if_failure 30 5 3600 apt-get -y install $PACKAGES || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + + local SGX_DRIVER + SGX_DRIVER=$(basename $SGX_DRIVER_URL) + local OE_DIR=/opt/azure/containers/oe + mkdir -p ${OE_DIR} + + retrycmd_if_failure 120 5 25 curl -fsSL ${SGX_DRIVER_URL} -o ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_INSTALL_TIMEOUT + chmod a+x ${OE_DIR}/${SGX_DRIVER} + ${OE_DIR}/${SGX_DRIVER} || exit $ERR_SGX_DRIVERS_START_FAIL +} + +installContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + installMoby + fi + +} + +installMoby() { + CURRENT_VERSION=$(dockerd --version | grep "Docker version" | cut -d "," -f 1 | cut -d " " -f 3 | cut -d "+" -f 1) + if [[ "$CURRENT_VERSION" == "${MOBY_VERSION}" ]]; then + echo "dockerd $MOBY_VERSION is already installed, skipping Moby download" + else + removeMoby + getMobyPkg + MOBY_CLI=${MOBY_VERSION} + if [[ "${MOBY_CLI}" == "3.0.4" ]]; then + MOBY_CLI="3.0.3" + fi + apt_get_install 20 30 120 moby-engine=${MOBY_VERSION}* moby-cli=${MOBY_CLI}* --allow-downgrades || exit $ERR_MOBY_INSTALL_TIMEOUT + fi +} + + + +getMobyPkg() { + retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/config/ubuntu/${UBUNTU_RELEASE}/prod.list > /tmp/microsoft-prod.list || exit $ERR_MOBY_APT_LIST_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft-prod.list /etc/apt/sources.list.d/ || exit $ERR_MOBY_APT_LIST_TIMEOUT + retrycmd_if_failure_no_stats 120 5 25 curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /tmp/microsoft.gpg || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + retrycmd_if_failure 10 5 10 cp /tmp/microsoft.gpg /etc/apt/trusted.gpg.d/ || exit $ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT + apt_get_update || exit $ERR_APT_UPDATE_TIMEOUT +} + +installNetworkPlugin() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + installAzureCNI + fi + installCNI + rm -rf $CNI_DOWNLOADS_DIR & +} + +downloadCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadAzureCNI() { + mkdir -p $CNI_DOWNLOADS_DIR + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + retrycmd_get_tarball 120 5 "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ${VNET_CNI_PLUGINS_URL} || exit $ERR_CNI_DOWNLOAD_TIMEOUT +} + +downloadContainerd() { + CONTAINERD_DOWNLOAD_URL="${CONTAINERD_DOWNLOAD_URL_BASE}cri-containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz" + mkdir -p $CONTAINERD_DOWNLOADS_DIR + CONTAINERD_TGZ_TMP=${CONTAINERD_DOWNLOAD_URL##*/} + 
retrycmd_get_tarball 120 5 "$CONTAINERD_DOWNLOADS_DIR/${CONTAINERD_TGZ_TMP}" ${CONTAINERD_DOWNLOAD_URL} || exit $ERR_CONTAINERD_DOWNLOAD_TIMEOUT +} + +installCNI() { + CNI_TGZ_TMP=${CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadCNI + fi + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR + chown -R root:root $CNI_BIN_DIR + chmod -R 755 $CNI_BIN_DIR +} + +installAzureCNI() { + CNI_TGZ_TMP=${VNET_CNI_PLUGINS_URL##*/} # Use bash builtin ## to remove all chars ("*") up to the final "/" + if [[ ! -f "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" ]]; then + downloadAzureCNI + fi + mkdir -p $CNI_CONFIG_DIR + chown -R root:root $CNI_CONFIG_DIR + chmod 755 $CNI_CONFIG_DIR + mkdir -p $CNI_BIN_DIR + tar -xzf "$CNI_DOWNLOADS_DIR/${CNI_TGZ_TMP}" -C $CNI_BIN_DIR +} + +installImg() { + img_filepath=/usr/local/bin/img + retrycmd_get_executable 120 5 $img_filepath "https://acs-mirror.azureedge.net/img/img-linux-amd64-v0.5.6" ls || exit $ERR_IMG_DOWNLOAD_TIMEOUT +} + +extractHyperkube() { + CLI_TOOL=$1 + path="/home/hyperkube-downloads/${KUBERNETES_VERSION}" + pullContainerImage $CLI_TOOL ${HYPERKUBE_URL} + if [[ "$CLI_TOOL" == "docker" ]]; then + mkdir -p "$path" + # Check if we can extract kubelet and kubectl directly from hyperkube's binary folder + if docker run --rm --entrypoint "" -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /usr/local/bin/{kubelet,kubectl} $path"; then + mv "$path/kubelet" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/kubectl" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + return + else + docker run --rm -v $path:$path ${HYPERKUBE_URL} /bin/bash -c "cp /hyperkube $path" + fi + else + img unpack -o "$path" ${HYPERKUBE_URL} + fi + + if [[ $OS == $COREOS_OS_NAME ]]; then + cp "$path/hyperkube" "/opt/kubelet" + mv "$path/hyperkube" "/opt/kubectl" + chmod a+x /opt/kubelet /opt/kubectl + else + cp "$path/hyperkube" "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" + mv "$path/hyperkube" "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" + fi +} + +installKubeletAndKubectl() { + if [[ ! -f "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" ]]; then + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + extractHyperkube "docker" + else + installImg + extractHyperkube "img" + fi + fi + mv "/usr/local/bin/kubelet-${KUBERNETES_VERSION}" "/usr/local/bin/kubelet" + mv "/usr/local/bin/kubectl-${KUBERNETES_VERSION}" "/usr/local/bin/kubectl" + chmod a+x /usr/local/bin/kubelet /usr/local/bin/kubectl + rm -rf /usr/local/bin/kubelet-* /usr/local/bin/kubectl-* /home/hyperkube-downloads & +} + +pullContainerImage() { + CLI_TOOL=$1 + DOCKER_IMAGE_URL=$2 + retrycmd_if_failure 60 1 1200 $CLI_TOOL pull $DOCKER_IMAGE_URL || exit $ERR_CONTAINER_IMG_PULL_TIMEOUT +} + +cleanUpContainerImages() { + function cleanUpHyperkubeImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'hyperkube') + local exit_code=$? 
+ if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + function cleanUpControllerManagerImagesRun() { + images_to_delete=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep -vE "${KUBERNETES_VERSION}$|${KUBERNETES_VERSION}.[0-9]+$|${KUBERNETES_VERSION}-|${KUBERNETES_VERSION}_" | grep 'cloud-controller-manager') + local exit_code=$? + if [[ $exit_code != 0 ]]; then + exit $exit_code + elif [[ "${images_to_delete}" != "" ]]; then + docker rmi ${images_to_delete[@]} + fi + } + export -f cleanUpHyperkubeImagesRun + export -f cleanUpControllerManagerImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpHyperkubeImagesRun + retrycmd_if_failure 10 5 120 bash -c cleanUpControllerManagerImagesRun +} + +cleanUpGPUDrivers() { + rm -Rf $GPU_DEST + rm -f /etc/apt/sources.list.d/nvidia-docker.list +} + +cleanUpContainerd() { + rm -Rf $CONTAINERD_DOWNLOADS_DIR +} + +overrideNetworkConfig() { + CONFIG_FILEPATH="/etc/cloud/cloud.cfg.d/80_azure_net_config.cfg" + touch ${CONFIG_FILEPATH} + cat << EOF >> ${CONFIG_FILEPATH} +datasource: + Azure: + apply_network_config: false +EOF +} +#EOF diff --git a/pkg/agent/testdata/RawUbuntu/line30.sh b/pkg/agent/testdata/RawUbuntu/line30.sh new file mode 100644 index 00000000000..ce857cb431e --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/line30.sh @@ -0,0 +1,337 @@ +#!/bin/bash +NODE_INDEX=$(hostname | tail -c 2) +NODE_NAME=$(hostname) +if [[ $OS == $COREOS_OS_NAME ]]; then + PRIVATE_IP=$(ip a show eth0 | grep -Po 'inet \K[\d.]+') +else + PRIVATE_IP=$(hostname -I | cut -d' ' -f1) +fi +ETCD_PEER_URL="https://${PRIVATE_IP}:2380" +ETCD_CLIENT_URL="https://${PRIVATE_IP}:2379" + +configureAdminUser(){ + chage -E -1 -I -1 -m 0 -M 99999 "${ADMINUSER}" + chage -l "${ADMINUSER}" +} + +configureSecrets(){ + APISERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/apiserver.key" + touch "${APISERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${APISERVER_PRIVATE_KEY_PATH}" + chown root:root "${APISERVER_PRIVATE_KEY_PATH}" + + CA_PRIVATE_KEY_PATH="/etc/kubernetes/certs/ca.key" + touch "${CA_PRIVATE_KEY_PATH}" + chmod 0600 "${CA_PRIVATE_KEY_PATH}" + chown root:root "${CA_PRIVATE_KEY_PATH}" + + ETCD_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdserver.key" + touch "${ETCD_SERVER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_SERVER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_SERVER_PRIVATE_KEY_PATH}" + fi + + ETCD_CLIENT_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdclient.key" + touch "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + chown root:root "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + + ETCD_PEER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.key" + touch "${ETCD_PEER_PRIVATE_KEY_PATH}" + chmod 0600 "${ETCD_PEER_PRIVATE_KEY_PATH}" + if [[ -z "${COSMOS_URI}" ]]; then + chown etcd:etcd "${ETCD_PEER_PRIVATE_KEY_PATH}" + fi + + ETCD_SERVER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdserver.crt" + touch "${ETCD_SERVER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_SERVER_CERTIFICATE_PATH}" + chown root:root "${ETCD_SERVER_CERTIFICATE_PATH}" + + ETCD_CLIENT_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdclient.crt" + touch "${ETCD_CLIENT_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_CLIENT_CERTIFICATE_PATH}" + chown root:root "${ETCD_CLIENT_CERTIFICATE_PATH}" + + ETCD_PEER_CERTIFICATE_PATH="/etc/kubernetes/certs/etcdpeer${NODE_INDEX}.crt" + touch "${ETCD_PEER_CERTIFICATE_PATH}" + chmod 0644 "${ETCD_PEER_CERTIFICATE_PATH}" + chown root:root 
"${ETCD_PEER_CERTIFICATE_PATH}" + + set +x + echo "${APISERVER_PRIVATE_KEY}" | base64 --decode > "${APISERVER_PRIVATE_KEY_PATH}" + echo "${CA_PRIVATE_KEY}" | base64 --decode > "${CA_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_PRIVATE_KEY}" | base64 --decode > "${ETCD_SERVER_PRIVATE_KEY_PATH}" + echo "${ETCD_CLIENT_PRIVATE_KEY}" | base64 --decode > "${ETCD_CLIENT_PRIVATE_KEY_PATH}" + echo "${ETCD_PEER_KEY}" | base64 --decode > "${ETCD_PEER_PRIVATE_KEY_PATH}" + echo "${ETCD_SERVER_CERTIFICATE}" | base64 --decode > "${ETCD_SERVER_CERTIFICATE_PATH}" + echo "${ETCD_CLIENT_CERTIFICATE}" | base64 --decode > "${ETCD_CLIENT_CERTIFICATE_PATH}" + echo "${ETCD_PEER_CERT}" | base64 --decode > "${ETCD_PEER_CERTIFICATE_PATH}" +} + +ensureRPC() { + systemctlEnableAndStart rpcbind || exit $ERR_SYSTEMCTL_START_FAIL + systemctlEnableAndStart rpc-statd || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureAuditD() { + if [[ "${AUDITD_ENABLED}" == true ]]; then + systemctlEnableAndStart auditd || exit $ERR_SYSTEMCTL_START_FAIL + else + if apt list --installed | grep 'auditd'; then + apt_get_purge 20 30 120 auditd & + fi + fi +} + +configureKubeletServerCert() { + KUBELET_SERVER_PRIVATE_KEY_PATH="/etc/kubernetes/certs/kubeletserver.key" + KUBELET_SERVER_CERT_PATH="/etc/kubernetes/certs/kubeletserver.crt" + + openssl genrsa -out $KUBELET_SERVER_PRIVATE_KEY_PATH 2048 + openssl req -new -x509 -days 7300 -key $KUBELET_SERVER_PRIVATE_KEY_PATH -out $KUBELET_SERVER_CERT_PATH -subj "/CN=${NODE_NAME}" +} + +configureK8s() { + KUBELET_PRIVATE_KEY_PATH="/etc/kubernetes/certs/client.key" + touch "${KUBELET_PRIVATE_KEY_PATH}" + chmod 0600 "${KUBELET_PRIVATE_KEY_PATH}" + chown root:root "${KUBELET_PRIVATE_KEY_PATH}" + + APISERVER_PUBLIC_KEY_PATH="/etc/kubernetes/certs/apiserver.crt" + touch "${APISERVER_PUBLIC_KEY_PATH}" + chmod 0644 "${APISERVER_PUBLIC_KEY_PATH}" + chown root:root "${APISERVER_PUBLIC_KEY_PATH}" + + AZURE_JSON_PATH="/etc/kubernetes/azure.json" + touch "${AZURE_JSON_PATH}" + chmod 0600 "${AZURE_JSON_PATH}" + chown root:root "${AZURE_JSON_PATH}" + + set +x + echo "${KUBELET_PRIVATE_KEY}" | base64 --decode > "${KUBELET_PRIVATE_KEY_PATH}" + echo "${APISERVER_PUBLIC_KEY}" | base64 --decode > "${APISERVER_PUBLIC_KEY_PATH}" + + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\\/\\\\} + SERVICE_PRINCIPAL_CLIENT_SECRET=${SERVICE_PRINCIPAL_CLIENT_SECRET//\"/\\\"} + cat << EOF > "${AZURE_JSON_PATH}" +{ + "cloud": "AzurePublicCloud", + "tenantId": "${TENANT_ID}", + "subscriptionId": "${SUBSCRIPTION_ID}", + "aadClientId": "${SERVICE_PRINCIPAL_CLIENT_ID}", + "aadClientSecret": "${SERVICE_PRINCIPAL_CLIENT_SECRET}", + "resourceGroup": "${RESOURCE_GROUP}", + "location": "${LOCATION}", + "vmType": "${VM_TYPE}", + "subnetName": "${SUBNET}", + "securityGroupName": "${NETWORK_SECURITY_GROUP}", + "vnetName": "${VIRTUAL_NETWORK}", + "vnetResourceGroup": "${VIRTUAL_NETWORK_RESOURCE_GROUP}", + "routeTableName": "${ROUTE_TABLE}", + "primaryAvailabilitySetName": "${PRIMARY_AVAILABILITY_SET}", + "primaryScaleSetName": "${PRIMARY_SCALE_SET}", + "cloudProviderBackoffMode": "${CLOUDPROVIDER_BACKOFF_MODE}", + "cloudProviderBackoff": ${CLOUDPROVIDER_BACKOFF}, + "cloudProviderBackoffRetries": ${CLOUDPROVIDER_BACKOFF_RETRIES}, + "cloudProviderBackoffExponent": ${CLOUDPROVIDER_BACKOFF_EXPONENT}, + "cloudProviderBackoffDuration": ${CLOUDPROVIDER_BACKOFF_DURATION}, + "cloudProviderBackoffJitter": ${CLOUDPROVIDER_BACKOFF_JITTER}, + "cloudProviderRateLimit": ${CLOUDPROVIDER_RATELIMIT}, + "cloudProviderRateLimitQPS": 
${CLOUDPROVIDER_RATELIMIT_QPS}, + "cloudProviderRateLimitBucket": ${CLOUDPROVIDER_RATELIMIT_BUCKET}, + "cloudProviderRateLimitQPSWrite": ${CLOUDPROVIDER_RATELIMIT_QPS_WRITE}, + "cloudProviderRateLimitBucketWrite": ${CLOUDPROVIDER_RATELIMIT_BUCKET_WRITE}, + "useManagedIdentityExtension": ${USE_MANAGED_IDENTITY_EXTENSION}, + "userAssignedIdentityID": "${USER_ASSIGNED_IDENTITY_ID}", + "useInstanceMetadata": ${USE_INSTANCE_METADATA}, + "loadBalancerSku": "${LOAD_BALANCER_SKU}", + "disableOutboundSNAT": ${LOAD_BALANCER_DISABLE_OUTBOUND_SNAT}, + "excludeMasterFromStandardLB": ${EXCLUDE_MASTER_FROM_STANDARD_LB}, + "providerVaultName": "${KMS_PROVIDER_VAULT_NAME}", + "maximumLoadBalancerRuleCount": ${MAXIMUM_LOADBALANCER_RULE_COUNT}, + "providerKeyName": "k8s", + "providerKeyVersion": "" +} +EOF + set -x + if [[ "${CLOUDPROVIDER_BACKOFF_MODE}" = "v2" ]]; then + sed -i "/cloudProviderBackoffExponent/d" /etc/kubernetes/azure.json + sed -i "/cloudProviderBackoffJitter/d" /etc/kubernetes/azure.json + fi + + configureKubeletServerCert +} + +configureCNI() { + + retrycmd_if_failure 120 5 25 modprobe br_netfilter || exit $ERR_MODPROBE_FAIL + echo -n "br_netfilter" > /etc/modules-load.d/br_netfilter.conf + configureCNIIPTables + +} + +configureCNIIPTables() { + if [[ "${NETWORK_PLUGIN}" = "azure" ]]; then + mv $CNI_BIN_DIR/10-azure.conflist $CNI_CONFIG_DIR/ + chmod 600 $CNI_CONFIG_DIR/10-azure.conflist + if [[ "${NETWORK_POLICY}" == "calico" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + elif [[ "${NETWORK_POLICY}" == "" || "${NETWORK_POLICY}" == "none" ]] && [[ "${NETWORK_MODE}" == "transparent" ]]; then + sed -i 's#"mode":"bridge"#"mode":"transparent"#g' $CNI_CONFIG_DIR/10-azure.conflist + fi + /sbin/ebtables -t nat --list + fi +} + +ensureContainerRuntime() { + if [[ "$CONTAINER_RUNTIME" == "docker" ]]; then + ensureDocker + fi + +} + + + +ensureDocker() { + DOCKER_SERVICE_EXEC_START_FILE=/etc/systemd/system/docker.service.d/exec_start.conf + wait_for_file 1200 1 $DOCKER_SERVICE_EXEC_START_FILE || exit $ERR_FILE_WATCH_TIMEOUT + usermod -aG docker ${ADMINUSER} + DOCKER_MOUNT_FLAGS_SYSTEMD_FILE=/etc/systemd/system/docker.service.d/clear_mount_propagation_flags.conf + if [[ $OS != $COREOS_OS_NAME ]]; then + wait_for_file 1200 1 $DOCKER_MOUNT_FLAGS_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + fi + DOCKER_JSON_FILE=/etc/docker/daemon.json + for i in $(seq 1 1200); do + if [ -s $DOCKER_JSON_FILE ]; then + jq '.' 
< $DOCKER_JSON_FILE && break + fi + if [ $i -eq 1200 ]; then + exit $ERR_FILE_WATCH_TIMEOUT + else + sleep 1 + fi + done + systemctlEnableAndStart docker || exit $ERR_DOCKER_START_FAIL + + DOCKER_MONITOR_SYSTEMD_TIMER_FILE=/etc/systemd/system/docker-monitor.timer + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_TIMER_FILE || exit $ERR_FILE_WATCH_TIMEOUT + DOCKER_MONITOR_SYSTEMD_FILE=/etc/systemd/system/docker-monitor.service + wait_for_file 1200 1 $DOCKER_MONITOR_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart docker-monitor.timer || exit $ERR_SYSTEMCTL_START_FAIL +} + + + + + +ensureKubelet() { + KUBELET_DEFAULT_FILE=/etc/default/kubelet + wait_for_file 1200 1 $KUBELET_DEFAULT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBECONFIG_FILE=/var/lib/kubelet/kubeconfig + wait_for_file 1200 1 $KUBECONFIG_FILE || exit $ERR_FILE_WATCH_TIMEOUT + KUBELET_RUNTIME_CONFIG_SCRIPT_FILE=/opt/azure/containers/kubelet.sh + wait_for_file 1200 1 $KUBELET_RUNTIME_CONFIG_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart kubelet || exit $ERR_KUBELET_START_FAIL + + + +} + +ensureLabelNodes() { + LABEL_NODES_SCRIPT_FILE=/opt/azure/containers/label-nodes.sh + wait_for_file 1200 1 $LABEL_NODES_SCRIPT_FILE || exit $ERR_FILE_WATCH_TIMEOUT + LABEL_NODES_SYSTEMD_FILE=/etc/systemd/system/label-nodes.service + wait_for_file 1200 1 $LABEL_NODES_SYSTEMD_FILE || exit $ERR_FILE_WATCH_TIMEOUT + systemctlEnableAndStart label-nodes || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureJournal() { + { + echo "Storage=persistent" + echo "SystemMaxUse=1G" + echo "RuntimeMaxUse=1G" + echo "ForwardToSyslog=yes" + } >> /etc/systemd/journald.conf + systemctlEnableAndStart systemd-journald || exit $ERR_SYSTEMCTL_START_FAIL +} + +ensureK8sControlPlane() { + if $REBOOTREQUIRED || [ "$NO_OUTBOUND" = "true" ]; then + return + fi + retrycmd_if_failure 120 5 25 $KUBECTL 2>/dev/null cluster-info || exit $ERR_K8S_RUNNING_TIMEOUT +} + +createKubeManifestDir() { + KUBEMANIFESTDIR=/etc/kubernetes/manifests + mkdir -p $KUBEMANIFESTDIR +} + +writeKubeConfig() { + KUBECONFIGDIR=/home/$ADMINUSER/.kube + KUBECONFIGFILE=$KUBECONFIGDIR/config + mkdir -p $KUBECONFIGDIR + touch $KUBECONFIGFILE + chown $ADMINUSER:$ADMINUSER $KUBECONFIGDIR + chown $ADMINUSER:$ADMINUSER $KUBECONFIGFILE + chmod 700 $KUBECONFIGDIR + chmod 600 $KUBECONFIGFILE + set +x + echo " +--- +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: \"$CA_CERTIFICATE\" + server: $KUBECONFIG_SERVER + name: \"$MASTER_FQDN\" +contexts: +- context: + cluster: \"$MASTER_FQDN\" + user: \"$MASTER_FQDN-admin\" + name: \"$MASTER_FQDN\" +current-context: \"$MASTER_FQDN\" +kind: Config +users: +- name: \"$MASTER_FQDN-admin\" + user: + client-certificate-data: \"$KUBECONFIG_CERTIFICATE\" + client-key-data: \"$KUBECONFIG_KEY\" +" > $KUBECONFIGFILE + set -x +} + +configClusterAutoscalerAddon() { + CLUSTER_AUTOSCALER_ADDON_FILE=/etc/kubernetes/addons/cluster-autoscaler-deployment.yaml + wait_for_file 1200 1 $CLUSTER_AUTOSCALER_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SERVICE_PRINCIPAL_CLIENT_SECRET | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $SUBSCRIPTION_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $TENANT_ID | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE + sed -i "s||$(echo $RESOURCE_GROUP | base64)|g" $CLUSTER_AUTOSCALER_ADDON_FILE +} + +configACIConnectorAddon() { + ACI_CONNECTOR_CREDENTIALS=$(printf 
"{\"clientId\": \"%s\", \"clientSecret\": \"%s\", \"tenantId\": \"%s\", \"subscriptionId\": \"%s\", \"activeDirectoryEndpointUrl\": \"https://login.microsoftonline.com\",\"resourceManagerEndpointUrl\": \"https://management.azure.com/\", \"activeDirectoryGraphResourceId\": \"https://graph.windows.net/\", \"sqlManagementEndpointUrl\": \"https://management.core.windows.net:8443/\", \"galleryEndpointUrl\": \"https://gallery.azure.com/\", \"managementEndpointUrl\": \"https://management.core.windows.net/\"}" "$SERVICE_PRINCIPAL_CLIENT_ID" "$SERVICE_PRINCIPAL_CLIENT_SECRET" "$TENANT_ID" "$SUBSCRIPTION_ID" | base64 -w 0) + + openssl req -newkey rsa:4096 -new -nodes -x509 -days 3650 -keyout /etc/kubernetes/certs/aci-connector-key.pem -out /etc/kubernetes/certs/aci-connector-cert.pem -subj "/C=US/ST=CA/L=virtualkubelet/O=virtualkubelet/OU=virtualkubelet/CN=virtualkubelet" + ACI_CONNECTOR_KEY=$(base64 /etc/kubernetes/certs/aci-connector-key.pem -w0) + ACI_CONNECTOR_CERT=$(base64 /etc/kubernetes/certs/aci-connector-cert.pem -w0) + + ACI_CONNECTOR_ADDON_FILE=/etc/kubernetes/addons/aci-connector-deployment.yaml + wait_for_file 1200 1 $ACI_CONNECTOR_ADDON_FILE || exit $ERR_FILE_WATCH_TIMEOUT + sed -i "s||$ACI_CONNECTOR_CREDENTIALS|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$RESOURCE_GROUP|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_CERT|g" $ACI_CONNECTOR_ADDON_FILE + sed -i "s||$ACI_CONNECTOR_KEY|g" $ACI_CONNECTOR_ADDON_FILE +} + +configAzurePolicyAddon() { + AZURE_POLICY_ADDON_FILE=/etc/kubernetes/addons/azure-policy-deployment.yaml + sed -i "s||/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP|g" $AZURE_POLICY_ADDON_FILE +} + + +#EOF diff --git a/pkg/agent/testdata/RawUbuntu/line38.sh b/pkg/agent/testdata/RawUbuntu/line38.sh new file mode 100644 index 00000000000..dcd2f21b7b3 --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/line38.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +assignRootPW() { + if grep '^root:[!*]:' /etc/shadow; then + SALT=$(openssl rand -base64 5) + SECRET=$(openssl rand -base64 37) + CMD="import crypt, getpass, pwd; print crypt.crypt('$SECRET', '\$6\$$SALT\$')" + HASH=$(python -c "$CMD") + + echo 'root:'$HASH | /usr/sbin/chpasswd -e || exit $ERR_CIS_ASSIGN_FILE_PERMISSION + fi +} + +assignFilePermissions() { + FILES=" + auth.log + alternatives.log + cloud-init.log + cloud-init-output.log + daemon.log + dpkg.log + kern.log + lastlog + waagent.log + syslog + unattended-upgrades/unattended-upgrades.log + unattended-upgrades/unattended-upgrades-dpkg.log + azure-vnet-ipam.log + azure-vnet-telemetry.log + azure-cnimonitor.log + azure-vnet.log + kv-driver.log + blobfuse-driver.log + blobfuse-flexvol-installer.log + landscape/sysinfo.log + " + for FILE in ${FILES}; do + FILEPATH="/var/log/${FILE}" + DIR=$(dirname "${FILEPATH}") + mkdir -p ${DIR} || exit $ERR_CIS_ASSIGN_FILE_PERMISSION + touch ${FILEPATH} || exit $ERR_CIS_ASSIGN_FILE_PERMISSION + chmod 640 ${FILEPATH} || exit $ERR_CIS_ASSIGN_FILE_PERMISSION + done + find /var/log -type f -perm '/o+r' -exec chmod 'g-wx,o-rwx' {} \; + chmod 600 /etc/passwd- || exit $ERR_CIS_ASSIGN_FILE_PERMISSION + chmod 600 /etc/shadow- || exit $ERR_CIS_ASSIGN_FILE_PERMISSION + chmod 600 /etc/group- || exit $ERR_CIS_ASSIGN_FILE_PERMISSION + chmod 644 /etc/default/grub || exit $ERR_CIS_ASSIGN_FILE_PERMISSION + for filepath in /etc/crontab /etc/cron.hourly /etc/cron.daily /etc/cron.weekly /etc/cron.monthly /etc/cron.d; do + chmod 0600 $filepath || exit $ERR_CIS_ASSIGN_FILE_PERMISSION + done +} + +setPWExpiration() { + sed -i "s|PASS_MAX_DAYS||g" 
/etc/login.defs || exit $ERR_CIS_APPLY_PASSWORD_CONFIG + grep 'PASS_MAX_DAYS' /etc/login.defs && exit $ERR_CIS_APPLY_PASSWORD_CONFIG + sed -i "s|PASS_MIN_DAYS||g" /etc/login.defs || exit $ERR_CIS_APPLY_PASSWORD_CONFIG + grep 'PASS_MIN_DAYS' /etc/login.defs && exit $ERR_CIS_APPLY_PASSWORD_CONFIG + sed -i "s|INACTIVE=||g" /etc/default/useradd || exit $ERR_CIS_APPLY_PASSWORD_CONFIG + grep 'INACTIVE=' /etc/default/useradd && exit $ERR_CIS_APPLY_PASSWORD_CONFIG + echo 'PASS_MAX_DAYS 90' >> /etc/login.defs || exit $ERR_CIS_APPLY_PASSWORD_CONFIG + grep 'PASS_MAX_DAYS 90' /etc/login.defs || exit $ERR_CIS_APPLY_PASSWORD_CONFIG + echo 'PASS_MIN_DAYS 7' >> /etc/login.defs || exit $ERR_CIS_APPLY_PASSWORD_CONFIG + grep 'PASS_MIN_DAYS 7' /etc/login.defs || exit $ERR_CIS_APPLY_PASSWORD_CONFIG + echo 'INACTIVE=30' >> /etc/default/useradd || exit $ERR_CIS_APPLY_PASSWORD_CONFIG + grep 'INACTIVE=30' /etc/default/useradd || exit $ERR_CIS_APPLY_PASSWORD_CONFIG +} + +applyCIS() { + setPWExpiration + assignRootPW + assignFilePermissions +} + +applyCIS + +#EOF diff --git a/pkg/agent/testdata/RawUbuntu/line52.sh b/pkg/agent/testdata/RawUbuntu/line52.sh new file mode 100644 index 00000000000..e708f006a14 --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/line52.sh @@ -0,0 +1,38 @@ +[Unit] +Description=Kubelet +ConditionPathExists=/usr/local/bin/kubelet + + +[Service] +Restart=always +EnvironmentFile=/etc/default/kubelet +SuccessExitStatus=143 +ExecStartPre=/bin/bash /opt/azure/containers/kubelet.sh +ExecStartPre=/bin/mkdir -p /var/lib/kubelet +ExecStartPre=/bin/mkdir -p /var/lib/cni +ExecStartPre=/bin/bash -c "if [ $(mount | grep \"/var/lib/kubelet\" | wc -l) -le 0 ] ; then /bin/mount --bind /var/lib/kubelet /var/lib/kubelet ; fi" +ExecStartPre=/bin/mount --make-shared /var/lib/kubelet + + +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_retries2=8 +ExecStartPre=/sbin/sysctl -w net.core.somaxconn=16384 +ExecStartPre=/sbin/sysctl -w net.ipv4.tcp_max_syn_backlog=16384 +ExecStartPre=/sbin/sysctl -w net.core.message_cost=40 +ExecStartPre=/sbin/sysctl -w net.core.message_burst=80 + +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh1=4096 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh2=8192 +ExecStartPre=/sbin/sysctl -w net.ipv4.neigh.default.gc_thresh3=16384 + +ExecStartPre=-/sbin/ebtables -t nat --list +ExecStartPre=-/sbin/iptables -t nat --numeric --list +ExecStart=/usr/local/bin/kubelet \ + --enable-server \ + --node-labels="${KUBELET_NODE_LABELS}" \ + --v=2 \ + --volume-plugin-dir=/etc/kubernetes/volumeplugins \ + $KUBELET_FLAGS \ + $KUBELET_REGISTER_NODE $KUBELET_REGISTER_WITH_TAINTS + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/agent/testdata/RawUbuntu/line62.sh b/pkg/agent/testdata/RawUbuntu/line62.sh new file mode 100644 index 00000000000..79267663fdc --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/line62.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash + +# This script originated at https://github.com/kubernetes/kubernetes/blob/master/cluster/gce/gci/health-monitor.sh +# and has been modified for aks-engine. 
+ +set -o nounset +set -o pipefail + +container_runtime_monitoring() { + local -r max_attempts=5 + local attempt=1 + local -r crictl="${KUBE_HOME}/bin/crictl" + local -r container_runtime_name="${CONTAINER_RUNTIME_NAME:-docker}" + local healthcheck_command="docker ps" + if [[ "${CONTAINER_RUNTIME:-docker}" != "docker" ]]; then + healthcheck_command="${crictl} pods" + fi + + until timeout 60 ${healthcheck_command} > /dev/null; do + if (( attempt == max_attempts )); then + echo "Max attempt ${max_attempts} reached! Proceeding to monitor container runtime healthiness." + break + fi + echo "$attempt initial attempt \"${healthcheck_command}\"! Trying again in $attempt seconds..." + sleep "$(( 2 ** attempt++ ))" + done + while true; do + if ! timeout 60 ${healthcheck_command} > /dev/null; then + echo "Container runtime ${container_runtime_name} failed!" + if [[ "$container_runtime_name" == "docker" ]]; then + pkill -SIGUSR1 dockerd + fi + systemctl kill --kill-who=main "${container_runtime_name}" + sleep 120 + else + sleep "${SLEEP_SECONDS}" + fi + done +} + +kubelet_monitoring() { + echo "Wait for 2 minutes for kubelet to be functional" + sleep 120 + local -r max_seconds=10 + local output="" + while true; do + if ! output=$(curl -m "${max_seconds}" -f -s -S http://127.0.0.1:10255/healthz 2>&1); then + echo $output + echo "Kubelet is unhealthy!" + systemctl kill kubelet + sleep 60 + else + sleep "${SLEEP_SECONDS}" + fi + done +} + +if [[ "$#" -ne 1 ]]; then + echo "Usage: health-monitor.sh " + exit 1 +fi + +KUBE_HOME="/usr/local/bin" +KUBE_ENV="/etc/default/kube-env" +if [[ -e "${KUBE_ENV}" ]]; then + source "${KUBE_ENV}" +fi + +SLEEP_SECONDS=10 +component=$1 +echo "Start kubernetes health monitoring for ${component}" + +if [[ "${component}" == "container-runtime" ]]; then + container_runtime_monitoring +elif [[ "${component}" == "kubelet" ]]; then + kubelet_monitoring +else + echo "Health monitoring for component ${component} is not supported!" 
+fi diff --git a/pkg/agent/testdata/RawUbuntu/line69.sh b/pkg/agent/testdata/RawUbuntu/line69.sh new file mode 100644 index 00000000000..0778a089709 --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/line69.sh @@ -0,0 +1,8 @@ +[Unit] +Description=a script that checks kubelet health and restarts if needed +After=kubelet.service +[Service] +Restart=always +RestartSec=10 +RemainAfterExit=yes +ExecStart=/usr/local/bin/health-monitor.sh kubelet \ No newline at end of file diff --git a/pkg/agent/testdata/RawUbuntu/line76.sh b/pkg/agent/testdata/RawUbuntu/line76.sh new file mode 100644 index 00000000000..b7ff635ffb5 --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/line76.sh @@ -0,0 +1,7 @@ +[Unit] +Description=a timer that delays docker-monitor from starting too soon after boot +[Timer] +OnBootSec=30min +[Install] +WantedBy=multi-user.target +#EOF diff --git a/pkg/agent/testdata/RawUbuntu/line83.sh b/pkg/agent/testdata/RawUbuntu/line83.sh new file mode 100644 index 00000000000..f44163ccf61 --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/line83.sh @@ -0,0 +1,9 @@ +[Unit] +Description=a script that checks docker health and restarts if needed +After=docker.service +[Service] +Restart=always +RestartSec=10 +RemainAfterExit=yes +ExecStart=/usr/local/bin/health-monitor.sh container-runtime +#EOF diff --git a/pkg/agent/testdata/RawUbuntu/line9.sh b/pkg/agent/testdata/RawUbuntu/line9.sh new file mode 100644 index 00000000000..08cbc16e86d --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/line9.sh @@ -0,0 +1,305 @@ +#!/bin/bash + +ERR_SYSTEMCTL_START_FAIL=4 +ERR_CLOUD_INIT_TIMEOUT=5 +ERR_FILE_WATCH_TIMEOUT=6 +ERR_HOLD_WALINUXAGENT=7 +ERR_RELEASE_HOLD_WALINUXAGENT=8 +ERR_APT_INSTALL_TIMEOUT=9 +ERR_NTP_INSTALL_TIMEOUT=10 +ERR_NTP_START_TIMEOUT=11 +ERR_STOP_SYSTEMD_TIMESYNCD_TIMEOUT=12 +ERR_DOCKER_INSTALL_TIMEOUT=20 +ERR_DOCKER_DOWNLOAD_TIMEOUT=21 +ERR_DOCKER_KEY_DOWNLOAD_TIMEOUT=22 +ERR_DOCKER_APT_KEY_TIMEOUT=23 +ERR_DOCKER_START_FAIL=24 +ERR_MOBY_APT_LIST_TIMEOUT=25 +ERR_MS_GPG_KEY_DOWNLOAD_TIMEOUT=26 +ERR_MOBY_INSTALL_TIMEOUT=27 +ERR_K8S_RUNNING_TIMEOUT=30 +ERR_K8S_DOWNLOAD_TIMEOUT=31 +ERR_KUBECTL_NOT_FOUND=32 +ERR_IMG_DOWNLOAD_TIMEOUT=33 +ERR_KUBELET_START_FAIL=34 +ERR_CONTAINER_IMG_PULL_TIMEOUT=35 +ERR_CNI_DOWNLOAD_TIMEOUT=41 +ERR_MS_PROD_DEB_DOWNLOAD_TIMEOUT=42 +ERR_MS_PROD_DEB_PKG_ADD_FAIL=43 + +ERR_SYSTEMD_INSTALL_FAIL=48 +ERR_MODPROBE_FAIL=49 +ERR_OUTBOUND_CONN_FAIL=50 +ERR_K8S_API_SERVER_CONN_FAIL=51 +ERR_K8S_API_SERVER_DNS_LOOKUP_FAIL=52 +ERR_K8S_API_SERVER_AZURE_DNS_LOOKUP_FAIL=53 +ERR_KATA_KEY_DOWNLOAD_TIMEOUT=60 +ERR_KATA_APT_KEY_TIMEOUT=61 +ERR_KATA_INSTALL_TIMEOUT=62 +ERR_CONTAINERD_DOWNLOAD_TIMEOUT=70 +ERR_CUSTOM_SEARCH_DOMAINS_FAIL=80 +ERR_GPU_DRIVERS_START_FAIL=84 +ERR_GPU_DRIVERS_INSTALL_TIMEOUT=85 +ERR_GPU_DEVICE_PLUGIN_START_FAIL=86 +ERR_GPU_INFO_ROM_CORRUPTED=87 +ERR_SGX_DRIVERS_INSTALL_TIMEOUT=90 +ERR_SGX_DRIVERS_START_FAIL=91 +ERR_APT_DAILY_TIMEOUT=98 +ERR_APT_UPDATE_TIMEOUT=99 +ERR_CSE_PROVISION_SCRIPT_NOT_READY_TIMEOUT=100 +ERR_APT_DIST_UPGRADE_TIMEOUT=101 +ERR_APT_PURGE_FAIL=102 +ERR_SYSCTL_RELOAD=103 +ERR_CIS_ASSIGN_ROOT_PW=111 +ERR_CIS_ASSIGN_FILE_PERMISSION=112 +ERR_PACKER_COPY_FILE=113 +ERR_CIS_APPLY_PASSWORD_CONFIG=115 +ERR_SYSTEMD_DOCKER_STOP_FAIL=116 + +ERR_VHD_FILE_NOT_FOUND=124 +ERR_VHD_BUILD_ERROR=125 + + +ERR_AZURE_STACK_GET_ARM_TOKEN=120 +ERR_AZURE_STACK_GET_NETWORK_CONFIGURATION=121 +ERR_AZURE_STACK_GET_SUBNET_PREFIX=122 + +OS=$(sort -r /etc/*-release | gawk 'match($0, /^(ID_LIKE=(coreos)|ID=(.*))$/, a) { print toupper(a[2] a[3]); exit }') +UBUNTU_OS_NAME="UBUNTU" 
+RHEL_OS_NAME="RHEL" +COREOS_OS_NAME="COREOS" +KUBECTL=/usr/local/bin/kubectl +DOCKER=/usr/bin/docker +export GPU_DV=418.126.02 +export GPU_DEST=/usr/local/nvidia +NVIDIA_DOCKER_VERSION=2.0.3 +DOCKER_VERSION=1.13.1-1 +NVIDIA_CONTAINER_RUNTIME_VERSION=2.0.0 +NVIDIA_DOCKER_SUFFIX=docker18.09.2-1 + +aptmarkWALinuxAgent() { + wait_for_apt_locks + retrycmd_if_failure 120 5 25 apt-mark $1 walinuxagent || \ + if [[ "$1" == "hold" ]]; then + exit $ERR_HOLD_WALINUXAGENT + elif [[ "$1" == "unhold" ]]; then + exit $ERR_RELEASE_HOLD_WALINUXAGENT + fi +} + +retrycmd_if_failure() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + echo Executed \"$@\" $i times; + return 1 + else + sleep $wait_sleep + fi + done + echo Executed \"$@\" $i times; +} +retrycmd_if_failure_no_stats() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + timeout $timeout ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +retrycmd_get_tarball() { + tar_retries=$1; wait_sleep=$2; tarball=$3; url=$4 + echo "${tar_retries} retries" + for i in $(seq 1 $tar_retries); do + tar -tzf $tarball && break || \ + if [ $i -eq $tar_retries ]; then + return 1 + else + timeout 60 curl -fsSL $url -o $tarball + sleep $wait_sleep + fi + done +} +retrycmd_get_executable() { + retries=$1; wait_sleep=$2; filepath=$3; url=$4; validation_args=$5 + echo "${retries} retries" + for i in $(seq 1 $retries); do + $filepath $validation_args && break || \ + if [ $i -eq $retries ]; then + return 1 + else + timeout 30 curl -fsSL $url -o $filepath + chmod +x $filepath + sleep $wait_sleep + fi + done +} +wait_for_file() { + retries=$1; wait_sleep=$2; filepath=$3 + paved=/opt/azure/cloud-init-files.paved + grep -Fq "${filepath}" $paved && return 0 + for i in $(seq 1 $retries); do + grep -Fq '#EOF' $filepath && break + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + sed -i "/#EOF/d" $filepath + echo $filepath >> $paved +} +wait_for_apt_locks() { + while fuser /var/lib/dpkg/lock /var/lib/apt/lists/lock /var/cache/apt/archives/lock >/dev/null 2>&1; do + echo 'Waiting for release of apt locks' + sleep 3 + done +} +apt_get_update() { + retries=10 + apt_update_output=/tmp/apt-get-update.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + ! 
(apt-get update 2>&1 | tee $apt_update_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_update_output && break || \ + cat $apt_update_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get update $i times + wait_for_apt_locks +} +apt_get_install() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get install -o Dpkg::Options::="--force-confold" --no-install-recommends -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + apt_get_update + fi + done + echo Executed apt-get install --no-install-recommends -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_purge() { + retries=$1; wait_sleep=$2; timeout=$3; shift && shift && shift + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get purge -o Dpkg::Options::="--force-confold" -y ${@} && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done + echo Executed apt-get purge -y \"$@\" $i times; + wait_for_apt_locks +} +apt_get_dist_upgrade() { + retries=10 + apt_dist_upgrade_output=/tmp/apt-get-dist-upgrade.out + for i in $(seq 1 $retries); do + wait_for_apt_locks + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a --force-confdef + apt-get -f -y install + apt-mark showhold + ! (apt-get dist-upgrade -y 2>&1 | tee $apt_dist_upgrade_output | grep -E "^([WE]:.*)|([eE]rr.*)$") && \ + cat $apt_dist_upgrade_output && break || \ + cat $apt_dist_upgrade_output + if [ $i -eq $retries ]; then + return 1 + else sleep 5 + fi + done + echo Executed apt-get dist-upgrade $i times + wait_for_apt_locks +} +systemctl_restart() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl restart $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_stop() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl stop $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +systemctl_disable() { + retries=$1; wait_sleep=$2; timeout=$3 svcname=$4 + for i in $(seq 1 $retries); do + timeout $timeout systemctl daemon-reload + timeout $timeout systemctl disable $svcname && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +sysctl_reload() { + retries=$1; wait_sleep=$2; timeout=$3 + for i in $(seq 1 $retries); do + timeout $timeout sysctl --system && break || \ + if [ $i -eq $retries ]; then + return 1 + else + sleep $wait_sleep + fi + done +} +version_gte() { + test "$(printf '%s\n' "$@" | sort -rV | head -n 1)" == "$1" +} + +systemctlEnableAndStart() { + systemctl_restart 100 5 30 $1 + RESTART_STATUS=$? + systemctl status $1 --no-pager -l > /var/log/azure/$1-status.log + if [ $RESTART_STATUS -ne 0 ]; then + echo "$1 could not be started" + return 1 + fi + if ! 
retrycmd_if_failure 120 5 25 systemctl enable $1; then + echo "$1 could not be enabled by systemctl" + return 1 + fi +} + +systemctlDisableAndStop() { + if [ systemctl list-units --full --all | grep -q "$1.service" ]; then + systemctl_stop 20 5 25 $1 || echo "$1 could not be stopped" + systemctl_disable 20 5 25 $1 || echo "$1 could not be disabled" + fi +} +#HELPERSEOF diff --git a/pkg/agent/testdata/RawUbuntu/line90.sh b/pkg/agent/testdata/RawUbuntu/line90.sh new file mode 100644 index 00000000000..0b53f09994a --- /dev/null +++ b/pkg/agent/testdata/RawUbuntu/line90.sh @@ -0,0 +1,19 @@ +[Unit] +Description=azurekms +Requires=docker.service +After=network-online.target + +[Service] +Type=simple +Restart=always +TimeoutStartSec=0 +ExecStart=/usr/bin/docker run \ + --net=host \ + --volume=/opt:/opt \ + --volume=/etc/kubernetes:/etc/kubernetes \ + --volume=/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt \ + --volume=/var/lib/waagent:/var/lib/waagent \ + mcr.microsoft.com/k8s/kms/keyvault:v0.0.9 + +[Install] +WantedBy=multi-user.target diff --git a/pkg/agent/testdata/RawUbuntu/line97.sh b/pkg/agent/testdata/RawUbuntu/line97.sh new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/agent/testdata/convert.sh b/pkg/agent/testdata/convert.sh new file mode 100755 index 00000000000..627061bda19 --- /dev/null +++ b/pkg/agent/testdata/convert.sh @@ -0,0 +1,17 @@ +# !/bin/bash + +if [[ $# -ne 1 ]]; then + echo "Usage: $0 " + exit 1; +fi + +rm $1/line*.sh +file="./$1/CustomData" +#echo "Processing $file" +lineNumber=`grep "content: \!\!binary" -n $file | cut -d':' -f1` +for i in $lineNumber; do +c=$((i+1)); +#echo "Working on line $c"; +z=`sed -n ${c}p $file` +echo $z | base64 -d | gunzip > $1/line${c}.sh +done \ No newline at end of file diff --git a/pkg/agent/types.go b/pkg/agent/types.go index 16d2aa905f9..483184ab856 100644 --- a/pkg/agent/types.go +++ b/pkg/agent/types.go @@ -11,3 +11,325 @@ type KeyVaultRef struct { SecretName string `json:"secretName"` SecretVersion string `json:"secretVersion,omitempty"` } + +// AKSKubeletConfiguration contains the configuration for the Kubelet that AKS set +// this is a subset of KubeletConfiguration defined in https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go +// changed metav1.Duration to Duration and pointers to values to simplify translation +type AKSKubeletConfiguration struct { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` + // staticPodPath is the path to the directory containing local (static) pods to + // run, or the path to a single static pod file. 
+ // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // the set of static pods specified at the new path may be different than the + // ones the Kubelet initially started with, and this may disrupt your node. + // Default: "" + // +optional + StaticPodPath string `json:"staticPodPath,omitempty"` + // address is the IP address for the Kubelet to serve on (set to 0.0.0.0 + // for all interfaces). + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may disrupt components that interact with the Kubelet server. + // Default: "0.0.0.0" + // +optional + Address string `json:"address,omitempty"` + // readOnlyPort is the read-only port for the Kubelet to serve on with + // no authentication/authorization. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may disrupt components that interact with the Kubelet server. + // Default: 0 (disabled) + // +optional + ReadOnlyPort int32 `json:"readOnlyPort,omitempty"` + // tlsCertFile is the file containing x509 Certificate for HTTPS. (CA cert, + // if any, concatenated after server cert). If tlsCertFile and + // tlsPrivateKeyFile are not provided, a self-signed certificate + // and key are generated for the public address and saved to the directory + // passed to the Kubelet's --cert-dir flag. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may disrupt components that interact with the Kubelet server. + // Default: "" + // +optional + TLSCertFile string `json:"tlsCertFile,omitempty"` + // tlsPrivateKeyFile is the file containing x509 private key matching tlsCertFile + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may disrupt components that interact with the Kubelet server. + // Default: "" + // +optional + TLSPrivateKeyFile string `json:"tlsPrivateKeyFile,omitempty"` + // TLSCipherSuites is the list of allowed cipher suites for the server. + // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may disrupt components that interact with the Kubelet server. + // Default: nil + // +optional + TLSCipherSuites []string `json:"tlsCipherSuites,omitempty"` + // rotateCertificates enables client certificate rotation. The Kubelet will request a + // new certificate from the certificates.k8s.io API. This requires an approver to approve the + // certificate signing requests. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // disabling it may disrupt the Kubelet's ability to authenticate with the API server + // after the current certificate expires. + // Default: false + // +optional + RotateCertificates bool `json:"rotateCertificates,omitempty"` + // authentication specifies how requests to the Kubelet's server are authenticated + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may disrupt components that interact with the Kubelet server. 
+ // Defaults: + // anonymous: + // enabled: false + // webhook: + // enabled: true + // cacheTTL: "2m" + // +optional + Authentication KubeletAuthentication `json:"authentication"` + // authorization specifies how requests to the Kubelet's server are authorized + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may disrupt components that interact with the Kubelet server. + // Defaults: + // mode: Webhook + // webhook: + // cacheAuthorizedTTL: "5m" + // cacheUnauthorizedTTL: "30s" + // +optional + Authorization KubeletAuthorization `json:"authorization"` + // eventRecordQPS is the maximum event creations per second. If 0, there + // is no limit enforced. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may impact scalability by changing the amount of traffic produced by + // event creations. + // Default: 5 + // +optional + EventRecordQPS int32 `json:"eventRecordQPS,omitempty"` + // clusterDomain is the DNS domain for this cluster. If set, kubelet will + // configure all containers to search this domain in addition to the + // host's search domains. + // Dynamic Kubelet Config (beta): Dynamically updating this field is not recommended, + // as it should be kept in sync with the rest of the cluster. + // Default: "" + // +optional + ClusterDomain string `json:"clusterDomain,omitempty"` + // clusterDNS is a list of IP addresses for the cluster DNS server. If set, + // kubelet will configure all containers to use this for DNS resolution + // instead of the host's DNS servers. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // changes will only take effect on Pods created after the update. Draining + // the node is recommended before changing this field. + // Default: nil + // +optional + ClusterDNS []string `json:"clusterDNS,omitempty"` + // streamingConnectionIdleTimeout is the maximum time a streaming connection + // can be idle before the connection is automatically closed. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may impact components that rely on infrequent updates over streaming + // connections to the Kubelet server. + // Default: "4h" + // +optional + StreamingConnectionIdleTimeout Duration `json:"streamingConnectionIdleTimeout,omitempty"` + // nodeStatusUpdateFrequency is the frequency that kubelet computes node + // status. If node lease feature is not enabled, it is also the frequency that + // kubelet posts node status to master. + // Note: When node lease feature is not enabled, be cautious when changing the + // constant, it must work with nodeMonitorGracePeriod in nodecontroller. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may impact node scalability, and also that the node controller's + // nodeMonitorGracePeriod must be set to N*NodeStatusUpdateFrequency, + // where N is the number of retries before the node controller marks + // the node unhealthy. + // Default: "10s" + // +optional + NodeStatusUpdateFrequency Duration `json:"nodeStatusUpdateFrequency,omitempty"` + // imageGCHighThresholdPercent is the percent of disk usage after which + // image garbage collection is always run. The percent is calculated as + // this field value out of 100. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may trigger or delay garbage collection, and may change the image overhead + // on the node. 
+ // Default: 85 + // +optional + ImageGCHighThresholdPercent int32 `json:"imageGCHighThresholdPercent,omitempty"` + // imageGCLowThresholdPercent is the percent of disk usage before which + // image garbage collection is never run. Lowest disk usage to garbage + // collect to. The percent is calculated as this field value out of 100. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may trigger or delay garbage collection, and may change the image overhead + // on the node. + // Default: 80 + // +optional + ImageGCLowThresholdPercent int32 `json:"imageGCLowThresholdPercent,omitempty"` + // Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes + // And all Burstable and BestEffort pods are brought up under their + // specific top level QoS cgroup. + // Dynamic Kubelet Config (beta): This field should not be updated without a full node + // reboot. It is safest to keep this value the same as the local config. + // Default: true + // +optional + CgroupsPerQOS bool `json:"cgroupsPerQOS,omitempty"` + // maxPods is the number of pods that can run on this Kubelet. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // changes may cause Pods to fail admission on Kubelet restart, and may change + // the value reported in Node.Status.Capacity[v1.ResourcePods], thus affecting + // future scheduling decisions. Increasing this value may also decrease performance, + // as more Pods can be packed into a single node. + // Default: 110 + // +optional + MaxPods int32 `json:"maxPods,omitempty"` + // PodPidsLimit is the maximum number of pids in any pod. + // Requires the SupportPodPidsLimit feature gate to be enabled. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // lowering it may prevent container processes from forking after the change. + // Default: -1 + // +optional + PodPidsLimit int64 `json:"podPidsLimit,omitempty"` + // ResolverConfig is the resolver configuration file used as the basis + // for the container DNS resolution configuration. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // changes will only take effect on Pods created after the update. Draining + // the node is recommended before changing this field. + // Default: "/etc/resolv.conf" + // +optional + ResolverConfig string `json:"resolvConf,omitempty"` + // Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}. + // To explicitly disable, pass a 0% or 100% threshold on an arbitrary resource. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may trigger or delay Pod evictions. + // Default: + // memory.available: "100Mi" + // nodefs.available: "10%" + // nodefs.inodesFree: "5%" + // imagefs.available: "15%" + // +optional + EvictionHard map[string]string `json:"evictionHard,omitempty"` + // protectKernelDefaults, if true, causes the Kubelet to error if kernel + // flags are not as it expects. Otherwise the Kubelet will attempt to modify + // kernel flags to match its expectation. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // enabling it may cause the Kubelet to crash-loop if the Kernel is not configured as + // Kubelet expects. 
+ // Default: false + // +optional + ProtectKernelDefaults bool `json:"protectKernelDefaults,omitempty"` + // featureGates is a map of feature names to bools that enable or disable alpha/experimental + // features. This field modifies piecemeal the built-in default values from + // "k8s.io/kubernetes/pkg/features/kube_features.go". + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider the + // documentation for the features you are enabling or disabling. While we + // encourage feature developers to make it possible to dynamically enable + // and disable features, some changes may require node reboots, and some + // features may require careful coordination to retroactively disable. + // Default: nil + // +optional + FeatureGates map[string]bool `json:"featureGates,omitempty"` + + /* the following fields are meant for Node Allocatable */ + + // systemReserved is a set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) + // pairs that describe resources reserved for non-kubernetes components. + // Currently only cpu and memory are supported. + // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may not be possible to increase the reserved resources, because this + // requires resizing cgroups. Always look for a NodeAllocatableEnforced event + // after updating this field to ensure that the update was successful. + // Default: nil + // +optional + SystemReserved map[string]string `json:"systemReserved,omitempty"` + // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs + // that describe resources reserved for kubernetes system components. + // Currently cpu, memory and local storage for root file system are supported. + // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // it may not be possible to increase the reserved resources, because this + // requires resizing cgroups. Always look for a NodeAllocatableEnforced event + // after updating this field to ensure that the update was successful. + // Default: nil + // +optional + KubeReserved map[string]string `json:"kubeReserved,omitempty"` + // This flag specifies the various Node Allocatable enforcements that Kubelet needs to perform. + // This flag accepts a list of options. Acceptable options are `none`, `pods`, `system-reserved` & `kube-reserved`. + // If `none` is specified, no other options may be specified. + // Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. + // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that + // removing enforcements may reduce the stability of the node. Alternatively, adding + // enforcements may reduce the stability of components which were using more than + // the reserved amount of resources; for example, enforcing kube-reserved may cause + // Kubelets to OOM if it uses more than the reserved resources, and enforcing system-reserved + // may cause system daemons to OOM if they use more than the reserved resources. 
+ // Default: ["pods"] + // +optional + EnforceNodeAllocatable []string `json:"enforceNodeAllocatable,omitempty"` +} + +type Duration string + +// below are copied from Kubernetes +type KubeletAuthentication struct { + // x509 contains settings related to x509 client certificate authentication + // +optional + X509 KubeletX509Authentication `json:"x509"` + // webhook contains settings related to webhook bearer token authentication + // +optional + Webhook KubeletWebhookAuthentication `json:"webhook"` + // anonymous contains settings related to anonymous authentication + // +optional + Anonymous KubeletAnonymousAuthentication `json:"anonymous"` +} + +type KubeletX509Authentication struct { + // clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request presenting a client certificate + // signed by one of the authorities in the bundle is authenticated with a username corresponding to the CommonName, + // and groups corresponding to the Organization in the client certificate. + // +optional + ClientCAFile string `json:"clientCAFile,omitempty"` +} + +type KubeletWebhookAuthentication struct { + // enabled allows bearer token authentication backed by the tokenreviews.authentication.k8s.io API + // +optional + Enabled bool `json:"enabled,omitempty"` + // cacheTTL enables caching of authentication results + // +optional + CacheTTL Duration `json:"cacheTTL,omitempty"` +} + +type KubeletAnonymousAuthentication struct { + // enabled allows anonymous requests to the kubelet server. + // Requests that are not rejected by another authentication method are treated as anonymous requests. + // Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated. + // +optional + Enabled bool `json:"enabled,omitempty"` +} + +type KubeletAuthorization struct { + // mode is the authorization mode to apply to requests to the kubelet server. + // Valid values are AlwaysAllow and Webhook. + // Webhook mode uses the SubjectAccessReview API to determine authorization. + // +optional + Mode KubeletAuthorizationMode `json:"mode,omitempty"` + + // webhook contains settings related to Webhook authorization. + // +optional + Webhook KubeletWebhookAuthorization `json:"webhook"` +} + +type KubeletAuthorizationMode string + +type KubeletWebhookAuthorization struct { + // cacheAuthorizedTTL is the duration to cache 'authorized' responses from the webhook authorizer. + // +optional + CacheAuthorizedTTL Duration `json:"cacheAuthorizedTTL,omitempty"` + // cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from the webhook authorizer. 
+ // +optional + CacheUnauthorizedTTL Duration `json:"cacheUnauthorizedTTL,omitempty"` +} diff --git a/pkg/agent/utils.go b/pkg/agent/utils.go index bbd3f205a28..f516fedb4d8 100644 --- a/pkg/agent/utils.go +++ b/pkg/agent/utils.go @@ -14,6 +14,7 @@ import ( "net" "net/http" "regexp" + "sort" "strconv" "strings" "text/template" @@ -25,6 +26,37 @@ import ( "github.com/pkg/errors" ) +var TranslatedKubeletConfigFlags map[string]bool = map[string]bool{ + "--address": true, + "--anonymous-auth": true, + "--client-ca-file": true, + "--authentication-token-webhook": true, + "--authorization-mode": true, + "--pod-manifest-path": true, + "--cluster-dns": true, + "--cgroups-per-qos": true, + "--tls-cert-file": true, + "--tls-private-key-file": true, + "--tls-cipher-suites": true, + "--cluster-domain": true, + "--max-pods": true, + "--eviction-hard": true, + "--node-status-update-frequency": true, + "--image-gc-high-threshold": true, + "--image-gc-low-threshold": true, + "--event-qps": true, + "--pod-max-pids": true, + "--enforce-node-allocatable": true, + "--streaming-connection-idle-timeout": true, + "--rotate-certificates": true, + "--read-only-port": true, + "--feature-gates": true, + "--protect-kernel-defaults": true, + "--resolv-conf": true, + "--system-reserved": true, + "--kube-reserved": true, +} + var keyvaultSecretPathRe *regexp.Regexp func init() { @@ -808,3 +840,145 @@ func getCustomDataFromJSON(jsonStr string) string { } return customDataObj["customData"] } + +// GetOrderedKubeletConfigFlagString returns an ordered string of key/val pairs +// copied from AKS-Engine and filter out flags that already translated to config file +func GetOrderedKubeletConfigFlagString(k *api.KubernetesConfig, cs *api.ContainerService) string { + if k.KubeletConfig == nil { + return "" + } + keys := []string{} + dynamicKubeletSupported := IsDynamicKubeletSupported(cs) + for key := range k.KubeletConfig { + if !dynamicKubeletSupported || !TranslatedKubeletConfigFlags[key] { + keys = append(keys, key) + } + } + sort.Strings(keys) + var buf bytes.Buffer + for _, key := range keys { + buf.WriteString(fmt.Sprintf("%s=%s ", key, k.KubeletConfig[key])) + } + return buf.String() +} + +// IsDynamicKubeletSupported get if dynamic kubelet is supported in AKS +func IsDynamicKubeletSupported(cs *api.ContainerService) bool { + // TODO(bowa) fix this after we figure out how to pass toggle value from RP + return false + // return cs.Properties.OrchestratorProfile.IsKubernetes() && IsKubernetesVersionGe(cs.Properties.OrchestratorProfile.OrchestratorVersion, "1.14.0") +} + +// convert kubelet flags we set to a file +func getDynamicKubeletConfigFileContent(kc map[string]string) string { + if kc == nil { + return "" + } + // translate simple values + kubeletConfig := &AKSKubeletConfiguration{ + APIVersion: "kubelet.config.k8s.io/v1beta1", + Kind: "KubeletConfiguration", + Address: kc["--address"], + StaticPodPath: kc["--pod-manifest-path"], + Authorization: KubeletAuthorization{ + Mode: KubeletAuthorizationMode(kc["--authorization-mode"]), + }, + ClusterDNS: strings.Split(kc["--cluster-dns"], ","), + CgroupsPerQOS: strToBool(kc["--cgroups-per-qos"]), + TLSCertFile: kc["--tls-cert-file"], + TLSPrivateKeyFile: kc["--tls-private-key-file"], + TLSCipherSuites: strings.Split(kc["--tls-cipher-suites"], ","), + ClusterDomain: kc["--cluster-domain"], + MaxPods: strToInt32(kc["--max-pods"]), + NodeStatusUpdateFrequency: Duration(kc["--node-status-update-frequency"]), + ImageGCHighThresholdPercent: 
strToInt32(kc["--image-gc-high-threshold"]), + ImageGCLowThresholdPercent: strToInt32(kc["--image-gc-low-threshold"]), + EventRecordQPS: strToInt32(kc["--event-qps"]), + PodPidsLimit: strToInt64(kc["--pod-max-pids"]), + EnforceNodeAllocatable: strings.Split(kc["--enforce-node-allocatable"], ","), + StreamingConnectionIdleTimeout: Duration(kc["--streaming-connection-idle-timeout"]), + RotateCertificates: strToBool(kc["--rotate-certificates"]), + ReadOnlyPort: strToInt32(kc["--read-only-port"]), + ProtectKernelDefaults: strToBool(kc["--protect-kernel-defaults"]), + ResolverConfig: kc["--resolv-conf"], + } + + // Authentication + kubeletConfig.Authentication = KubeletAuthentication{} + if ca := kc["--client-ca-file"]; ca != "" { + kubeletConfig.Authentication.X509 = KubeletX509Authentication{ + ClientCAFile: ca, + } + } + if aw := kc["--authentication-token-webhook"]; aw != "" { + kubeletConfig.Authentication.Webhook = KubeletWebhookAuthentication{ + Enabled: strToBool(aw), + } + } + if aa := kc["--anonymous-auth"]; aa != "" { + kubeletConfig.Authentication.Anonymous = KubeletAnonymousAuthentication{ + Enabled: strToBool(aa), + } + } + + // EvictionHard + // default: "memory.available<750Mi,nodefs.available<10%,nodefs.inodesFree<5%" + if eh, ok := kc["--eviction-hard"]; ok && eh != "" { + kubeletConfig.EvictionHard = strKeyValToMap(eh, ",", "<") + } + + // feature gates + // look like "f1=true,f2=true" + kubeletConfig.FeatureGates = strKeyValToMapBool(kc["--feature-gates"], ",", "=") + + // system reserve and kube reserve + // looks like "cpu=100m,memory=1638Mi" + kubeletConfig.SystemReserved = strKeyValToMap(kc["--system-reserved"], ",", "=") + kubeletConfig.KubeReserved = strKeyValToMap(kc["--kube-reserved"], ",", "=") + + configStringByte, _ := json.MarshalIndent(kubeletConfig, "", " ") + return string(configStringByte) +} + +func strToBool(str string) bool { + b, _ := strconv.ParseBool(str) + return b +} + +func strToInt32(str string) int32 { + i, _ := strconv.ParseInt(str, 10, 32) + return int32(i) +} + +func strToInt64(str string) int64 { + i, _ := strconv.ParseInt(str, 10, 64) + return i +} + +func strKeyValToMap(str string, strDelim string, pairDelim string) map[string]string { + m := make(map[string]string) + pairs := strings.Split(str, strDelim) + for _, pairRaw := range pairs { + pair := strings.Split(pairRaw, pairDelim) + if len(pair) == 2 { + key := strings.TrimSpace(pair[0]) + val := strings.TrimSpace(pair[1]) + m[key] = val + } + } + return m +} + +func strKeyValToMapBool(str string, strDelim string, pairDelim string) map[string]bool { + m := make(map[string]bool) + pairs := strings.Split(str, strDelim) + for _, pairRaw := range pairs { + pair := strings.Split(pairRaw, pairDelim) + if len(pair) == 2 { + key := strings.TrimSpace(pair[0]) + val := strings.TrimSpace(pair[1]) + m[key] = strToBool(val) + } + } + return m +} diff --git a/pkg/agent/utils_test.go b/pkg/agent/utils_test.go new file mode 100644 index 00000000000..10ca6694940 --- /dev/null +++ b/pkg/agent/utils_test.go @@ -0,0 +1,115 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. 
+ +package agent + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestGetKubeletConfigFileFromFlags(t *testing.T) { + kc := map[string]string{ + "--address": "0.0.0.0", + "--pod-manifest-path": "/etc/kubernetes/manifests", + "--cluster-domain": "cluster.local", + "--cluster-dns": "10.0.0.10", + "--cgroups-per-qos": "true", + "--tls-cert-file": "/etc/kubernetes/certs/kubeletserver.crt", + "--tls-private-key-file": "/etc/kubernetes/certs/kubeletserver.key", + "--tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256", + "--max-pods": "110", + "--node-status-update-frequency": "10s", + "--image-gc-high-threshold": "85", + "--image-gc-low-threshold": "80", + "--event-qps": "0", + "--pod-max-pids": "-1", + "--enforce-node-allocatable": "pods", + "--streaming-connection-idle-timeout": "4h0m0s", + "--rotate-certificates": "true", + "--read-only-port": "10255", + "--protect-kernel-defaults": "true", + "--resolv-conf": "/etc/resolv.conf", + "--anonymous-auth": "false", + "--client-ca-file": "/etc/kubernetes/certs/ca.crt", + "--authentication-token-webhook": "true", + "--authorization-mode": "Webhook", + "--eviction-hard": "memory.available<750Mi,nodefs.available<10%,nodefs.inodesFree<5%", + "--feature-gates": "RotateKubeletServerCertificate=true,DynamicKubeletConfig=false", // what if you turn off dynamic kubelet using dynamic kubelet? + "--system-reserved": "cpu=2,memory=1Gi", + "--kube-reserved": "cpu=100m,memory=1638Mi", + } + configFileStr := getDynamicKubeletConfigFileContent(kc) + diff := cmp.Diff(expectedJSON, configFileStr) + if diff != "" { + t.Errorf("Generated config file is different than expected: %s", diff) + } +} + +var expectedJSON string = `{ + "kind": "KubeletConfiguration", + "apiVersion": "kubelet.config.k8s.io/v1beta1", + "staticPodPath": "/etc/kubernetes/manifests", + "address": "0.0.0.0", + "readOnlyPort": 10255, + "tlsCertFile": "/etc/kubernetes/certs/kubeletserver.crt", + "tlsPrivateKeyFile": "/etc/kubernetes/certs/kubeletserver.key", + "tlsCipherSuites": [ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_RSA_WITH_AES_256_GCM_SHA384", + "TLS_RSA_WITH_AES_128_GCM_SHA256" + ], + "rotateCertificates": true, + "authentication": { + "x509": { + "clientCAFile": "/etc/kubernetes/certs/ca.crt" + }, + "webhook": { + "enabled": true + }, + "anonymous": {} + }, + "authorization": { + "mode": "Webhook", + "webhook": {} + }, + "clusterDomain": "cluster.local", + "clusterDNS": [ + "10.0.0.10" + ], + "streamingConnectionIdleTimeout": "4h0m0s", + "nodeStatusUpdateFrequency": "10s", + "imageGCHighThresholdPercent": 85, + "imageGCLowThresholdPercent": 80, + "cgroupsPerQOS": true, + "maxPods": 110, + "podPidsLimit": -1, + "resolvConf": "/etc/resolv.conf", + "evictionHard": { + "memory.available": "750Mi", + "nodefs.available": "10%", + "nodefs.inodesFree": "5%" + }, + "protectKernelDefaults": true, + "featureGates": { + "DynamicKubeletConfig": false, + "RotateKubeletServerCertificate": true + }, + "systemReserved": { + "cpu": "2", + "memory": "1Gi" + }, + "kubeReserved": { + "cpu": "100m", + 
"memory": "1638Mi" + }, + "enforceNodeAllocatable": [ + "pods" + ] +}` diff --git a/pkg/templates/templates_generated.go b/pkg/templates/templates_generated.go index 292005d01b2..b2da4f9649c 100644 --- a/pkg/templates/templates_generated.go +++ b/pkg/templates/templates_generated.go @@ -656,6 +656,18 @@ EOF EOF set -x {{end}} + +{{- if IsDynamicKubeletSupported}} + set +x + KUBELET_CONFIG_JSON_PATH="/etc/default/kubeletconfig.json" + touch "${KUBELET_CONFIG_JSON_PATH}" + chmod 0644 "${KUBELET_CONFIG_JSON_PATH}" + chown root:root "${KUBELET_CONFIG_JSON_PATH}" + cat << EOF > "${KUBELET_CONFIG_JSON_PATH}" +{{GetDynamicKubeletConfigFileContent}} +EOF + set -x +{{- end}} } configureCNI() { @@ -2316,7 +2328,10 @@ ExecStart=/usr/local/bin/kubelet \ --node-labels="${KUBELET_NODE_LABELS}" \ --v=2 {{if NeedsContainerd}}--container-runtime=remote --runtime-request-timeout=15m --container-runtime-endpoint=unix:///run/containerd/containerd.sock{{end}} \ --volume-plugin-dir=/etc/kubernetes/volumeplugins \ - $KUBELET_CONFIG \ + {{- if IsDynamicKubeletSupported}} + --config /etc/default/kubeletconfig.json --dynamic-config-dir /etc/default/dynamickubelet \ + {{- end}} + $KUBELET_FLAGS \ $KUBELET_REGISTER_NODE $KUBELET_REGISTER_WITH_TAINTS [Install] @@ -3438,7 +3453,7 @@ write_files: permissions: "0644" owner: root content: | - KUBELET_CONFIG={{GetKubeletConfigKeyVals .KubernetesConfig }} + KUBELET_FLAGS={{GetKubeletConfigKeyVals .KubernetesConfig }} KUBELET_REGISTER_SCHEDULABLE=true {{- if not (IsKubernetesVersionGe "1.17.0")}} KUBELET_IMAGE={{GetHyperkubeImageReference}} diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE new file mode 100644 index 00000000000..32017f8fa1d --- /dev/null +++ b/vendor/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 00000000000..c9a63ceda5e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,655 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// +// The primary features of cmp are: +// +// • When the default behavior of equality does not suit the needs of the test, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as they +// are within some tolerance of each other. +// +// • Types that have an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation for the types +// that they define. +// +// • If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on both +// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported +// fields are not compared by default; they result in panics unless suppressed +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly +// compared using the Exporter option. +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// • Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is greater than one, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform the current +// values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// • If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. +// +// • Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, and +// channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an Ignore option +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option +// explicitly permits comparing the unexported field. 
+// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using cmpopts.EquateEmpty. +// +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. +// Map keys are equal according to the == operator. +// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using cmpopts.EquateEmpty. +// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. +// +// Before recursing into a pointer, slice element, or map, the current path +// is checked to detect whether the address has already been visited. +// If there is a cycle, then the pointed at values are considered equal +// only if both addresses were previously visited in the same path step. +func Equal(x, y interface{}, opts ...Option) bool { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = reflect.TypeOf((*interface{})(nil)).Elem() + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + + s := newState(opts) + s.compareAny(&pathStep{t, vx, vy}) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values. +// It returns an empty string if and only if Equal returns true for the same +// input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + r := new(defaultReporter) + eq := Equal(x, y, Options(opts), Reporter(r)) + d := r.String() + if (d == "") != eq { + panic("inconsistent difference and equality results") + } + return d +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + curPtrs pointerPath // The current set of visited pointers + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. + recChecker recChecker + + // dynChecker triggers pseudo-random checks for option correctness. 
+ // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters []exporter // List of exporters for structs with unexported fields + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + // Always ensure a validator option exists to validate the inputs. + s := &state{opts: Options{validator{}}} + s.curPtrs.Init() + s.processOption(Options(opts)) + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case exporter: + s.exporters = append(s.exporters, opt) + case reporter: + s.reporters = append(s.reporters, opt) + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. +// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(step PathStep) diff.Result { + // We do not save and restore curPath and curPtrs because all of the + // compareX methods should properly push and pop from them. + // It is an implementation bug if the contents of the paths differ from + // when calling this function to when returning from it. + + oldResult, oldReporters := s.result, s.reporters + s.result = diff.Result{} // Reset result + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) + res := s.result + s.result, s.reporters = oldResult, oldReporters + return res +} + +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() + } + s.recChecker.Check(s.curPath) + + // Cycle-detection for slice elements (see NOTE in compareSlice). + t := step.Type() + vx, vy := step.Values() + if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() { + px, py := vx.Addr(), vy.Addr() + if eq, visited := s.curPtrs.Push(px, py); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(px, py) + } + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(t, vx, vy) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(t, vx, vy) { + return + } + + // Rule 3: Compare based on the underlying kind. 
+ switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), 0) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), 0) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), 0) + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), 0) + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), 0) + case reflect.String: + s.report(vx.String() == vy.String(), 0) + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), 0) + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) + case reflect.Ptr: + s.comparePtr(t, vx, vy) + case reflect.Interface: + s.compareInterface(t, vx, vy) + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { + // Evaluate all filters and apply the remaining options. + if opt := s.opts.filter(s, t, vx, vy); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, reportByMethod) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { + v = sanitizeValue(v, f.Type().In(0)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + got := <-c + want := f.Call([]reflect.Value{v})[0] + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. + if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { + return want + } + panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) + } + return want +} + +func (s *state) callTTBFunc(f, x, y reflect.Value) bool { + x = sanitizeValue(x, f.Type().In(0)) + y = sanitizeValue(y, f.Type().In(1)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{x, y})[0].Bool() + } + + // Swapping the input arguments is sufficient to check that + // f is symmetric and deterministic. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. 
+ c := make(chan reflect.Value) + go detectRaces(c, f, y, x) + got := <-c + want := f.Call([]reflect.Value{x, y})[0].Bool() + if !got.IsValid() || got.Bool() != want { + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) + } + return want +} + +func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { + var ret reflect.Value + defer func() { + recover() // Ignore panics, let the other call to f panic instead + c <- ret + }() + ret = f.Call(vs)[0] +} + +// sanitizeValue converts nil interfaces of type T to those of type R, +// assuming that T is assignable to R. +// Otherwise, it returns the input value as is. +func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { + // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + if !flags.AtLeastGo110 { + if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { + return reflect.New(t).Elem() + } + } + return v +} + +func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var vax, vay reflect.Value // Addressable versions of vx and vy + + var mayForce, mayForceInit bool + step := StructField{&structField{}} + for i := 0; i < t.NumField(); i++ { + step.typ = t.Field(i).Type + step.vx = vx.Field(i) + step.vy = vy.Field(i) + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + if step.name == "_" { + continue + } + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For retrieveUnexportedField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. + vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + if !mayForceInit { + for _, xf := range s.exporters { + mayForce = mayForce || xf(t) + } + mayForceInit = true + } + step.mayForce = mayForce + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(step) + } +} + +func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { + isSlice := t.Kind() == reflect.Slice + if isSlice && (vx.IsNil() || vy.IsNil()) { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer + // since slices represents a list of pointers, rather than a single pointer. + // The pointer checking logic must be handled on a per-element basis + // in compareAny. + // + // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting + // pointer P, a length N, and a capacity C. Supposing each slice element has + // a memory size of M, then the slice is equivalent to the list of pointers: + // [P+i*M for i in range(N)] + // + // For example, v[:0] and v[:1] are slices with the same starting pointer, + // but they are clearly different values. Using the slice pointer alone + // violates the assumption that equal pointers implies equal values. + + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}} + withIndexes := func(ix, iy int) SliceIndex { + if ix >= 0 { + step.vx, step.xkey = vx.Index(ix), ix + } else { + step.vx, step.xkey = reflect.Value{}, -1 + } + if iy >= 0 { + step.vy, step.ykey = vy.Index(iy), iy + } else { + step.vy, step.ykey = reflect.Value{}, -1 + } + return step + } + + // Ignore options are able to ignore missing elements in a slice. 
+ // However, detecting these reliably requires an optimal differencing + // algorithm, for which diff.Difference is not. + // + // Instead, we first iterate through both slices to detect which elements + // would be ignored if standing alone. The index of non-discarded elements + // are stored in a separate slice, which diffing is then performed on. + var indexesX, indexesY []int + var ignoredX, ignoredY []bool + for ix := 0; ix < vx.Len(); ix++ { + ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 + if !ignored { + indexesX = append(indexesX, ix) + } + ignoredX = append(ignoredX, ignored) + } + for iy := 0; iy < vy.Len(); iy++ { + ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 + if !ignored { + indexesY = append(indexesY, iy) + } + ignoredY = append(ignoredY, ignored) + } + + // Compute an edit-script for slices vx and vy (excluding ignored elements). + edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { + return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) + }) + + // Replay the ignore-scripts and the edit-script. + var ix, iy int + for ix < vx.Len() || iy < vy.Len() { + var e diff.EditType + switch { + case ix < len(ignoredX) && ignoredX[ix]: + e = diff.UniqueX + case iy < len(ignoredY) && ignoredY[iy]: + e = diff.UniqueY + default: + e, edits = edits[0], edits[1:] + } + switch e { + case diff.UniqueX: + s.compareAny(withIndexes(ix, -1)) + ix++ + case diff.UniqueY: + s.compareAny(withIndexes(-1, iy)) + iy++ + default: + s.compareAny(withIndexes(ix, iy)) + ix++ + iy++ + } + } +} + +func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // Cycle-detection for maps. + if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) + + // We combine and sort the two map keys so that we can perform the + // comparisons in a deterministic order. + step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} + for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.vx = vx.MapIndex(k) + step.vy = vy.MapIndex(k) + step.key = k + if !step.vx.IsValid() && !step.vy.IsValid() { + // It is possible for both vx and vy to be invalid if the + // key contained a NaN value in it. + // + // Even with the ability to retrieve NaN keys in Go 1.12, + // there still isn't a sensible way to compare the values since + // a NaN key may map to multiple unordered values. + // The most reasonable way to compare NaNs would be to compare the + // set of values. However, this is impossible to do efficiently + // since set equality is provably an O(n^2) operation given only + // an Equal function. If we had a Less function or Hash function, + // this could be done in O(n*log(n)) or O(n), respectively. + // + // Rather than adding complex logic to deal with NaNs, make it + // the user's responsibility to compare such obscure maps. + const help = "consider providing a Comparer to compare the map" + panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) + } + s.compareAny(step) + } +} + +func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // Cycle-detection for pointers. 
+ if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) + + vx, vy = vx.Elem(), vy.Elem() + s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) +} + +func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + vx, vy = vx.Elem(), vy.Elem() + if vx.Type() != vy.Type() { + s.report(false, 0) + return + } + s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) +} + +func (s *state) report(eq bool, rf resultFlags) { + if rf&reportByIgnore == 0 { + if eq { + s.result.NumSame++ + rf |= reportEqual + } else { + s.result.NumDiff++ + rf |= reportUnequal + } + } + for _, r := range s.reporters { + r.Report(Result{flags: rf}) + } +} + +// recChecker tracks the state needed to periodically perform checks that +// user provided transformers are not stuck in an infinitely recursive cycle. +type recChecker struct{ next int } + +// Check scans the Path for any recursive transformers and panics when any +// recursive transformers are detected. Note that the presence of a +// recursive Transformer does not necessarily imply an infinite cycle. +// As such, this check only activates after some minimal number of path steps. +func (rc *recChecker) Check(p Path) { + const minLen = 1 << 16 + if rc.next == 0 { + rc.next = minLen + } + if len(p) < rc.next { + return + } + rc.next <<= 1 + + // Check whether the same transformer has appeared at least twice. + var ss []string + m := map[Option]int{} + for _, ps := range p { + if t, ok := ps.(Transform); ok { + t := t.Option() + if m[t] == 1 { // Transformer was used exactly once before + tf := t.(*transformer).fnc.Type() + ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) + } + m[t]++ + } + } + if len(ss) > 0 { + const warning = "recursive set of Transformers detected" + const help = "consider using cmpopts.AcyclicTransformer" + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) + } +} + +// dynChecker tracks the state needed to periodically perform checks that +// user provided functions are symmetric and deterministic. +// The zero value is safe for immediate use. +type dynChecker struct{ curr, next int } + +// Next increments the state and reports whether a check should be performed. +// +// Checks occur every Nth function call, where N is a triangular number: +// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// See https://en.wikipedia.org/wiki/Triangular_number +// +// This sequence ensures that the cost of checks drops significantly as +// the number of functions calls grows larger. +func (dc *dynChecker) Next() bool { + ok := dc.curr == dc.next + if ok { + dc.curr = 0 + dc.next++ + } + dc.curr++ + return ok +} + +// makeAddressable returns a value that is always addressable. +// It returns the input verbatim if it is already addressable, +// otherwise it creates a new value and returns an addressable copy. +func makeAddressable(v reflect.Value) reflect.Value { + if v.CanAddr() { + return v + } + vc := reflect.New(v.Type()).Elem() + vc.Set(v) + return vc +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go new file mode 100644 index 00000000000..dd032354fef --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -0,0 +1,15 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package cmp + +import "reflect" + +const supportExporters = false + +func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { + panic("no support for forcibly accessing unexported fields") +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go new file mode 100644 index 00000000000..57020e26ca7 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -0,0 +1,25 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package cmp + +import ( + "reflect" + "unsafe" +) + +const supportExporters = true + +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. +// +// The parent struct, v, must be addressable, while f must be a StructField +// describing the field to retrieve. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { + // See https://github.com/google/go-cmp/issues/167 for discussion of the + // following expression. + return reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 00000000000..fe98dcc6774 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,17 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !cmp_debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 00000000000..597b6ae56b1 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,122 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +// +build cmp_debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=cmp_debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. +// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. + +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. 
+ return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 00000000000..3d2e42662ca --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,372 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. +func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. 
+func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NumSame to NumDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff +} + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. The following invariants for +// the edit-script are maintained: +// • eq == (es.Dist()==0) +// • nx == es.LenX() +// • ny == es.LenY() +// +// This algorithm is not guaranteed to be an optimal solution (i.e., one that +// produces an edit-script with a minimal Levenshtein distance). This algorithm +// favors performance over optimality. The exact output is not guaranteed to +// be stable and may change over time. +func Difference(nx, ny int, f EqualFunc) (es EditScript) { + // This algorithm is based on traversing what is known as an "edit-graph". + // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" + // by Eugene W. Myers. Since D can be as large as N itself, this is + // effectively O(N^2). Unlike the algorithm from that paper, we are not + // interested in the optimal path, but at least some "decent" path. + // + // For example, let X and Y be lists of symbols: + // X = [A B C A B B A] + // Y = [C B A B A C] + // + // The edit-graph can be drawn as the following: + // A B C A B B A + // ┌─────────────┐ + // C │_|_|\|_|_|_|_│ 0 + // B │_|\|_|_|\|\|_│ 1 + // A │\|_|_|\|_|_|\│ 2 + // B │_|\|_|_|\|\|_│ 3 + // A │\|_|_|\|_|_|\│ 4 + // C │ | |\| | | | │ 5 + // └─────────────┘ 6 + // 0 1 2 3 4 5 6 7 + // + // List X is written along the horizontal axis, while list Y is written + // along the vertical axis. At any point on this grid, if the symbol in + // list X matches the corresponding symbol in list Y, then a '\' is drawn. + // The goal of any minimal edit-script algorithm is to find a path from the + // top-left corner to the bottom-right corner, while traveling through the + // fewest horizontal or vertical edges. + // A horizontal edge is equivalent to inserting a symbol from list X. + // A vertical edge is equivalent to inserting a symbol from list Y. 
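The invariants listed for Difference (eq == (es.Dist()==0), nx == es.LenX(), ny == es.LenY()) are easiest to see on a concrete call. Below is a small sketch driving Difference directly with BoolResult; the package is internal to go-cmp, so such code only builds when placed next to the package itself (for example as a test file), and ordinary callers reach it indirectly through cmp.Diff. As documented above, the exact edit-script is not guaranteed to be stable.

package diff_test

import (
	"testing"

	"github.com/google/go-cmp/cmp/internal/diff"
)

func TestDifferenceSketch(t *testing.T) {
	x, y := []byte("ABCABBA"), []byte("CBABAC")
	es := diff.Difference(len(x), len(y), func(ix, iy int) diff.Result {
		return diff.BoolResult(x[ix] == y[iy])
	})
	t.Logf("script=%v dist=%d lenX=%d lenY=%d", es, es.Dist(), es.LenX(), es.LenY())
	if es.LenX() != len(x) || es.LenY() != len(y) {
		t.Errorf("edit-script does not cover both inputs")
	}
}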
+ // A diagonal edge is equivalent to a matching symbol between both X and Y. + + // Invariants: + // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // + // In general: + // • fwdFrontier.X < revFrontier.X + // • fwdFrontier.Y < revFrontier.Y + // Unless, it is time for the algorithm to terminate. + fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} + revPath := path{-1, point{nx, ny}, make(EditScript, 0)} + fwdFrontier := fwdPath.point // Forward search frontier + revFrontier := revPath.point // Reverse search frontier + + // Search budget bounds the cost of searching for better paths. + // The longest sequence of non-matching symbols that can be tolerated is + // approximately the square-root of the search budget. + searchBudget := 4 * (nx + ny) // O(n) + + // The algorithm below is a greedy, meet-in-the-middle algorithm for + // computing sub-optimal edit-scripts between two lists. + // + // The algorithm is approximately as follows: + // • Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). The goal of + // the search is connect with the search from the opposite corner. + // • As we search, we build a path in a greedy manner, where the first + // match seen is added to the path (this is sub-optimal, but provides a + // decent result in practice). When matches are found, we try the next pair + // of symbols in the lists and follow all matches as far as possible. + // • When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, we advance the + // frontier towards the opposite corner. + // • This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. + // + // This algorithm is correct even if searching only in the forward direction + // or in the reverse direction. We do both because it is commonly observed + // that two lists commonly differ because elements were added to the front + // or end of the other list. + // + // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. + f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) + for { + // Forward search from the beginning. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{fwdFrontier.X + z, fwdFrontier.Y - z} + switch { + case p.X >= revPath.X || p.Y < fwdPath.Y: + stop1 = true // Hit top-right corner + case p.Y >= revPath.Y || p.X < fwdPath.X: + stop2 = true // Hit bottom-left corner + case f(p.X, p.Y).Equal(): + // Match found, so connect the path to this point. + fwdPath.connect(p, f) + fwdPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(fwdPath.X, fwdPath.Y).Equal() { + break + } + fwdPath.append(Identity) + } + fwdFrontier = fwdPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards reverse point. 
+ if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { + fwdFrontier.X++ + } else { + fwdFrontier.Y++ + } + + // Reverse search from the end. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{revFrontier.X - z, revFrontier.Y + z} + switch { + case fwdPath.X >= p.X || revPath.Y < p.Y: + stop1 = true // Hit bottom-left corner + case fwdPath.Y >= p.Y || revPath.X < p.X: + stop2 = true // Hit top-right corner + case f(p.X-1, p.Y-1).Equal(): + // Match found, so connect the path to this point. + revPath.connect(p, f) + revPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(revPath.X-1, revPath.Y-1).Equal() { + break + } + revPath.append(Identity) + } + revFrontier = revPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards forward point. + if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + } + + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. + for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] +func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 00000000000..a9e7fc0b5b3 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go new file mode 100644 index 00000000000..01aed0a1532 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go new file mode 100644 index 00000000000..c0b667f58b0 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 00000000000..ace1dbe86e5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,99 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package function provides functionality for identifying function types. +package function + +import ( + "reflect" + "regexp" + "runtime" + "strings" +) + +type funcType int + +const ( + _ funcType = iota + + tbFunc // func(T) bool + ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. +func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. 
+func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 00000000000..0a01c4796f1 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,23 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. + return Pointer{v.Pointer(), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 00000000000..da134ae2a80 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,26 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 00000000000..24fbae6e3c5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,106 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. 
+func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. + sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if isLess(vs2[len(vs2)-1], v) { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. +func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + // NOTE: This does not sort -0 as less than +0 + // since Go maps treat -0 and +0 as equal keys. + fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. + panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go new file mode 100644 index 00000000000..06a8ffd036d --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -0,0 +1,48 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "math" + "reflect" +) + +// IsZero reports whether v is the zero value. +// This does not rely on Interface and so can be used on unexported fields. 
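SortKeys is what gives map comparisons a deterministic traversal order, which in turn keeps Diff output stable across runs. A tiny illustration through the public API (the exact rendering of the output belongs to the report code added later in this patch):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x := map[string]int{"b": 1, "a": 2, "c": 3}
	y := map[string]int{"c": 30, "a": 2, "b": 1}

	// Keys from both maps are merged and sorted (via value.SortKeys above),
	// so the single reported difference is always the entry for "c".
	fmt.Println(cmp.Diff(x, y))
}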
+func IsZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 + case reflect.String: + return v.String() == "" + case reflect.UnsafePointer: + return v.Pointer() == 0 + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !IsZero(v.Index(i)) { + return false + } + } + return true + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !IsZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 00000000000..abbd2a63b69 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,549 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of Equal and Diff. In particular, +// the fundamental Option functions (Ignore, Transformer, and Comparer), +// configure how equality is determined. +// +// The fundamental options may be composed with filters (FilterPath and +// FilterValues) to control the scope over which they are applied. +// +// The cmp/cmpopts package provides helper functions for creating options that +// may be used with Equal and Diff. +type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption +} + +// applicableOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. + apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of Option values that also satisfies the Option interface. +// Helper comparison packages may return an Options value when packing multiple +// Option values into a single Option. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. 
+type Options []Option + +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case validator: + out = validator{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case validator: + // Keep validator + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. +// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. 
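A common use of FilterValues is to scope a Comparer to just the values it can meaningfully handle. A hedged sketch for approximate float64 equality; the tolerance and the NaN guard are illustrative choices, not anything this patch prescribes:

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Only pairs of non-NaN floats are routed to the approximate Comparer;
	// everything else falls through to the default behavior.
	approx := cmp.FilterValues(func(x, y float64) bool {
		return !math.IsNaN(x) && !math.IsNaN(y)
	}, cmp.Comparer(func(x, y float64) bool {
		return math.Abs(x-y) < 1e-9
	}))

	fmt.Println(cmp.Equal(1.0, 1.0+1e-12, approx)) // true under this tolerance
}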
+func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } +func (ignore) String() string { return "Ignore()" } + +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. +type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + var name string + if t := s.curPath.Index(-2).Type(); t.Name() != "" { + // Named type with unexported fields. + name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + } else { + // Unnamed type with unexported fields. Derive PkgPath from field. + var pkgPath string + for i := 0; i < t.NumField() && pkgPath == ""; i++ { + pkgPath = t.Field(i).PkgPath + } + name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int }) + } + panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help)) + } + + panic("not reachable") +} + +// identRx represents a valid identifier according to the Go specification. 
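Ignore is almost always paired with one of the filters above. A small sketch that drops a single struct field from the comparison by inspecting the Path; the record type and its Updated field are made-up examples:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type record struct {
	ID      int
	Updated string // hypothetical field we do not care about
}

func main() {
	ignoreUpdated := cmp.FilterPath(func(p cmp.Path) bool {
		return p.Last().String() == ".Updated"
	}, cmp.Ignore())

	x := record{ID: 1, Updated: "yesterday"}
	y := record{ID: 1, Updated: "today"}
	fmt.Println(cmp.Equal(x, y, ignoreUpdated)) // true: the differing field is ignored
}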
+const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. +// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. +// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the Path since the last non-Transform step. +// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. +func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(Transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. +// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. 
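The implicit "not already in the tail of the Path" filter described for Transformer is what lets a transformer whose input and output types coincide still terminate without AcyclicTransformer. A sketch using the standard library's strings.TrimSpace:

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// func(string) string: the output has the same type as the input, but the
	// implicit filter keeps the transformer from being applied to its own output.
	trim := cmp.Transformer("TrimSpace", strings.TrimSpace)

	fmt.Println(cmp.Equal("  hello ", "hello", trim)) // true after trimming
}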
+// +// The equality function must be: +// • Symmetric: equal(x, y) == equal(y, x) +// • Deterministic: equal(x, y) == equal(x, y) +// • Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, reportByFunc) +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) +} + +// Exporter returns an Option that specifies whether Equal is allowed to +// introspect into the unexported fields of certain struct types. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of Equal +// to unexpectedly change. However, it may be valid to use this option on types +// defined in an internal package where the semantic meaning of an unexported +// field is in the control of the user. +// +// In many cases, a custom Comparer should be used instead that defines +// equality as a function of the public API of a type rather than the underlying +// unexported implementation. +// +// For example, the reflect.Type documentation defines equality to be determined +// by the == operator on the interface (essentially performing a shallow pointer +// comparison) and most attempts to compare *regexp.Regexp types are interested +// in only checking that the regular expression strings are equal. +// Both of these are accomplished using Comparers: +// +// Comparer(func(x, y reflect.Type) bool { return x == y }) +// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) +// +// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +// all unexported fields on specified struct types. +func Exporter(f func(reflect.Type) bool) Option { + if !supportExporters { + panic("Exporter is not supported on purego builds") + } + return exporter(f) +} + +type exporter func(reflect.Type) bool + +func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// AllowUnexported returns an Options that allows Equal to forcibly introspect +// unexported fields of the specified struct types. +// +// See Exporter for the proper use of this option. +func AllowUnexported(types ...interface{}) Option { + m := make(map[reflect.Type]bool) + for _, typ := range types { + t := reflect.TypeOf(typ) + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + m[t] = true + } + return exporter(func(t reflect.Type) bool { return m[t] }) +} + +// Result represents the comparison result for a single node and +// is provided by cmp when calling Result (see Reporter). 
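The two Comparer examples quoted in the Exporter documentation above are usable as-is; for instance the *regexp.Regexp one, which compares compiled regexps purely by their source text instead of reaching into unexported fields:

package main

import (
	"fmt"
	"regexp"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Equality of compiled regexps defined by their source text, as suggested
	// in the Exporter documentation above.
	sameSource := cmp.Comparer(func(x, y *regexp.Regexp) bool {
		return x.String() == y.String()
	})

	fmt.Println(cmp.Equal(regexp.MustCompile(`a+b`), regexp.MustCompile(`a+b`), sameSource)) // true
}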
+type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +} + +// Equal reports whether the node was determined to be equal or not. +// As a special case, ignored nodes are considered equal. +func (r Result) Equal() bool { + return r.flags&(reportEqual|reportByIgnore) != 0 +} + +// ByIgnore reports whether the node is equal because it was ignored. +// This never reports true if Equal reports false. +func (r Result) ByIgnore() bool { + return r.flags&reportByIgnore != 0 +} + +// ByMethod reports whether the Equal method determined equality. +func (r Result) ByMethod() bool { + return r.flags&reportByMethod != 0 +} + +// ByFunc reports whether a Comparer function determined equality. +func (r Result) ByFunc() bool { + return r.flags&reportByFunc != 0 +} + +// ByCycle reports whether a reference cycle was detected. +func (r Result) ByCycle() bool { + return r.flags&reportByCycle != 0 +} + +type resultFlags uint + +const ( + _ resultFlags = (1 << iota) / 2 + + reportEqual + reportUnequal + reportByIgnore + reportByMethod + reportByFunc + reportByCycle +) + +// Reporter is an Option that can be passed to Equal. When Equal traverses +// the value trees, it calls PushStep as it descends into each node in the +// tree and PopStep as it ascend out of the node. The leaves of the tree are +// either compared (determined to be equal or not equal) or ignored and reported +// as such by calling the Report method. +func Reporter(r interface { + // PushStep is called when a tree-traversal operation is performed. + // The PathStep itself is only valid until the step is popped. + // The PathStep.Values are valid for the duration of the entire traversal + // and must not be mutated. + // + // Equal always calls PushStep at the start to provide an operation-less + // PathStep used to report the root values. + // + // Within a slice, the exact set of inserted, removed, or modified elements + // is unspecified and may change in future implementations. + // The entries of a map are iterated through in an unspecified order. + PushStep(PathStep) + + // Report is called exactly once on leaf nodes to report whether the + // comparison identified the node as equal, unequal, or ignored. + // A leaf node is one that is immediately preceded by and followed by + // a pair of PushStep and PopStep calls. + Report(Result) + + // PopStep ascends back up the value tree. + // There is always a matching pop call for every push call. + PopStep() +}) Option { + return reporter{r} +} + +type reporter struct{ reporterIface } +type reporterIface interface { + PushStep(PathStep) + Report(Result) + PopStep() +} + +func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// normalizeOption normalizes the input options such that all Options groups +// are flattened and groups with a single element are reduced to that element. +// Only coreOptions and Options containing coreOptions are allowed. +func normalizeOption(src Option) Option { + switch opts := flattenOptions(nil, Options{src}); len(opts) { + case 0: + return nil + case 1: + return opts[0] + default: + return opts + } +} + +// flattenOptions copies all options in src to dst as a flat list. +// Only coreOptions and Options containing coreOptions are allowed. 
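A Reporter is just the three methods described above. Below is a minimal sketch of one that records the path of each unequal leaf, closely modeled on the example shipped with go-cmp; the type and variable names are illustrative:

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

// diffReporter collects one line per unequal leaf node.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
func (r *diffReporter) PopStep()                 { r.path = r.path[:len(r.path)-1] }
func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%#v:\n\t-: %+v\n\t+: %+v", r.path, vx, vy))
	}
}

func main() {
	var r diffReporter
	cmp.Equal([]int{1, 2, 3}, []int{1, 5, 3}, cmp.Reporter(&r))
	fmt.Println(strings.Join(r.diffs, "\n"))
}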
+func flattenOptions(dst, src Options) Options { + for _, opt := range src { + switch opt := opt.(type) { + case nil: + continue + case Options: + dst = flattenOptions(dst, opt) + case coreOption: + dst = append(dst, opt) + default: + panic(fmt.Sprintf("invalid option type: %T", opt)) + } + } + return dst +} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go new file mode 100644 index 00000000000..509d6b8527d --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -0,0 +1,377 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep + +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string + + // Type is the resulting type after performing the path step. + Type() reflect.Type + + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // • For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // • For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // • For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. + // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. +// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil PathStep that reports a nil Type. 
+func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(StructField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. +// +// For example: +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case Indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case Indirect: + continue // Next step is indirection, so let them batch up + case StructField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case Transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := ps.typ.String() + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + mayForce bool // Forcibly allow visibility + pvx, pvy reflect.Value // Parent values + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field) + vy = retrieveUnexportedField(sf.pvy, sf.field) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. +func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key. 
+type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int + isSlice bool // False for reflect.Array +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} + +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} + +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } + +// Transform is a transformation from the parent type to the current type. +type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. +func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. 
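Path steps such as MapIndex are mostly consumed inside filters and reporters. A sketch that ignores one map key by inspecting the last step; the key name "timestamp" is an assumption for illustration only:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	ignoreTimestamp := cmp.FilterPath(func(p cmp.Path) bool {
		mi, ok := p.Last().(cmp.MapIndex)
		return ok && mi.Key().Interface() == "timestamp"
	}, cmp.Ignore())

	x := map[string]string{"name": "a", "timestamp": "10:00"}
	y := map[string]string{"name": "a", "timestamp": "11:00"}
	fmt.Println(cmp.Equal(x, y, ignoreTimestamp)) // true: only the ignored key differs
}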
+func (tf Transform) Option() Option { return tf.trans } + +// pointerPath represents a dual-stack of pointers encountered when +// recursively traversing the x and y values. This data structure supports +// detection of cycles and determining whether the cycles are equal. +// In Go, cycles can occur via pointers, slices, and maps. +// +// The pointerPath uses a map to represent a stack; where descension into a +// pointer pushes the address onto the stack, and ascension from a pointer +// pops the address from the stack. Thus, when traversing into a pointer from +// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles +// by checking whether the pointer has already been visited. The cycle detection +// uses a seperate stack for the x and y values. +// +// If a cycle is detected we need to determine whether the two pointers +// should be considered equal. The definition of equality chosen by Equal +// requires two graphs to have the same structure. To determine this, both the +// x and y values must have a cycle where the previous pointers were also +// encountered together as a pair. +// +// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and +// MapIndex with pointer information for the x and y values. +// Suppose px and py are two pointers to compare, we then search the +// Path for whether px was ever encountered in the Path history of x, and +// similarly so with py. If either side has a cycle, the comparison is only +// equal if both px and py have a cycle resulting from the same PathStep. +// +// Using a map as a stack is more performant as we can perform cycle detection +// in O(1) instead of O(N) where N is len(Path). +type pointerPath struct { + // mx is keyed by x pointers, where the value is the associated y pointer. + mx map[value.Pointer]value.Pointer + // my is keyed by y pointers, where the value is the associated x pointer. + my map[value.Pointer]value.Pointer +} + +func (p *pointerPath) Init() { + p.mx = make(map[value.Pointer]value.Pointer) + p.my = make(map[value.Pointer]value.Pointer) +} + +// Push indicates intent to descend into pointers vx and vy where +// visited reports whether either has been seen before. If visited before, +// equal reports whether both pointers were encountered together. +// Pop must be called if and only if the pointers were never visited. +// +// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map +// and be non-nil. +func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) { + px := value.PointerOf(vx) + py := value.PointerOf(vy) + _, ok1 := p.mx[px] + _, ok2 := p.my[py] + if ok1 || ok2 { + equal = p.mx[px] == py && p.my[py] == px // Pointers paired together + return equal, true + } + p.mx[px] = py + p.my[py] = px + return false, false +} + +// Pop ascends from pointers vx and vy. +func (p pointerPath) Pop(vx, vy reflect.Value) { + delete(p.mx, value.PointerOf(vx)) + delete(p.my, value.PointerOf(vy)) +} + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 00000000000..6ddf29993e5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,51 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
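The pointerPath stacks above are what let Equal terminate on cyclic values: a comparison of two cycles only succeeds when both sides form their cycle at the same step. A small sketch of that behavior, assuming the vendored v0.4.0 semantics (the Node type is hypothetical):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type Node struct {
	Name string
	Next *Node
}

func main() {
	a := &Node{Name: "n"}
	a.Next = a // a cycles back to itself after one step
	b := &Node{Name: "n"}
	b.Next = b // b forms a cycle of the same shape

	fmt.Println(cmp.Equal(a, b)) // true: both cycles are formed at the same PathStep

	c := &Node{Name: "n", Next: &Node{Name: "n"}}
	c.Next.Next = c // c only cycles back after two steps
	fmt.Println(cmp.Equal(a, c)) // false: the pointer graphs have different structure
}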
+ +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). +// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + return formatOptions{}.FormatDiff(r.root).String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 00000000000..17a05eede48 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,296 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// TODO: Enforce limits? +// * Enforce maximum number of records to print per node? +// * Enforce maximum size in bytes allowed? +// * As a heuristic, use less verbosity for equal nodes than unequal nodes. +// TODO: Enforce unique outputs? +// * Avoid Stringer methods if it results in same output? +// * Print pointer address if outputs still equal? + +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. + TypeMode typeMode + + // formatValueOptions are options specific to printing reflect.Values. 
+ formatValueOptions +} + +func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { + opts.DiffMode = d + return opts +} +func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { + opts.TypeMode = t + return opts +} + +// FormatDiff converts a valueNode tree into a textNode tree, where the later +// is a textual representation of the differences detected in the former. +func (opts formatOptions) FormatDiff(v *valueNode) textNode { + // Check whether we have specialized formatting for this node. + // This is not necessary, but helpful for producing more readable outputs. + if opts.CanFormatDiffSlice(v) { + return opts.FormatDiffSlice(v) + } + + // For leaf nodes, format the value based on the reflect.Values alone. + if v.MaxDepth == 0 { + switch opts.DiffMode { + case diffUnknown, diffIdentical: + // Format Equal. + if v.NumDiff == 0 { + outx := opts.FormatValue(v.ValueX, visitedPointers{}) + outy := opts.FormatValue(v.ValueY, visitedPointers{}) + if v.NumIgnored > 0 && v.NumSame == 0 { + return textEllipsis + } else if outx.Len() < outy.Len() { + return outx + } else { + return outy + } + } + + // Format unequal. + assert(opts.DiffMode == diffUnknown) + var list textList + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + if outx != nil { + list = append(list, textRecord{Diff: '-', Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: '+', Value: outy}) + } + return opts.WithTypeMode(emitType).FormatType(v.Type, list) + case diffRemoved: + return opts.FormatValue(v.ValueX, visitedPointers{}) + case diffInserted: + return opts.FormatValue(v.ValueY, visitedPointers{}) + default: + panic("invalid diff mode") + } + } + + // Descend into the child value node. + if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: + return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Ptr: + return textWrap{"&", opts.FormatDiff(v.Value), ""} + case reflect.Interface: + return opts.WithTypeMode(emitType).FormatDiff(v.Value) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { + // Derive record name based on the data structure kind. + var name string + var formatKey func(reflect.Value) string + switch k { + case reflect.Struct: + name = "field" + opts = opts.WithTypeMode(autoType) + formatKey = func(v reflect.Value) string { return v.String() } + case reflect.Slice, reflect.Array: + name = "element" + opts = opts.WithTypeMode(elideType) + formatKey = func(reflect.Value) string { return "" } + case reflect.Map: + name = "entry" + opts = opts.WithTypeMode(elideType) + formatKey = formatMapKey + } + + // Handle unification. + switch opts.DiffMode { + case diffIdentical, diffRemoved, diffInserted: + var list textList + var deferredEllipsis bool // Add final "..." to indicate records were dropped + for _, r := range recs { + // Elide struct fields that are zero value. 
+ if k == reflect.Struct { + var isZero bool + switch opts.DiffMode { + case diffIdentical: + isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + case diffRemoved: + isZero = value.IsZero(r.Value.ValueX) + case diffInserted: + isZero = value.IsZero(r.Value.ValueY) + } + if isZero { + continue + } + } + // Elide ignored nodes. + if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { + deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) + if !deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + continue + } + if out := opts.FormatDiff(r.Value); out != nil { + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + if deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + return textWrap{"{", list, "}"} + case diffUnknown: + default: + panic("invalid diff mode") + } + + // Handle differencing. + var list textList + groups := coalesceAdjacentRecords(name, recs) + for i, ds := range groups { + // Handle equal records. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing records to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. + for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + } + default: + out := opts.FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + recs = recs[ds.NumDiff():] + } + assert(len(recs) == 0) + return textWrap{"{", list, "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. 
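The effect of numContextRecords and the ellipsis records above is easiest to see on a struct with many equal fields: at most two equal records are kept on each side of a difference and the rest collapse into a "..." summary. A hedged sketch (the Config type is illustrative, and the exact report layout is deliberately unstable):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type Config struct{ A, B, C, D, E, F, G int }

func main() {
	x := Config{1, 2, 3, 4, 5, 6, 7}
	y := Config{1, 2, 3, 4, 5, 6, 8}

	// Roughly: A through D coalesce into "... // 4 identical fields",
	// E and F survive as the two context records, and G gets -/+ lines.
	fmt.Println(cmp.Diff(x, y))
}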
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 00000000000..2761b628921 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,278 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // ShallowPointers controls whether to avoid descending into pointers. + // Useful when printing map keys, where pointer comparison is performed + // on the pointer address rather than the pointed-at value. + ShallowPointers bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool +} + +// FormatType prints the type as if it were wrapping s. +// This may return s as-is depending on the current type and TypeMode mode. +func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { + // Check whether to emit the type or not. + switch opts.TypeMode { + case autoType: + switch t.Kind() { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + if s.Equal(textNil) { + return s + } + default: + return s + } + case elideType: + return s + } + + // Determine the type label, applying special handling for unnamed types. + typeName := t.String() + if t.Name() == "" { + // According to Go grammar, certain type literals contain symbols that + // do not strongly bind to the next lexicographical token (e.g., *T). + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Ptr: + typeName = "(" + typeName + ")" + } + typeName = strings.Replace(typeName, "struct {", "struct{", -1) + typeName = strings.Replace(typeName, "interface {", "interface{", -1) + } + + // Avoid wrap the value in parenthesis if unnecessary. + if s, ok := s.(textWrap); ok { + hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") + hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + if hasParens || hasBraces { + return textWrap{typeName, s, ""} + } + } + return textWrap{typeName + "(", s, ")"} +} + +// FormatValue prints the reflect.Value, taking extra care to avoid descending +// into pointers already in m. As pointers are visited, m is also updated. 
+func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + switch v := v.Interface().(type) { + case error: + return textLine("e" + formatString(v.Error())) + case fmt.Stringer: + return textLine("s" + formatString(v.String())) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + var ptr string + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + // Unnamed uints are usually bytes or words, so use hexadecimal. + if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return textLine(formatString(v.String())) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(v)) + case reflect.Struct: + var list textList + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if value.IsZero(vv) { + continue // Elide fields with zero values + } + s := opts.WithTypeMode(autoType).FormatValue(vv, m) + list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + } + return textWrap{"{", list, "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + fallthrough + case reflect.Array: + var list textList + for i := 0; i < v.Len(); i++ { + vi := v.Index(i) + if vi.CanAddr() { // Check for cyclic elements + p := vi.Addr() + if m.Visit(p) { + var out textNode + out = textLine(formatPointer(p)) + out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) + out = textWrap{"*", out, ""} + list = append(list, textRecord{Value: out}) + continue + } + } + s := opts.WithTypeMode(elideType).FormatValue(vi, m) + list = append(list, textRecord{Value: s}) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Map: + if v.IsNil() { + return textNil + } + if m.Visit(v) { + return textLine(formatPointer(v)) + } + + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + sk := formatMapKey(k) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + list = append(list, textRecord{Key: sk, Value: sv}) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Ptr: + if v.IsNil() { + return textNil + } + if m.Visit(v) || opts.ShallowPointers { + return textLine(formatPointer(v)) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + skipType = true // Let the underlying value print the type instead + return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying 
value to explicitly print the type. + skipType = true // Print the concrete type instead + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +// formatMapKey formats v as if it were a map key. +// The result is guaranteed to be a single line. +func formatMapKey(v reflect.Value) string { + var opts formatOptions + opts.TypeMode = elideType + opts.ShallowPointers = true + s := opts.FormatValue(v, visitedPointers{}).String() + return strings.TrimSpace(s) +} + +// formatString prints s as a double-quoted or backtick-quoted string. +func formatString(s string) string { + // Use quoted string if it the same length as a raw string literal. + // Otherwise, attempt to use the raw string form. + qs := strconv.Quote(s) + if len(qs) == 1+len(s)+1 { + return qs + } + + // Disallow newlines to ensure output is a single line. + // Only allow printable runes for readability purposes. + rawInvalid := func(r rune) bool { + return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') + } + if strings.IndexFunc(s, rawInvalid) < 0 { + return "`" + s + "`" + } + return qs +} + +// formatHex prints u as a hexadecimal integer in Go notation. +func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} + +// formatPointer prints the address of the pointer. +func formatPointer(v reflect.Value) string { + p := v.Pointer() + if flags.Deterministic { + p = 0xdeadf00f // Only used for stable testing purposes + } + return fmt.Sprintf("⟪0x%x⟫", p) +} + +type visitedPointers map[value.Pointer]struct{} + +// Visit inserts pointer v into the visited map and reports whether it had +// already been visited before. +func (m visitedPointers) Visit(v reflect.Value) bool { + p := value.PointerOf(v) + _, visited := m[p] + m[p] = struct{}{} + return visited +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 00000000000..eafcf2e4c0b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,333 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. +func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: + // TODO: Handle the case where someone uses bytes.Equal on a large slice. 
+ return false // Some custom option was used to determined equality + case !v.ValueX.IsValid() || !v.ValueY.IsValid(): + return false // Both values must be valid + } + + switch t := v.Type; t.Kind() { + case reflect.String: + case reflect.Array, reflect.Slice: + // Only slices of primitive types have specialized handling. + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + default: + return false + } + + // If a sufficient number of elements already differ, + // use specialized formatting even if length requirement is not met. + if v.NumDiff > v.NumSame { + return true + } + default: + return false + } + + // Use specialized string diffing for longer slices or strings. + const minLength = 64 + return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength +} + +// FormatDiffSlice prints a diff for the slices (or strings) represented by v. +// This provides custom-tailored logic to make printing of differences in +// textual strings and slices of primitive kinds more readable. +func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { + assert(opts.DiffMode == diffUnknown) + t, vx, vy := v.Type, v.ValueX, v.ValueY + + // Auto-detect the type of the data. + var isLinedText, isText, isBinary bool + var sx, sy string + switch { + case t.Kind() == reflect.String: + sx, sy = vx.String(), vy.String() + isText = true // Initial estimate, verify later + case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + sx, sy = string(vx.Bytes()), string(vy.Bytes()) + isBinary = true // Initial estimate, verify later + case t.Kind() == reflect.Array: + // Arrays need to be addressable for slice operations to work. + vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() + vx2.Set(vx) + vy2.Set(vy) + vx, vy = vx2, vy2 + } + if isText || isBinary { + var numLines, lastLineIdx, maxLineLen int + isBinary = false + for i, r := range sx + sy { + if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { + isBinary = true + break + } + if r == '\n' { + if maxLineLen < i-lastLineIdx { + maxLineLen = i - lastLineIdx + } + lastLineIdx = i + 1 + numLines++ + } + } + isText = !isBinary + isLinedText = isText && numLines >= 4 && maxLineLen <= 256 + } + + // Format the string into printable records. + var list textList + var delim string + switch { + // If the text appears to be multi-lined text, + // then perform differencing across individual lines. + case isLinedText: + ssx := strings.Split(sx, "\n") + ssy := strings.Split(sy, "\n") + list = opts.formatDiffSlice( + reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.Index(0).String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "\n" + // If the text appears to be single-lined text, + // then perform differencing in approximately fixed-sized chunks. + // The output is printed as quoted strings. + case isText: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "" + // If the text appears to be binary data, + // then perform differencing in approximately fixed-sized chunks. + // The output is inspired by hexdump. 
+ case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. + default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = textWrap{"{", list, "}"} + if !isText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf([]byte(nil)) { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. +func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { + return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + for i, ds := range groups { + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. 
+ var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. + nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + } + assert(vx.Len() == 0 && vy.Len() == 0) + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats(1).NumIdentical++ + case diff.UniqueX: + lastStats(2).NumRemoved++ + case diff.UniqueY: + lastStats(2).NumInserted++ + case diff.Modified: + lastStats(2).NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. +func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { + groups, groupsOrig := groups[:0], groups + for i, ds := range groupsOrig { + if len(groups) >= 2 && ds.NumDiff() > 0 { + prev := &groups[len(groups)-2] // Unequal group + curr := &groups[len(groups)-1] // Equal group + next := &groupsOrig[i] // Unequal group + hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 + hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 + if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { + *prev = prev.Append(*curr).Append(*next) + groups = groups[:len(groups)-1] // Truncate off equal group + continue + } + } + groups = append(groups, ds) + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 00000000000..8b8fcab7bdf --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,387 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
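For sufficiently long multi-line strings (both sides at least 64 bytes, several lines each) the slice formatter above takes over and the report becomes a line-oriented diff wrapped in strings.Join rather than two quoted literals. A sketch of input that should take that path, assuming the vendored v0.4.0 behavior (the sample text is arbitrary and the rendering is unstable by design):

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x := strings.Repeat("lorem ipsum dolor sit amet\n", 4) + "tail-x\n"
	y := strings.Repeat("lorem ipsum dolor sit amet\n", 4) + "tail-y\n"

	// Both strings exceed the 64-byte minimum and contain several lines,
	// so the differences are grouped by line with equal runs elided.
	fmt.Println(cmp.Diff(x, y))
}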
+ +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + // The output of Diff is documented as being unstable to provide future + // flexibility in changing the output for more humanly readable reports. + // This logic intentionally introduces instability to the exact output + // so that users can detect accidental reliance on stability early on, + // rather than much later when an actual change to the format occurs. + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. + Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. + formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. 
+type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" +} + +func (s textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) + if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. +// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. +func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := ds != diffStats{} + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return textWrap{"{", s, "}"}.String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) 
+ } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return len(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return len(r.Value.(textLine)) }, + ) + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) + } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.Value.Equal(textEllipsis) { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. +type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. 
+// +// Example: +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name += "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). + switch n := len(ss); n { + case 0: + return "" + case 1, 2: + return strings.Join(ss, " and ") + " " + name + default: + return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name + } +} + +type commentString string + +func (s commentString) String() string { return string(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go new file mode 100644 index 00000000000..83031a7f507 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -0,0 +1,121 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import "reflect" + +// valueNode represents a single node within a report, which is a +// structured representation of the value tree, containing information +// regarding which nodes are equal or not. +type valueNode struct { + parent *valueNode + + Type reflect.Type + ValueX reflect.Value + ValueY reflect.Value + + // NumSame is the number of leaf nodes that are equal. + // All descendants are equal only if NumDiff is 0. + NumSame int + // NumDiff is the number of leaf nodes that are not equal. + NumDiff int + // NumIgnored is the number of leaf nodes that are ignored. + NumIgnored int + // NumCompared is the number of leaf nodes that were compared + // using an Equal method or Comparer function. + NumCompared int + // NumTransformed is the number of non-leaf nodes that were transformed. + NumTransformed int + // NumChildren is the number of transitive descendants of this node. + // This counts from zero; thus, leaf nodes have no descendants. + NumChildren int + // MaxDepth is the maximum depth of the tree. This counts from zero; + // thus, leaf nodes have a depth of zero. + MaxDepth int + + // Records is a list of struct fields, slice elements, or map entries. + Records []reportRecord // If populated, implies Value is not populated + + // Value is the result of a transformation, pointer indirect, of + // type assertion. + Value *valueNode // If populated, implies Records is not populated + + // TransformerName is the name of the transformer. 
+ TransformerName string // If non-empty, implies Value is populated +} +type reportRecord struct { + Key reflect.Value // Invalid for slice element + Value *valueNode +} + +func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { + vx, vy := ps.Values() + child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} + switch s := ps.(type) { + case StructField: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) + case SliceIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Value: child}) + case MapIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) + case Indirect: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case TypeAssertion: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case Transform: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + parent.TransformerName = s.Name() + parent.NumTransformed++ + default: + assert(parent == nil) // Must be the root step + } + return child +} + +func (r *valueNode) Report(rs Result) { + assert(r.MaxDepth == 0) // May only be called on leaf nodes + + if rs.ByIgnore() { + r.NumIgnored++ + } else { + if rs.Equal() { + r.NumSame++ + } else { + r.NumDiff++ + } + } + assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) + + if rs.ByMethod() { + r.NumCompared++ + } + if rs.ByFunc() { + r.NumCompared++ + } + assert(r.NumCompared <= 1) +} + +func (child *valueNode) PopStep() (parent *valueNode) { + if child.parent == nil { + return nil + } + parent = child.parent + parent.NumSame += child.NumSame + parent.NumDiff += child.NumDiff + parent.NumIgnored += child.NumIgnored + parent.NumCompared += child.NumCompared + parent.NumTransformed += child.NumTransformed + parent.NumChildren += child.NumChildren + 1 + if parent.MaxDepth < child.MaxDepth+1 { + parent.MaxDepth = child.MaxDepth + 1 + } + return parent +} diff --git a/vendor/modules.txt b/vendor/modules.txt index bdab7d3cb29..aa99e153aa0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -81,6 +81,12 @@ github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp # github.com/google/btree v1.0.0 github.com/google/btree +# github.com/google/go-cmp v0.4.0 +github.com/google/go-cmp/cmp +github.com/google/go-cmp/cmp/internal/diff +github.com/google/go-cmp/cmp/internal/flags +github.com/google/go-cmp/cmp/internal/function +github.com/google/go-cmp/cmp/internal/value # github.com/google/gofuzz v1.0.0 github.com/google/gofuzz # github.com/google/uuid v1.1.1
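Because the report text produced by this vendored package is intentionally unstable (random non-breaking spaces, unspecified layout), tests that depend on it should treat a non-empty diff as the failure signal rather than golden-matching the output. A sketch of that pattern, with hypothetical package name and sample data:

package agent_test

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

func TestKubeletFlagsSketch(t *testing.T) {
	// want/got are placeholder sample data, not values produced by this patch.
	want := map[string]string{"--max-pods": "110", "--read-only-port": "0"}
	got := map[string]string{"--max-pods": "110", "--read-only-port": "0"}

	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("kubelet flags mismatch (-want +got):\n%s", diff)
	}
}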