diff --git a/Dockerfile.networkResourcesInjector.rhel b/Dockerfile.networkResourcesInjector.rhel new file mode 100644 index 000000000..ae4376fa8 --- /dev/null +++ b/Dockerfile.networkResourcesInjector.rhel @@ -0,0 +1,12 @@ +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder +ARG TARGETOS +ARG TARGETARCH +WORKDIR /workspace +COPY . . +RUN GOMAXPROCS=2 CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} make build-network-resources-injector + +FROM registry.ci.openshift.org/ocp/4.19:base-rhel9 +ARG TARGETARCH +WORKDIR / + +COPY --from=builder /workspace/bin/nri.${TARGETARCH} /webhook diff --git a/Makefile b/Makefile index 3d5868dd4..7ba8d9e8c 100644 --- a/Makefile +++ b/Makefile @@ -218,12 +218,13 @@ DAEMON_BIN = bin/daemon DPU_CNI_BIN = bin/dpu-cni IPU_PLUGIN_BIN = bin/ipuplugin VSP_BIN = bin/vsp-mrvl +NRI_BIN = bin/nri GOARCH ?= amd64 GOOS ?= linux .PHONY: build -build: manifests generate fmt vet build-manager build-daemon build-intel-vsp build-marvell-vsp +build: manifests generate fmt vet build-manager build-daemon build-intel-vsp build-marvell-vsp build-network-resources-injector @echo "Built all components" .PHONY: build-manager @@ -243,6 +244,10 @@ build-intel-vsp: build-marvell-vsp: CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -o $(VSP_BIN).${GOARCH} internal/daemon/vendor-specific-plugins/marvell/main.go +.PHONY: build-network-resources-injector +build-network-resources-injector: + CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build -o ${NRI_BIN}.${GOARCH} cmd/nri/networkresourcesinjector.go + # If you wish built the manager image targeting other platforms you can use the --platform flag. # (i.e. docker build --platform linux/arm64 ). However, you must enable docker buildKit for it. 
# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ @@ -262,6 +267,7 @@ DPU_OPERATOR_IMAGE := $(REGISTRY):5000/dpu-operator:dev DPU_DAEMON_IMAGE := $(REGISTRY):5000/dpu-daemon:dev MARVELL_VSP_IMAGE := $(REGISTRY):5000/mrvl-vsp:dev INTEL_VSP_IMAGE := $(REGISTRY):5000/intel-vsp:dev +NETWORK_RESOURCES_INJECTOR_IMAGE:= $(REGISTRY):5000/network-resources-injector-image:dev .PHONY: local-deploy-prep prep-local-deploy: tools @@ -296,6 +302,7 @@ local-build: ## Build all container images necessary to run the whole operator $(CONTAINER_TOOL) build -v $(GO_CONTAINER_CACHE):/go:z -f Dockerfile.daemon.rhel -t $(DPU_DAEMON_IMAGE) $(CONTAINER_TOOL) build -v $(GO_CONTAINER_CACHE):/go:z -f Dockerfile.mrvlVSP.rhel -t $(MARVELL_VSP_IMAGE) $(CONTAINER_TOOL) build -v $(GO_CONTAINER_CACHE):/go:z -f Dockerfile.IntelVSP.rhel -t $(INTEL_VSP_IMAGE) + $(CONTAINER_TOOL) build -v $(GO_CONTAINER_CACHE):/go:z -f Dockerfile.networkResourcesInjector.rhel -t $(NETWORK_RESOURCES_INJECTOR_IMAGE) .PHONE: prepare-multi-arch prepare-multi-arch: @@ -308,7 +315,7 @@ go-cache: ## Build all container images necessary to run the whole operator ## Build all container images necessary to run the whole operator .PHONY: local-buildx -local-buildx: prepare-multi-arch go-cache local-buildx-manager local-buildx-daemon local-buildx-marvell-vsp local-buildx-intel-vsp +local-buildx: prepare-multi-arch go-cache local-buildx-manager local-buildx-daemon local-buildx-marvell-vsp local-buildx-intel-vsp local-buildx-network-resources-injector @echo "local-buildx completed" define build_image @@ -333,6 +340,10 @@ local-buildx-marvell-vsp: prepare-multi-arch go-cache local-buildx-intel-vsp: prepare-multi-arch go-cache $(call build_image,INTEL_VSP_IMAGE,Dockerfile.IntelVSP.rhel) +.PHONY: local-buildx-network-resources-injector +local-buildx-network-resources-injector: prepare-multi-arch go-cache + $(call build_image,NETWORK_RESOURCES_INJECTOR_IMAGE,Dockerfile.networkResourcesInjector.rhel) + 
TMP_FILE=/tmp/dpu-operator-incremental-build define build_image_incremental bin/incremental -dockerfile $(2) -base-uri $($(1))-base -output-file $(TMP_FILE) @@ -367,8 +378,14 @@ local-buildx-incremental-intel-vsp: prepare-multi-arch go-cache GOARCH=amd64 $(MAKE) build-intel-vsp $(call build_image_incremental,INTEL_VSP_IMAGE,Dockerfile.IntelVSP.rhel) +.PHONY: local-buildx-incremental-network-resources-injector +local-buildx-incremental-network-resources-injector: prepare-multi-arch go-cache + GOARCH=arm64 $(MAKE) build-network-resources-injector + GOARCH=amd64 $(MAKE) build-network-resources-injector + $(call build_image_incremental,NETWORK_RESOURCES_INJECTOR_IMAGE,Dockerfile.networkResourcesInjector.rhel) + .PHONY: incremental-local-buildx -incremental-local-buildx: prepare-multi-arch go-cache incremental-prep-local-deploy local-buildx-incremental-manager local-buildx-incremental-daemon local-buildx-incremental-marvell-vsp local-buildx-incremental-intel-vsp +incremental-local-buildx: prepare-multi-arch go-cache incremental-prep-local-deploy local-buildx-incremental-manager local-buildx-incremental-daemon local-buildx-incremental-marvell-vsp local-buildx-incremental-intel-vsp local-buildx-incremental-network-resources-injector @echo "local-buildx-incremental completed" .PHONY: local-pushx-incremental @@ -377,6 +394,7 @@ local-pushx-incremental: ## Push all container images necessary to run the whole buildah manifest push --all $(DPU_DAEMON_IMAGE)-manifest docker://$(DPU_DAEMON_IMAGE) buildah manifest push --all $(MARVELL_VSP_IMAGE)-manifest docker://$(MARVELL_VSP_IMAGE) buildah manifest push --all $(INTEL_VSP_IMAGE)-manifest docker://$(INTEL_VSP_IMAGE) + buildah manifest push --all $(NETWORK_RESOURCES_INJECTOR_IMAGE)-manifest docker://$(NETWORK_RESOURCES_INJECTOR_IMAGE) .PHONY: local-pushx local-pushx: ## Push all container images necessary to run the whole operator @@ -384,17 +402,20 @@ local-pushx: ## Push all container images necessary to run the whole operator buildah manifest 
push --all $(DPU_DAEMON_IMAGE)-manifest docker://$(DPU_DAEMON_IMAGE) buildah manifest push --all $(MARVELL_VSP_IMAGE)-manifest docker://$(MARVELL_VSP_IMAGE) buildah manifest push --all $(INTEL_VSP_IMAGE)-manifest docker://$(INTEL_VSP_IMAGE) + buildah manifest push --all $(NETWORK_RESOURCES_INJECTOR_IMAGE)-manifest docker://$(NETWORK_RESOURCES_INJECTOR_IMAGE) buildah manifest push --all $(DPU_OPERATOR_IMAGE)-manifest docker://$(DPU_OPERATOR_IMAGE)-base buildah manifest push --all $(DPU_DAEMON_IMAGE)-manifest docker://$(DPU_DAEMON_IMAGE)-base buildah manifest push --all $(MARVELL_VSP_IMAGE)-manifest docker://$(MARVELL_VSP_IMAGE)-base buildah manifest push --all $(INTEL_VSP_IMAGE)-manifest docker://$(INTEL_VSP_IMAGE)-base - + buildah manifest push --all $(NETWORK_RESOURCES_INJECTOR_IMAGE)-manifest docker://$(NETWORK_RESOURCES_INJECTOR_IMAGE)-base + .PHONY: local-push local-push: ## Push all container images necessary to run the whole operator $(CONTAINER_TOOL) push $(DPU_OPERATOR_IMAGE) $(CONTAINER_TOOL) push $(DPU_DAEMON_IMAGE) $(CONTAINER_TOOL) push $(MARVELL_VSP_IMAGE) $(CONTAINER_TOOL) push $(INTEL_VSP_IMAGE) + $(CONTAINER_TOOL) push $(NETWORK_RESOURCES_INJECTOR_IMAGE) # PLATFORMS defines the target platforms for the manager image be build to provide support to multiple # architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: # - able to use docker buildx . 
More info: https://docs.docker.com/build/buildx/ diff --git a/bundle/manifests/dpu-operator.clusterserviceversion.yaml b/bundle/manifests/dpu-operator.clusterserviceversion.yaml index 7c644eb8e..4ae6fbb7d 100644 --- a/bundle/manifests/dpu-operator.clusterserviceversion.yaml +++ b/bundle/manifests/dpu-operator.clusterserviceversion.yaml @@ -299,6 +299,8 @@ spec: env: - name: DPU_DAEMON_IMAGE value: quay.io/openshift/origin-dpu-daemon:4.19 + - name: NETWORK_RESOURCES_INJECTOR_IMAGE + value: quay.io/openshift/sriov-dp-admission-controller:latest image: quay.io/openshift/origin-dpu-operator:4.19 livenessProbe: httpGet: diff --git a/cmd/main.go b/cmd/main.go index d17f05ec7..b7cd2d6c4 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -95,7 +95,13 @@ func main() { os.Exit(1) } - b := controller.NewDpuOperatorConfigReconciler(mgr.GetClient(), mgr.GetScheme(), dpuDaemonImage, vspImages, vspExtraData) + networkResourcesInjectorImage := os.Getenv("NETWORK_RESOURCES_INJECTOR_IMAGE") + if networkResourcesInjectorImage == "" { + setupLog.Error(nil, "NETWORK_RESOURCES_INJECTOR_IMAGE env var is not set") + os.Exit(1) + } + + b := controller.NewDpuOperatorConfigReconciler(mgr.GetClient(), mgr.GetScheme(), dpuDaemonImage, vspImages, vspExtraData, networkResourcesInjectorImage) if value, ok := os.LookupEnv("IMAGE_PULL_POLICIES"); ok { b = b.WithImagePullPolicy(value) diff --git a/cmd/nri/networkresourcesinjector.go b/cmd/nri/networkresourcesinjector.go new file mode 100644 index 000000000..1c8f41835 --- /dev/null +++ b/cmd/nri/networkresourcesinjector.go @@ -0,0 +1,253 @@ +// Copyright (c) 2019 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "crypto/tls" + "flag" + "fmt" + "net/http" + "os" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/k8snetworkplumbingwg/network-resources-injector/pkg/controlswitches" + netcache "github.com/k8snetworkplumbingwg/network-resources-injector/pkg/tools" + "github.com/k8snetworkplumbingwg/network-resources-injector/pkg/userdefinedinjections" + "github.com/k8snetworkplumbingwg/network-resources-injector/pkg/webhook" +) + +const ( + defaultClientCa = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + controlSwitchesConfigMap = "nri-control-switches" +) + +func main() { + var namespace string + var clientCAPaths webhook.ClientCAFlags + + /* load configuration */ + port := flag.Int("port", 8443, "The port on which to serve.") + address := flag.String("bind-address", "0.0.0.0", "The IP address on which to listen for the --port port.") + cert := flag.String("tls-cert-file", "cert.pem", "File containing the default x509 Certificate for HTTPS.") + key := flag.String("tls-private-key-file", "key.pem", "File containing the default x509 private key matching --tls-cert-file.") + insecure := flag.Bool("insecure", false, "Disable adding client CA to server TLS endpoint --insecure") + flag.Var(&clientCAPaths, "client-ca", "File containing client CA. 
This flag is repeatable if more than one client CA needs to be added to server") + healthCheckPort := flag.Int("health-check-port", 8444, "The port to use for health check monitoring") + enableHTTP2 := flag.Bool("enable-http2", false, "If HTTP/2 should be enabled for the webhook server.") + + // do initialization of control switches flags + controlSwitches := controlswitches.SetupControlSwitchesFlags() + + // at the end when all flags are declared parse it + flag.Parse() + + // initialize all control switches structures + controlSwitches.InitControlSwitches() + glog.Infof("controlSwitches: %+v", *controlSwitches) + + if !isValidPort(*port) { + glog.Fatalf("invalid port number. Choose between 1024 and 65535") + } + + if !controlSwitches.IsResourcesNameEnabled() { + glog.Fatalf("Input argument for resourceName cannot be empty.") + } + + if *address == "" || *cert == "" || *key == "" { + glog.Fatalf("input argument(s) not defined correctly") + } + + if len(clientCAPaths) == 0 { + clientCAPaths = append(clientCAPaths, defaultClientCa) + } + + if namespace = os.Getenv("NAMESPACE"); namespace == "" { + namespace = "kube-system" + } + + if !isValidPort(*healthCheckPort) { + glog.Fatalf("Invalid health check port number. 
Choose between 1024 and 65535") + } else if *healthCheckPort == *port { + glog.Fatalf("Health check port should be different from port") + } else { + go func() { + addr := fmt.Sprintf("%s:%d", *address, *healthCheckPort) + mux := http.NewServeMux() + + mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + err := http.ListenAndServe(addr, mux) + if err != nil { + glog.Fatalf("error starting health check server: %v", err) + } + }() + } + + glog.Infof("starting mutating admission controller for network resources injection") + + keyPair, err := webhook.NewTlsKeypairReloader(*cert, *key) + if err != nil { + glog.Fatalf("error load certificate: %s", err.Error()) + } + + clientCaPool, err := webhook.NewClientCertPool(&clientCAPaths, *insecure) + if err != nil { + glog.Fatalf("error loading client CA pool: '%s'", err.Error()) + } + + /* init API client */ + clientset := webhook.SetupInClusterClient() + + // initialize webhook with controlSwitches + webhook.SetControlSwitches(controlSwitches) + + //initialize webhook with cache + netAnnotationCache := netcache.Create() + netAnnotationCache.Start() + webhook.SetNetAttachDefCache(netAnnotationCache) + + userInjections := userdefinedinjections.CreateUserInjectionsStructure() + webhook.SetUserInjectionStructure(userInjections) + + go func() { + /* register handlers */ + var httpServer *http.Server + + http.HandleFunc("/mutate", func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/mutate" { + http.NotFound(w, r) + return + } + if r.Method != http.MethodPost { + http.Error(w, "Invalid HTTP verb requested", 405) + return + } + webhook.MutateHandler(w, r) + }) + + /* start serving */ + httpServer = &http.Server{ + Addr: fmt.Sprintf("%s:%d", *address, *port), + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + MaxHeaderBytes: 1 << 20, + ReadHeaderTimeout: 1 * time.Second, + TLSConfig: &tls.Config{ + ClientAuth: webhook.GetClientAuth(*insecure), 
+ MinVersion: tls.VersionTLS12, + CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384}, + ClientCAs: clientCaPool.GetCertPool(), + PreferServerCipherSuites: true, + InsecureSkipVerify: false, + CipherSuites: []uint16{ + // tls 1.2 + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + // tls 1.3 configuration not supported + }, + GetCertificate: keyPair.GetCertificateFunc(), + }, + // CVE-2023-39325 https://github.com/golang/go/issues/63417 + TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), + } + + if *enableHTTP2 { + httpServer.TLSNextProto = nil + } + + err := httpServer.ListenAndServeTLS("", "") + if err != nil { + glog.Fatalf("error starting web server: %v", err) + } + }() + + /* watch the cert file and restart http sever if the file updated. */ + watcher, err := fsnotify.NewWatcher() + if err != nil { + glog.Fatalf("error starting fsnotify watcher: %v", err) + } + defer watcher.Close() + + certUpdated := false + keyUpdated := false + + for { + watcher.Add(*cert) + watcher.Add(*key) + + select { + case event, ok := <-watcher.Events: + if !ok { + continue + } + glog.V(2).Infof("watcher event: %v", event) + mask := fsnotify.Create | fsnotify.Rename | fsnotify.Remove | + fsnotify.Write | fsnotify.Chmod + if (event.Op & mask) != 0 { + glog.V(2).Infof("modified file: %v", event.Name) + if event.Name == *cert { + certUpdated = true + } + if event.Name == *key { + keyUpdated = true + } + if keyUpdated && certUpdated { + if err := keyPair.Reload(); err != nil { + glog.Fatalf("Failed to reload certificate: %v", err) + } + certUpdated = false + keyUpdated = false + } + } + case err, ok := <-watcher.Errors: + if !ok { + continue + } + glog.Infof("watcher error: %v", err) + case <-time.After(30 * time.Second): + cm, err := clientset.CoreV1().ConfigMaps(namespace).Get( + context.Background(), 
controlSwitchesConfigMap, metav1.GetOptions{}) + // only in case of API errors report an error and do not restore default values + if err != nil && !errors.IsNotFound(err) { + glog.Warningf("Error getting control switches configmap %s", err.Error()) + continue + } + + // to be called each time when map is present or not (in that case to restore default values) + controlSwitches.ProcessControlSwitchesConfigMap(cm) + userInjections.SetUserDefinedInjections(cm) + } + } + + // TODO: find a way to stop cache, should we run the above block in a go routine and make main module + // to respond to terminate signal ? +} + +func isValidPort(port int) bool { + if port < 1024 || port > 65535 { + return false + } + return true +} diff --git a/config/dev/local-images-template.yaml b/config/dev/local-images-template.yaml index 9cfb7ae0b..e5392f2b5 100644 --- a/config/dev/local-images-template.yaml +++ b/config/dev/local-images-template.yaml @@ -28,5 +28,7 @@ spec: value: {{ .RegistryURL }}:5000/mrvl-vsp:dev - name: IMAGE_PULL_POLICIES value: Always + - name: NETWORK_RESOURCES_INJECTOR_IMAGE + value: {{ .RegistryURL }}:5000/network-resources-injector-image:dev image: {{ .RegistryURL }}:5000/dpu-operator:dev imagePullPolicy: Always diff --git a/config/incremental/local-images-template.yaml b/config/incremental/local-images-template.yaml index e09f31fd2..c10930dbc 100644 --- a/config/incremental/local-images-template.yaml +++ b/config/incremental/local-images-template.yaml @@ -26,5 +26,7 @@ spec: value: {{ .RegistryURL }}:5000/mrvl-vsp:dev-incremental - name: IMAGE_PULL_POLICIES value: Always + - name: NETWORK_RESOURCES_INJECTOR_IMAGE + value: {{ .RegistryURL }}:5000/network-resources-injector-image:dev-incremental image: {{ .RegistryURL }}:5000/dpu-operator:dev-incremental imagePullPolicy: Always diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index a0055f50e..081e01ba7 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -76,6 +76,8 @@ 
spec: env: - name: DPU_DAEMON_IMAGE value: quay.io/openshift/origin-dpu-daemon:4.19 + - name: NETWORK_RESOURCES_INJECTOR_IMAGE + value: quay.io/openshift/sriov-dp-admission-controller:latest #TODO: We will need pointers to all supported VSP images so we can dynamically detect the correct vendor to support at runtime # - name: IntelVspImage # value: quay.io/openshift/origin-intel-vsp-image:4.16 diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 750db4ae4..96ed56034 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -16,6 +16,18 @@ rules: - patch - update - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - '*' +- apiGroups: + - "" + resources: + - secrets + verbs: + - '*' - apiGroups: - "" resources: @@ -33,6 +45,7 @@ rules: resources: - services verbs: + - '*' - create - delete - get @@ -40,6 +53,12 @@ rules: - patch - update - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + verbs: + - '*' - apiGroups: - apiextensions.k8s.io resources: @@ -58,6 +77,18 @@ rules: - patch - update - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - config.openshift.io resources: @@ -134,8 +165,10 @@ rules: - clusterrolebindings verbs: - create + - delete - get - list + - update - watch - apiGroups: - rbac.authorization.k8s.io @@ -143,7 +176,10 @@ rules: - clusterroles verbs: - create + - delete - get + - patch + - update - apiGroups: - rbac.authorization.k8s.io resources: diff --git a/go.mod b/go.mod index 3ff1eb25f..b3dbc1ba6 100644 --- a/go.mod +++ b/go.mod @@ -5,12 +5,15 @@ go 1.23.4 require ( github.com/containernetworking/cni v1.2.3 github.com/containernetworking/plugins v1.5.0 + github.com/fsnotify/fsnotify v1.8.0 github.com/go-logr/logr v1.4.2 + github.com/golang/glog v1.2.4 github.com/gorilla/mux v1.8.1 github.com/intel/ipu-opi-plugins/ipu-plugin v0.0.0-20250304160257-7495d7a9cee8 
github.com/jaypipes/ghw v0.13.1-0.20241024164530-c1bfc6e6cd6a github.com/k8snetworkplumbingwg/cni-log v0.0.0-20230801160229-b6e062c9e0f2 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.4.0 + github.com/k8snetworkplumbingwg/network-resources-injector v1.7.1 github.com/k8snetworkplumbingwg/sriov-network-operator v1.5.0 github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.1 @@ -72,7 +75,6 @@ require ( github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-errors/errors v1.4.2 // indirect @@ -84,7 +86,6 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.2.1 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.9 // indirect @@ -184,6 +185,7 @@ require ( gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/k8snetworkplumbingwg/multus-cni.v4 v4.0.2 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 0ea9c4960..6af73c457 100644 --- a/go.sum +++ b/go.sum @@ -110,8 +110,8 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= @@ -141,8 +141,8 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= +github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= @@ -233,6 +233,8 @@ github.com/k8snetworkplumbingwg/govdpa v0.1.4 h1:e6mM7JFZkLVJeMQw3px96EigHAhnb4V github.com/k8snetworkplumbingwg/govdpa v0.1.4/go.mod h1:UQR1xu7A+nnRK1dkLEi12OnNL0OiBPpIKOYDuaQQkck= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.4.0 h1:VzM3TYHDgqPkettiP6I6q2jOeQFL4nrJM+UcAc4f6Fs= github.com/k8snetworkplumbingwg/network-attachment-definition-client 
v1.4.0/go.mod h1:nqCI7aelBJU61wiBeeZWJ6oi4bJy5nrjkM6lWIMA4j0= +github.com/k8snetworkplumbingwg/network-resources-injector v1.7.1 h1:CebU+l352d2CzDovMz6sl4iv/go8OIsEd1dJr/I69aQ= +github.com/k8snetworkplumbingwg/network-resources-injector v1.7.1/go.mod h1:vxug9e8dnJRWrxEtydp7c5CLVbXwWWNvh640mFk/lAI= github.com/k8snetworkplumbingwg/sriov-network-device-plugin v0.0.0-20221127172732-a5a7395122e3 h1:hIHzF4vNTCFb9UMngrcJAMvdNslCekaSvNbfQt21CUw= github.com/k8snetworkplumbingwg/sriov-network-device-plugin v0.0.0-20221127172732-a5a7395122e3/go.mod h1:b0YSmUuNOy6CkEmV27XfmZ3a7njs2pjxHoFAvfLbUII= github.com/k8snetworkplumbingwg/sriov-network-operator v1.5.0 h1:xH8g2P+L99qZjAu/rPikxe483vhpUoJ9+j7yU4JhDlw= @@ -563,6 +565,8 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/k8snetworkplumbingwg/multus-cni.v4 v4.0.2 h1:71L/o4mHzVZit0yaIHkOBaEO1fj0lucarJCmhuPCsOA= +gopkg.in/k8snetworkplumbingwg/multus-cni.v4 v4.0.2/go.mod h1:u1IhI+b7jS0cdRNRJajg0VIzEbQkNNXPZGBSvDr7t0M= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= diff --git a/hack/cluster-configs/ocp-tft-config.yaml b/hack/cluster-configs/ocp-tft-config.yaml index b40a9a69f..8dfab250c 100644 --- a/hack/cluster-configs/ocp-tft-config.yaml +++ b/hack/cluster-configs/ocp-tft-config.yaml @@ -18,6 +18,5 @@ tft: - name: "$worker" sriov: "true" secondary_network_nad: "default-sriov-net" - resource_name: "openshift.io/dpu" kubeconfig: "/root/kubeconfig.ocpcluster" kubeconfig_infra: "/root/kubeconfig.microshift" diff --git 
a/hack/cluster-configs/traffic_flow_manifests/sriov-pod.yaml.j2 b/hack/cluster-configs/traffic_flow_manifests/sriov-pod.yaml.j2 new file mode 100644 index 000000000..197483bda --- /dev/null +++ b/hack/cluster-configs/traffic_flow_manifests/sriov-pod.yaml.j2 @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + namespace: {{ name_space }} + name: {{ pod_name }} + annotations: {% if not use_secondary_network %} + v1.multus-cni.io/default-network: {{ default_network }} {% else %} + k8s.v1.cni.cncf.io/networks: {{ secondary_network_nad }} {% endif %} + labels: + tft-tests: "{{ index }}" + tft-port: "{{ port }}" +spec: + nodeSelector: + kubernetes.io/hostname: {{ node_name }} + containers: + - name: {{ pod_name }} + image: {{ test_image }} + command: {{ command }} + args: {{ args }} + imagePullPolicy: {{ image_pull_policy }} diff --git a/hack/traffic_flow_tests.sh b/hack/traffic_flow_tests.sh index e65a2ffa3..5bbd3fe1e 100755 --- a/hack/traffic_flow_tests.sh +++ b/hack/traffic_flow_tests.sh @@ -27,6 +27,7 @@ temp_file=$(mktemp) envsubst < ../hack/cluster-configs/ocp-tft-config.yaml > $temp_file +export TFT_MANIFESTS_OVERRIDES=../hack/cluster-configs/traffic_flow_manifests OUTPUT_BASE="./ft-logs/result-$(date '+%Y%m%d-%H%M%S.%4N-')" ./tft.py -v debug --output-base "$OUTPUT_BASE" "$temp_file" diff --git a/internal/controller/bindata/network-resources-injector/00_service.yaml b/internal/controller/bindata/network-resources-injector/00_service.yaml new file mode 100644 index 000000000..fa534ac59 --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/00_service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: network-resources-injector-service + namespace: {{.Namespace}} + annotations: + service.alpha.openshift.io/serving-cert-secret-name: "network-resources-injector-secret" +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + app: network-resources-injector diff --git 
a/internal/controller/bindata/network-resources-injector/01_webhook.yaml b/internal/controller/bindata/network-resources-injector/01_webhook.yaml new file mode 100644 index 000000000..0efb12fc2 --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/01_webhook.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: network-resources-injector-config + namespace: {{.Namespace}} + annotations: + service.beta.openshift.io/inject-cabundle: "true" +webhooks: + - name: network-resources-injector-config.k8s.io + sideEffects: None + admissionReviewVersions: ["v1", "v1beta1"] + clientConfig: + service: + name: network-resources-injector-service + namespace: {{.Namespace}} + path: "/mutate" + namespaceSelector: + matchExpressions: + - key: "kubernetes.io/metadata.name" + operator: "NotIn" + values: + - {{.Namespace}} + rules: + - operations: [ "CREATE" ] + apiGroups: ["apps", ""] + apiVersions: ["v1"] + resources: ["pods"] diff --git a/internal/controller/bindata/network-resources-injector/02_serviceaccount.yaml b/internal/controller/bindata/network-resources-injector/02_serviceaccount.yaml new file mode 100644 index 000000000..608f33d62 --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/02_serviceaccount.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: {{.Namespace}} + name: network-resources-injector-sa + diff --git a/internal/controller/bindata/network-resources-injector/03_secret.yaml b/internal/controller/bindata/network-resources-injector/03_secret.yaml new file mode 100644 index 000000000..13b62fa1a --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/03_secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: network-resources-injector-sa-secret + namespace: {{.Namespace}} + annotations: + kubernetes.io/service-account.name: network-resources-injector-sa +type: 
kubernetes.io/service-account-token diff --git a/internal/controller/bindata/network-resources-injector/04_clusterrole_network_resources_injector.yaml b/internal/controller/bindata/network-resources-injector/04_clusterrole_network_resources_injector.yaml new file mode 100644 index 000000000..108989813 --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/04_clusterrole_network_resources_injector.yaml @@ -0,0 +1,59 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: network-resources-injector +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - '*' +- apiGroups: + - "" + resources: + - secrets + verbs: + - '*' +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list + - watch + - create + - delete +- apiGroups: + - k8s.cni.cncf.io + resources: + - network-attachment-definitions + verbs: + - 'watch' + - 'list' + - 'get' +- apiGroups: + - "" + resources: + - configmaps + verbs: + - 'get' +- apiGroups: + - apps + resources: + - deployments + verbs: + - 'watch' + - 'list' + - 'get' +- apiGroups: + - security.openshift.io + resourceNames: + - anyuid + - hostnetwork + - privileged + resources: + - securitycontextconstraints + verbs: + - 'use' diff --git a/internal/controller/bindata/network-resources-injector/05_clusterrole_secrets.yaml b/internal/controller/bindata/network-resources-injector/05_clusterrole_secrets.yaml new file mode 100644 index 000000000..661f9b333 --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/05_clusterrole_secrets.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: network-resources-injector-secrets +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - '*' diff --git a/internal/controller/bindata/network-resources-injector/06_clusterrole_webhook_configs.yaml b/internal/controller/bindata/network-resources-injector/06_clusterrole_webhook_configs.yaml new file mode 100644 index 
000000000..d8a2e6e70 --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/06_clusterrole_webhook_configs.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: network-resources-injector-webhook-configs +rules: +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + verbs: + - '*' diff --git a/internal/controller/bindata/network-resources-injector/07_clusterrole_service.yaml b/internal/controller/bindata/network-resources-injector/07_clusterrole_service.yaml new file mode 100644 index 000000000..7bec6c7ea --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/07_clusterrole_service.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: network-resources-injector-service +rules: +- apiGroups: + - "" + resources: + - services + verbs: + - '*' +- apiGroups: + - "" + resources: + - pods + verbs: + - '*' diff --git a/internal/controller/bindata/network-resources-injector/08_clusterrole_configmaps.yaml b/internal/controller/bindata/network-resources-injector/08_clusterrole_configmaps.yaml new file mode 100644 index 000000000..b58c11ad1 --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/08_clusterrole_configmaps.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: network-resources-injector-configmaps +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - 'get' diff --git a/internal/controller/bindata/network-resources-injector/09_clusterrolebinding_network_resources_injector_role_binding.yaml b/internal/controller/bindata/network-resources-injector/09_clusterrolebinding_network_resources_injector_role_binding.yaml new file mode 100644 index 000000000..65e17756b --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/09_clusterrolebinding_network_resources_injector_role_binding.yaml @@ -0,0 
+1,12 @@ +apiVersion: rbac.authorization.k8s.io +kind: ClusterRoleBinding +metadata: + name: network-resources-injector-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: network-resources-injector +subjects: +- kind: ServiceAccount + name: network-resources-injector-sa + namespace: {{.Namespace}} diff --git a/internal/controller/bindata/network-resources-injector/10_clusterrolebinding_secrets.yaml b/internal/controller/bindata/network-resources-injector/10_clusterrolebinding_secrets.yaml new file mode 100644 index 000000000..0732a6834 --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/10_clusterrolebinding_secrets.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io +kind: ClusterRoleBinding +metadata: + name: network-resources-injector-secrets-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: network-resources-injector-secrets +subjects: +- kind: ServiceAccount + name: network-resources-injector-sa + namespace: {{.Namespace}} diff --git a/internal/controller/bindata/network-resources-injector/11_clusterrolebinding_webhook_configs.yaml b/internal/controller/bindata/network-resources-injector/11_clusterrolebinding_webhook_configs.yaml new file mode 100644 index 000000000..57a8595f0 --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/11_clusterrolebinding_webhook_configs.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io +kind: ClusterRoleBinding +metadata: + name: network-resources-injector-webhook-configs-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: network-resources-injector-webhook-configs +subjects: +- kind: ServiceAccount + name: network-resources-injector-sa + namespace: {{.Namespace}} diff --git a/internal/controller/bindata/network-resources-injector/12_clusterrolebinding_service.yaml b/internal/controller/bindata/network-resources-injector/12_clusterrolebinding_service.yaml new file mode 100644 index 
000000000..5ea93cf5f --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/12_clusterrolebinding_service.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: network-resources-injector-service-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: network-resources-injector-service +subjects: +- kind: ServiceAccount + name: network-resources-injector-sa + namespace: {{.Namespace}} diff --git a/internal/controller/bindata/network-resources-injector/13_clusterrolebinding_configmaps.yaml b/internal/controller/bindata/network-resources-injector/13_clusterrolebinding_configmaps.yaml new file mode 100644 index 000000000..9f94516a8 --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/13_clusterrolebinding_configmaps.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: network-resources-injector-configmaps-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: network-resources-injector-configmaps +subjects: +- kind: ServiceAccount + name: network-resources-injector-sa + namespace: {{.Namespace}} diff --git a/internal/controller/bindata/network-resources-injector/14_server.yaml b/internal/controller/bindata/network-resources-injector/14_server.yaml new file mode 100644 index 000000000..883b5fa48 --- /dev/null +++ b/internal/controller/bindata/network-resources-injector/14_server.yaml @@ -0,0 +1,63 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: network-resources-injector + name: network-resources-injector + namespace: {{.Namespace}} +spec: + selector: + matchLabels: + app: network-resources-injector + template: + metadata: + labels: + app: network-resources-injector + spec: + serviceAccount: network-resources-injector-sa + containers: + - name: webhook-server + image: {{.NRIWebhookImage}} + imagePullPolicy: {{.ImagePullPolicy}} + 
command: + - /webhook + args: + - -bind-address=0.0.0.0 + - -port=8443 + - -tls-private-key-file=/etc/tls/tls.key + - -tls-cert-file=/etc/tls/tls.crt + - -insecure=true + - -health-check-port=8444 + - -logtostderr + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + privileged: false + capabilities: + drop: + - ALL + add: ["NET_BIND_SERVICE"] + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /etc/tls + name: tls + resources: + requests: + memory: "50Mi" + cpu: "250m" + limits: + memory: "200Mi" + cpu: "500m" + livenessProbe: + httpGet: + path: /healthz + port: 8444 + initialDelaySeconds: 10 + periodSeconds: 5 + volumes: + - name: tls + secret: + secretName: network-resources-injector-secret diff --git a/internal/controller/dpuoperatorconfig_controller.go b/internal/controller/dpuoperatorconfig_controller.go index f5105ea23..60f060ce8 100644 --- a/internal/controller/dpuoperatorconfig_controller.go +++ b/internal/controller/dpuoperatorconfig_controller.go @@ -43,9 +43,10 @@ type DpuOperatorConfigReconciler struct { vspImages map[string]string vspExtraData map[string]string imagePullPolicy string + nriWebhookImage string } -func NewDpuOperatorConfigReconciler(client client.Client, scheme *runtime.Scheme, dpuDaemonImage string, vspImages map[string]string, vspExtraData map[string]string) *DpuOperatorConfigReconciler { +func NewDpuOperatorConfigReconciler(client client.Client, scheme *runtime.Scheme, dpuDaemonImage string, vspImages map[string]string, vspExtraData map[string]string, nriWebhookImage string) *DpuOperatorConfigReconciler { return &DpuOperatorConfigReconciler{ Client: client, Scheme: scheme, @@ -53,6 +54,7 @@ func NewDpuOperatorConfigReconciler(client client.Client, scheme *runtime.Scheme vspImages: vspImages, vspExtraData: vspExtraData, imagePullPolicy: "IfNotPresent", + nriWebhookImage: nriWebhookImage, } } @@ -72,10 +74,16 @@ func (r *DpuOperatorConfigReconciler) WithImagePullPolicy(policy 
string) *DpuOpe //+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=security.openshift.io,resources=securitycontextconstraints,resourceNames=anyuid;hostnetwork;privileged,verbs=use //+kubebuilder:rbac:groups=apps,resources=daemonsets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=k8s.cni.cncf.io,resources=network-attachment-definitions,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=create;get +//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=create;get;update;patch;delete //+kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get -//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;watch;create +//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;watch;create;update;delete +//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=mutatingwebhookconfigurations,verbs=* +//+kubebuilder:rbac:groups="",resources=secrets,verbs=* +//+kubebuilder:rbac:groups="",resources=services,verbs=* +//+kubebuilder:rbac:groups="",resources=pods,verbs=* // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
@@ -111,6 +119,11 @@ func (r *DpuOperatorConfigReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, err } + err = r.ensureNetworkResourcesInjector(ctx, dpuOperatorConfig) + if err != nil { + logger.Error(err, "Failed to ensure Network Resources Injector is running") + } + return ctrl.Result{}, nil } @@ -122,6 +135,7 @@ func (r *DpuOperatorConfigReconciler) createCommonData(cfg *configv1.DpuOperator "Mode": "auto", "DpuOperatorDaemonImage": r.dpuDaemonImage, "ResourceName": "openshift.io/dpu", // FIXME: Hardcode for now + "NRIWebhookImage": r.nriWebhookImage, } for key, value := range r.vspImages { @@ -145,6 +159,11 @@ func (r *DpuOperatorConfigReconciler) ensureDpuDeamonSet(ctx context.Context, cf return r.createAndApplyAllFromBinData(logger, "daemon", cfg) } +func (r *DpuOperatorConfigReconciler) ensureNetworkResourcesInjector(ctx context.Context, cfg *configv1.DpuOperatorConfig) error { + logger := log.FromContext(ctx) + logger.Info("Create Network Resources Injector") + return r.createAndApplyAllFromBinData(logger, "network-resources-injector", cfg) +} func (r *DpuOperatorConfigReconciler) ensureNetworkFunctioNAD(ctx context.Context, cfg *configv1.DpuOperatorConfig) error { logger := log.FromContext(ctx) logger.Info("Create the Network Function NAD") diff --git a/internal/controller/dpuoperatorconfig_controller_test.go b/internal/controller/dpuoperatorconfig_controller_test.go index b35073fa2..cae0c2968 100755 --- a/internal/controller/dpuoperatorconfig_controller_test.go +++ b/internal/controller/dpuoperatorconfig_controller_test.go @@ -125,7 +125,7 @@ func startDPUControllerManager(ctx context.Context, client *rest.Config, wg *syn }) Expect(err).NotTo(HaveOccurred()) - b := NewDpuOperatorConfigReconciler(mgr.GetClient(), mgr.GetScheme(), "mock-image", plugin.CreateVspImagesMap(false, setupLog), plugin.CreateVspExtraDataMap(false, setupLog)) + b := NewDpuOperatorConfigReconciler(mgr.GetClient(), mgr.GetScheme(), "mock-image", 
plugin.CreateVspImagesMap(false, setupLog), plugin.CreateVspExtraDataMap(false, setupLog), "mock-webhook-image") err = b.SetupWithManager(mgr) Expect(err).NotTo(HaveOccurred()) diff --git a/manifests/stable/dpu-operator.clusterserviceversion.yaml b/manifests/stable/dpu-operator.clusterserviceversion.yaml index 7c644eb8e..4ae6fbb7d 100644 --- a/manifests/stable/dpu-operator.clusterserviceversion.yaml +++ b/manifests/stable/dpu-operator.clusterserviceversion.yaml @@ -299,6 +299,8 @@ spec: env: - name: DPU_DAEMON_IMAGE value: quay.io/openshift/origin-dpu-daemon:4.19 + - name: NETWORK_RESOURCES_INJECTOR_IMAGE + value: quay.io/openshift/sriov-dp-admission-controller:latest image: quay.io/openshift/origin-dpu-operator:4.19 livenessProbe: httpGet: diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go new file mode 100644 index 000000000..201a12e97 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/libcni/api.go @@ -0,0 +1,895 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package libcni + +// Note this is the actual implementation of the CNI specification, which +// is reflected in the SPEC.md file. +// it is typically bundled into runtime providers (i.e. containerd or cri-o would use this +// before calling runc or hcsshim). 
It is also bundled into CNI providers as well, for example, +// to add an IP to a container, to parse the configuration of the CNI and so on. + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/containernetworking/cni/pkg/invoke" + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/create" + "github.com/containernetworking/cni/pkg/utils" + "github.com/containernetworking/cni/pkg/version" +) + +var ( + CacheDir = "/var/lib/cni" + // slightly awkward wording to preserve anyone matching on error strings + ErrorCheckNotSupp = fmt.Errorf("does not support the CHECK command") +) + +const ( + CNICacheV1 = "cniCacheV1" +) + +// A RuntimeConf holds the arguments to one invocation of a CNI plugin +// excepting the network configuration, with the nested exception that +// the `runtimeConfig` from the network configuration is included +// here. +type RuntimeConf struct { + ContainerID string + NetNS string + IfName string + Args [][2]string + // A dictionary of capability-specific data passed by the runtime + // to plugins as top-level keys in the 'runtimeConfig' dictionary + // of the plugin's stdin data. libcni will ensure that only keys + // in this map which match the capabilities of the plugin are passed + // to the plugin + CapabilityArgs map[string]interface{} + + // DEPRECATED. Will be removed in a future release. 
+ CacheDir string +} + +type NetworkConfig struct { + Network *types.NetConf + Bytes []byte +} + +type NetworkConfigList struct { + Name string + CNIVersion string + DisableCheck bool + DisableGC bool + Plugins []*NetworkConfig + Bytes []byte +} + +type NetworkAttachment struct { + ContainerID string + Network string + IfName string + Config []byte + NetNS string + CniArgs [][2]string + CapabilityArgs map[string]interface{} +} + +type GCArgs struct { + ValidAttachments []types.GCAttachment +} + +type CNI interface { + AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error + DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error + GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) + + AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error + DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error + GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) + + ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error) + ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) + + GCNetworkList(ctx context.Context, net *NetworkConfigList, args *GCArgs) error + GetStatusNetworkList(ctx context.Context, net *NetworkConfigList) error + + GetCachedAttachments(containerID string) ([]*NetworkAttachment, error) + + GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) +} + +type CNIConfig struct { + Path []string + exec invoke.Exec + cacheDir string +} + +// CNIConfig 
implements the CNI interface +var _ CNI = &CNIConfig{} + +// NewCNIConfig returns a new CNIConfig object that will search for plugins +// in the given paths and use the given exec interface to run those plugins, +// or if the exec interface is not given, will use a default exec handler. +func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig { + return NewCNIConfigWithCacheDir(path, "", exec) +} + +// NewCNIConfigWithCacheDir returns a new CNIConfig object that will search for plugins +// in the given paths use the given exec interface to run those plugins, +// or if the exec interface is not given, will use a default exec handler. +// The given cache directory will be used for temporary data storage when needed. +func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) *CNIConfig { + return &CNIConfig{ + Path: path, + cacheDir: cacheDir, + exec: exec, + } +} + +func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + inject := map[string]interface{}{ + "name": name, + "cniVersion": cniVersion, + } + // Add previous plugin result + if prevResult != nil { + inject["prevResult"] = prevResult + } + + // Ensure every config uses the same name and version + orig, err = InjectConf(orig, inject) + if err != nil { + return nil, err + } + if rt != nil { + return injectRuntimeConfig(orig, rt) + } + + return orig, nil +} + +// This function takes a libcni RuntimeConf structure and injects values into +// a "runtimeConfig" dictionary in the CNI network configuration JSON that +// will be passed to the plugin on stdin. +// +// Only "capabilities arguments" passed by the runtime are currently injected. 
+// These capabilities arguments are filtered through the plugin's advertised +// capabilities from its config JSON, and any keys in the CapabilityArgs +// matching plugin capabilities are added to the "runtimeConfig" dictionary +// sent to the plugin via JSON on stdin. For example, if the plugin's +// capabilities include "portMappings", and the CapabilityArgs map includes a +// "portMappings" key, that key and its value are added to the "runtimeConfig" +// dictionary to be passed to the plugin's stdin. +func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + rc := make(map[string]interface{}) + for capability, supported := range orig.Network.Capabilities { + if !supported { + continue + } + if data, ok := rt.CapabilityArgs[capability]; ok { + rc[capability] = data + } + } + + if len(rc) > 0 { + orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc}) + if err != nil { + return nil, err + } + } + + return orig, nil +} + +// ensure we have a usable exec if the CNIConfig was not given one +func (c *CNIConfig) ensureExec() invoke.Exec { + if c.exec == nil { + c.exec = &invoke.DefaultExec{ + RawExec: &invoke.RawExec{Stderr: os.Stderr}, + PluginDecoder: version.PluginDecoder{}, + } + } + return c.exec +} + +type cachedInfo struct { + Kind string `json:"kind"` + ContainerID string `json:"containerId"` + Config []byte `json:"config"` + IfName string `json:"ifName"` + NetworkName string `json:"networkName"` + NetNS string `json:"netns,omitempty"` + CniArgs [][2]string `json:"cniArgs,omitempty"` + CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"` + RawResult map[string]interface{} `json:"result,omitempty"` + Result types.Result `json:"-"` +} + +// getCacheDir returns the cache directory in this order: +// 1) global cacheDir from CNIConfig object +// 2) deprecated cacheDir from RuntimeConf object +// 3) fall back to default cache directory +func (c *CNIConfig) getCacheDir(rt 
*RuntimeConf) string { + if c.cacheDir != "" { + return c.cacheDir + } + if rt.CacheDir != "" { + return rt.CacheDir + } + return CacheDir +} + +func (c *CNIConfig) getCacheFilePath(netName string, rt *RuntimeConf) (string, error) { + if netName == "" || rt.ContainerID == "" || rt.IfName == "" { + return "", fmt.Errorf("cache file path requires network name (%q), container ID (%q), and interface name (%q)", netName, rt.ContainerID, rt.IfName) + } + return filepath.Join(c.getCacheDir(rt), "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName)), nil +} + +func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, rt *RuntimeConf) error { + cached := cachedInfo{ + Kind: CNICacheV1, + ContainerID: rt.ContainerID, + Config: config, + IfName: rt.IfName, + NetworkName: netName, + NetNS: rt.NetNS, + CniArgs: rt.Args, + CapabilityArgs: rt.CapabilityArgs, + } + + // We need to get type.Result into cachedInfo as JSON map + // Marshal to []byte, then Unmarshal into cached.RawResult + data, err := json.Marshal(result) + if err != nil { + return err + } + + err = json.Unmarshal(data, &cached.RawResult) + if err != nil { + return err + } + + newBytes, err := json.Marshal(&cached) + if err != nil { + return err + } + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(fname), 0o700); err != nil { + return err + } + + return os.WriteFile(fname, newBytes, 0o600) +} + +func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + // Ignore error + return nil + } + return os.Remove(fname) +} + +func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + var bytes []byte + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, nil, err + } + bytes, err = os.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not 
exist on-disk + return nil, nil, nil + } + + unmarshaled := cachedInfo{} + if err := json.Unmarshal(bytes, &unmarshaled); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal cached network %q config: %w", netName, err) + } + if unmarshaled.Kind != CNICacheV1 { + return nil, nil, fmt.Errorf("read cached network %q config has wrong kind: %v", netName, unmarshaled.Kind) + } + + newRt := *rt + if unmarshaled.CniArgs != nil { + newRt.Args = unmarshaled.CniArgs + } + newRt.CapabilityArgs = unmarshaled.CapabilityArgs + + return unmarshaled.Config, &newRt, nil +} + +func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + data, err := os.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + // Load the cached result + result, err := create.CreateFromBytes(data) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. 
+ result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + fdata, err := os.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + cachedInfo := cachedInfo{} + if err := json.Unmarshal(fdata, &cachedInfo); err != nil || cachedInfo.Kind != CNICacheV1 { + return c.getLegacyCachedResult(netName, cniVersion, rt) + } + + newBytes, err := json.Marshal(&cachedInfo.RawResult) + if err != nil { + return nil, fmt.Errorf("failed to marshal cached network %q config: %w", netName, err) + } + + // Load the cached result + result, err := create.CreateFromBytes(newBytes) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +// GetNetworkListCachedResult returns the cached Result of the previous +// AddNetworkList() operation for a network list, or an error. +func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(list.Name, list.CNIVersion, rt) +} + +// GetNetworkCachedResult returns the cached Result of the previous +// AddNetwork() operation for a network, or an error. 
+func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) +} + +// GetNetworkListCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. +func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(list.Name, rt) +} + +// GetNetworkCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. +func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(net.Network.Name, rt) +} + +// GetCachedAttachments returns a list of network attachments from the cache. +// The returned list will be filtered by the containerID if the value is not empty. +func (c *CNIConfig) GetCachedAttachments(containerID string) ([]*NetworkAttachment, error) { + dirPath := filepath.Join(c.getCacheDir(&RuntimeConf{}), "results") + entries, err := os.ReadDir(dirPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + + fileNames := make([]string, 0, len(entries)) + for _, e := range entries { + fileNames = append(fileNames, e.Name()) + } + sort.Strings(fileNames) + + attachments := []*NetworkAttachment{} + for _, fname := range fileNames { + if len(containerID) > 0 { + part := fmt.Sprintf("-%s-", containerID) + pos := strings.Index(fname, part) + if pos <= 0 || pos+len(part) >= len(fname) { + continue + } + } + + cacheFile := filepath.Join(dirPath, fname) + bytes, err := os.ReadFile(cacheFile) + if err != nil { + continue + } + + cachedInfo := cachedInfo{} + + if err := json.Unmarshal(bytes, &cachedInfo); err != nil { + continue + } + if cachedInfo.Kind != CNICacheV1 { + continue + } + if len(containerID) > 0 && cachedInfo.ContainerID != containerID { 
+ continue + } + if cachedInfo.IfName == "" || cachedInfo.NetworkName == "" { + continue + } + + attachments = append(attachments, &NetworkAttachment{ + ContainerID: cachedInfo.ContainerID, + Network: cachedInfo.NetworkName, + IfName: cachedInfo.IfName, + Config: cachedInfo.Config, + NetNS: cachedInfo.NetNS, + CniArgs: cachedInfo.CniArgs, + CapabilityArgs: cachedInfo.CapabilityArgs, + }) + } + return attachments, nil +} + +func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return nil, err + } + if err := utils.ValidateContainerID(rt.ContainerID); err != nil { + return nil, err + } + if err := utils.ValidateNetworkName(name); err != nil { + return nil, err + } + if err := utils.ValidateInterfaceName(rt.IfName); err != nil { + return nil, err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return nil, err + } + + return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec) +} + +// AddNetworkList executes a sequence of plugins with the ADD command +func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + var err error + var result types.Result + for _, net := range list.Plugins { + result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt) + if err != nil { + return nil, fmt.Errorf("plugin %s failed (add): %w", pluginDescription(net.Network), err) + } + } + + if err = c.cacheAdd(result, list.Bytes, list.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", list.Name, err) + } + + return result, nil +} + +func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + 
c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec) +} + +// CheckNetworkList executes a sequence of plugins with the CHECK command +func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q %w", list.CNIVersion, ErrorCheckNotSupp) + } + + if list.DisableCheck { + return nil + } + + cachedResult, err := c.getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + } + + for _, net := range list.Plugins { + if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + } + + return nil +} + +func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec) +} + +// DelNetworkList executes a sequence of plugins with the DEL command +func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := 
version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + if cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt); err != nil { + _ = c.cacheDel(list.Name, rt) + cachedResult = nil + } + } + + for i := len(list.Plugins) - 1; i >= 0; i-- { + net := list.Plugins[i] + if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return fmt.Errorf("plugin %s failed (delete): %w", pluginDescription(net.Network), err) + } + } + + _ = c.cacheDel(list.Name, rt) + + return nil +} + +func pluginDescription(net *types.NetConf) string { + if net == nil { + return "" + } + pluginType := net.Type + out := fmt.Sprintf("type=%q", pluginType) + name := net.Name + if name != "" { + out += fmt.Sprintf(" name=%q", name) + } + return out +} + +// AddNetwork executes the plugin with the ADD command +func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt) + if err != nil { + return nil, err + } + + if err = c.cacheAdd(result, net.Bytes, net.Network.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", net.Network.Name, err) + } + + return result, nil +} + +// CheckNetwork executes the plugin with the CHECK command +func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q %w", net.Network.CNIVersion, ErrorCheckNotSupp) + } + + cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + return 
c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) +} + +// DelNetwork executes the plugin with the DEL command +func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + } + + if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + _ = c.cacheDel(net.Network.Name, rt) + return nil +} + +// ValidateNetworkList checks that a configuration is reasonably valid. +// - all the specified plugins exist on disk +// - every plugin supports the desired version. +// +// Returns a list of all capabilities supported by the configuration, or error +func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) { + version := list.CNIVersion + + // holding map for seen caps (in case of duplicates) + caps := map[string]interface{}{} + + errs := []error{} + for _, net := range list.Plugins { + if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil { + errs = append(errs, err) + } + for c, enabled := range net.Network.Capabilities { + if !enabled { + continue + } + caps[c] = struct{}{} + } + } + + if len(errs) > 0 { + return nil, fmt.Errorf("%v", errs) + } + + // make caps list + cc := make([]string, 0, len(caps)) + for c := range caps { + cc = append(cc, c) + } + + return cc, nil +} + +// ValidateNetwork checks that a configuration is reasonably valid. 
+// It uses the same logic as ValidateNetworkList) +// Returns a list of capabilities +func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) { + caps := []string{} + for c, ok := range net.Network.Capabilities { + if ok { + caps = append(caps, c) + } + } + if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil { + return nil, err + } + return caps, nil +} + +// validatePlugin checks that an individual plugin's configuration is sane +func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginName, c.Path) + if err != nil { + return err + } + if expectedVersion == "" { + expectedVersion = "0.1.0" + } + + vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec) + if err != nil { + return err + } + for _, vers := range vi.SupportedVersions() { + if vers == expectedVersion { + return nil + } + } + return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion) +} + +// GetVersionInfo reports which versions of the CNI spec are supported by +// the given plugin. +func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginType, c.Path) + if err != nil { + return nil, err + } + + return invoke.GetVersionInfo(ctx, pluginPath, c.exec) +} + +// GCNetworkList will do two things +// - dump the list of cached attachments, and issue deletes as necessary +// - issue a GC to the underlying plugins (if the version is high enough) +func (c *CNIConfig) GCNetworkList(ctx context.Context, list *NetworkConfigList, args *GCArgs) error { + // If DisableGC is set, then don't bother GCing at all. 
+ if list.DisableGC { + return nil + } + + // First, get the list of cached attachments + cachedAttachments, err := c.GetCachedAttachments("") + if err != nil { + return nil + } + + var validAttachments map[types.GCAttachment]interface{} + if args != nil { + validAttachments = make(map[types.GCAttachment]interface{}, len(args.ValidAttachments)) + for _, a := range args.ValidAttachments { + validAttachments[a] = nil + } + } + + var errs []error + + for _, cachedAttachment := range cachedAttachments { + if cachedAttachment.Network != list.Name { + continue + } + // we found this attachment + gca := types.GCAttachment{ + ContainerID: cachedAttachment.ContainerID, + IfName: cachedAttachment.IfName, + } + if _, ok := validAttachments[gca]; ok { + continue + } + // otherwise, this attachment wasn't valid and we should issue a CNI DEL + rt := RuntimeConf{ + ContainerID: cachedAttachment.ContainerID, + NetNS: cachedAttachment.NetNS, + IfName: cachedAttachment.IfName, + Args: cachedAttachment.CniArgs, + CapabilityArgs: cachedAttachment.CapabilityArgs, + } + if err := c.DelNetworkList(ctx, list, &rt); err != nil { + errs = append(errs, fmt.Errorf("failed to delete stale attachment %s %s: %w", rt.ContainerID, rt.IfName, err)) + } + } + + // now, if the version supports it, issue a GC + if gt, _ := version.GreaterThanOrEqualTo(list.CNIVersion, "1.1.0"); gt { + inject := map[string]interface{}{ + "name": list.Name, + "cniVersion": list.CNIVersion, + } + if args != nil { + inject["cni.dev/valid-attachments"] = args.ValidAttachments + // #1101: spec used incorrect variable name + inject["cni.dev/attachments"] = args.ValidAttachments + } + + for _, plugin := range list.Plugins { + // build config here + pluginConfig, err := InjectConf(plugin, inject) + if err != nil { + errs = append(errs, fmt.Errorf("failed to generate configuration to GC plugin %s: %w", plugin.Network.Type, err)) + } + if err := c.gcNetwork(ctx, pluginConfig); err != nil { + errs = append(errs, 
fmt.Errorf("failed to GC plugin %s: %w", plugin.Network.Type, err)) + } + } + } + + return errors.Join(errs...) +} + +func (c *CNIConfig) gcNetwork(ctx context.Context, net *NetworkConfig) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + args := c.args("GC", &RuntimeConf{}) + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, net.Bytes, args, c.exec) +} + +func (c *CNIConfig) GetStatusNetworkList(ctx context.Context, list *NetworkConfigList) error { + // If the version doesn't support status, abort. + if gt, _ := version.GreaterThanOrEqualTo(list.CNIVersion, "1.1.0"); !gt { + return nil + } + + inject := map[string]interface{}{ + "name": list.Name, + "cniVersion": list.CNIVersion, + } + + for _, plugin := range list.Plugins { + // build config here + pluginConfig, err := InjectConf(plugin, inject) + if err != nil { + return fmt.Errorf("failed to generate configuration to get plugin STATUS %s: %w", plugin.Network.Type, err) + } + if err := c.getStatusNetwork(ctx, pluginConfig); err != nil { + return err // Don't collect errors here, so we return a clean error code. 
+ } + } + return nil +} + +func (c *CNIConfig) getStatusNetwork(ctx context.Context, net *NetworkConfig) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + args := c.args("STATUS", &RuntimeConf{}) + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, net.Bytes, args, c.exec) +} + +// ===== +func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args { + return &invoke.Args{ + Command: action, + ContainerID: rt.ContainerID, + NetNS: rt.NetNS, + PluginArgs: rt.Args, + IfName: rt.IfName, + Path: strings.Join(c.Path, string(os.PathListSeparator)), + } +} diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go new file mode 100644 index 000000000..1d1b821c6 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -0,0 +1,346 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package libcni + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "slices" + "sort" + "strings" + + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/version" +) + +type NotFoundError struct { + Dir string + Name string +} + +func (e NotFoundError) Error() string { + return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir) +} + +type NoConfigsFoundError struct { + Dir string +} + +func (e NoConfigsFoundError) Error() string { + return fmt.Sprintf(`no net configurations found in %s`, e.Dir) +} + +func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { + conf := &NetworkConfig{Bytes: bytes, Network: &types.NetConf{}} + if err := json.Unmarshal(bytes, conf.Network); err != nil { + return nil, fmt.Errorf("error parsing configuration: %w", err) + } + if conf.Network.Type == "" { + return nil, fmt.Errorf("error parsing configuration: missing 'type'") + } + return conf, nil +} + +func ConfFromFile(filename string) (*NetworkConfig, error) { + bytes, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + return ConfFromBytes(bytes) +} + +func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { + rawList := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &rawList); err != nil { + return nil, fmt.Errorf("error parsing configuration list: %w", err) + } + + rawName, ok := rawList["name"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no name") + } + name, ok := rawName.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName) + } + + var cniVersion string + rawVersion, ok := rawList["cniVersion"] + if ok { + cniVersion, ok = rawVersion.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion) + } + } + + rawVersions, ok := rawList["cniVersions"] + if ok { + // 
Parse the current package CNI version + rvs, ok := rawVersions.([]interface{}) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid type for cniVersions: %T", rvs) + } + vs := make([]string, 0, len(rvs)) + for i, rv := range rvs { + v, ok := rv.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid type for cniVersions index %d: %T", i, rv) + } + gt, err := version.GreaterThan(v, version.Current()) + if err != nil { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersions entry %s at index %d: %w", v, i, err) + } else if !gt { + // Skip versions "greater" than this implementation of the spec + vs = append(vs, v) + } + } + + // if cniVersion was already set, append it to the list for sorting. + if cniVersion != "" { + gt, err := version.GreaterThan(cniVersion, version.Current()) + if err != nil { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion %s: %w", cniVersion, err) + } else if !gt { + // ignore any versions higher than the current implemented spec version + vs = append(vs, cniVersion) + } + } + slices.SortFunc[[]string](vs, func(v1, v2 string) int { + if v1 == v2 { + return 0 + } + if gt, _ := version.GreaterThan(v1, v2); gt { + return 1 + } + return -1 + }) + if len(vs) > 0 { + cniVersion = vs[len(vs)-1] + } + } + + readBool := func(key string) (bool, error) { + rawVal, ok := rawList[key] + if !ok { + return false, nil + } + if b, ok := rawVal.(bool); ok { + return b, nil + } + + s, ok := rawVal.(string) + if !ok { + return false, fmt.Errorf("error parsing configuration list: invalid type %T for %s", rawVal, key) + } + s = strings.ToLower(s) + switch s { + case "false": + return false, nil + case "true": + return true, nil + } + return false, fmt.Errorf("error parsing configuration list: invalid value %q for %s", s, key) + } + + disableCheck, err := readBool("disableCheck") + if err != nil { + return nil, err + } + + disableGC, err := 
readBool("disableGC") + if err != nil { + return nil, err + } + + list := &NetworkConfigList{ + Name: name, + DisableCheck: disableCheck, + DisableGC: disableGC, + CNIVersion: cniVersion, + Bytes: bytes, + } + + var plugins []interface{} + plug, ok := rawList["plugins"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key") + } + plugins, ok = plug.([]interface{}) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug) + } + if len(plugins) == 0 { + return nil, fmt.Errorf("error parsing configuration list: no plugins in list") + } + + for i, conf := range plugins { + newBytes, err := json.Marshal(conf) + if err != nil { + return nil, fmt.Errorf("failed to marshal plugin config %d: %w", i, err) + } + netConf, err := ConfFromBytes(newBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse plugin config %d: %w", i, err) + } + list.Plugins = append(list.Plugins, netConf) + } + + return list, nil +} + +func ConfListFromFile(filename string) (*NetworkConfigList, error) { + bytes, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + return ConfListFromBytes(bytes) +} + +func ConfFiles(dir string, extensions []string) ([]string, error) { + // In part, adapted from rkt/networking/podenv.go#listFiles + files, err := os.ReadDir(dir) + switch { + case err == nil: // break + case os.IsNotExist(err): + return nil, nil + default: + return nil, err + } + + confFiles := []string{} + for _, f := range files { + if f.IsDir() { + continue + } + fileExt := filepath.Ext(f.Name()) + for _, ext := range extensions { + if fileExt == ext { + confFiles = append(confFiles, filepath.Join(dir, f.Name())) + } + } + } + return confFiles, nil +} + +func LoadConf(dir, name string) (*NetworkConfig, error) { + files, err := ConfFiles(dir, []string{".conf", ".json"}) + switch { + case err != nil: + return nil, err + case len(files) == 0: + 
return nil, NoConfigsFoundError{Dir: dir} + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Network.Name == name { + return conf, nil + } + } + return nil, NotFoundError{dir, name} +} + +func LoadConfList(dir, name string) (*NetworkConfigList, error) { + files, err := ConfFiles(dir, []string{".conflist"}) + if err != nil { + return nil, err + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfListFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Name == name { + return conf, nil + } + } + + // Try and load a network configuration file (instead of list) + // from the same name, then upconvert. + singleConf, err := LoadConf(dir, name) + if err != nil { + // A little extra logic so the error makes sense + var ncfErr NoConfigsFoundError + if len(files) != 0 && errors.As(err, &ncfErr) { + // Config lists found but no config files found + return nil, NotFoundError{dir, name} + } + + return nil, err + } + return ConfListFromConf(singleConf) +} + +func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) { + config := make(map[string]interface{}) + err := json.Unmarshal(original.Bytes, &config) + if err != nil { + return nil, fmt.Errorf("unmarshal existing network bytes: %w", err) + } + + for key, value := range newValues { + if key == "" { + return nil, fmt.Errorf("keys cannot be empty") + } + + if value == nil { + return nil, fmt.Errorf("key '%s' value must not be nil", key) + } + + config[key] = value + } + + newBytes, err := json.Marshal(config) + if err != nil { + return nil, err + } + + return ConfFromBytes(newBytes) +} + +// ConfListFromConf "upconverts" a network config in to a NetworkConfigList, +// with the single network as the only entry in the list. 
+func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) { + // Re-deserialize the config's json, then make a raw map configlist. + // This may seem a bit strange, but it's to make the Bytes fields + // actually make sense. Otherwise, the generated json is littered with + // golang default values. + + rawConfig := make(map[string]interface{}) + if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil { + return nil, err + } + + rawConfigList := map[string]interface{}{ + "name": original.Network.Name, + "cniVersion": original.Network.CNIVersion, + "plugins": []interface{}{rawConfig}, + } + + b, err := json.Marshal(rawConfigList) + if err != nil { + return nil, err + } + return ConfListFromBytes(b) +} diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b..f4e7dbf37 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... 
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b..daea9dd6d 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e575754..fa854785d 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ----------- -Nothing yet. 
+1.8.0 2023-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d..e4ac2a2ff 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. 
Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster. +Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". 
+ + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). + [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd..c349c326c 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. 
https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. 
You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. 
A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. 
due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. 
-func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. 
When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. 
If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. 
the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. 
err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e4..36c311694 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. 
- // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. 
Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? 
+ } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. + if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. 
-func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. 
@@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). 
-func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). 
+ if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. 
-// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. - return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path inbetween reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. 
+ if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here. - if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. 
+ if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. 
+ ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. 
+} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a..d8de5ab76 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. 
- // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. 
Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if 
err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. 
-func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. 
if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. 
- if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. 
for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received kevents into // Event values that it sends down the Events channel. 
-func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... 
+ // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. 
+ err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. 
kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. +func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. 
+ path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. 
+ } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c01..5eb5dbc66 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. 
- // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). 
-// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d6..c54a63083 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. 
-// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
+type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. 
-func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. 
The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { 
} // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, 
watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc49..0760efe91 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ 
b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. 
+ // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. + Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. 
Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. 
+ ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. 
+// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. 
+// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. 
func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. 
+func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." { return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 000000000..b0eab1009 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 
000000000..928319fb0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 000000000..3186b0c34 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git 
a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 000000000..f69fdb930 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 000000000..607e683bd --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + 
+import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 000000000..35c734be4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + 
time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 000000000..e5b3b6f69 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 000000000..1dd455bc5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? 
+ {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 000000000..f1b2e73bd --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 000000000..52bf4ce53 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 000000000..547df1df8 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return 
unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 000000000..7daa45e19 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 000000000..30976ce97 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 000000000..37dfeddc2 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 
000000000..a72c64954 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. +var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae65..000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. 
- -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b88..f65e8fe3e 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78..a29fc7aab 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go index 8c00e737a..1b632e077 100644 --- a/vendor/github.com/golang/glog/glog.go +++ b/vendor/github.com/golang/glog/glog.go @@ -76,7 +76,7 @@ // -log_backtrace_at=gopherflakes.go:234 // A stack trace will be written to the Info log whenever execution // hits one of these statements. (Unlike with -vmodule, the ".go" -// must bepresent.) +// must be present.) // -v=0 // Enable V-leveled logging at the specified level. 
// -vmodule="" diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go index a1551dbc8..b54bd4052 100644 --- a/vendor/github.com/golang/glog/glog_file.go +++ b/vendor/github.com/golang/glog/glog_file.go @@ -26,7 +26,6 @@ import ( "fmt" "io" "os" - "os/user" "path/filepath" "runtime" "strings" @@ -68,9 +67,8 @@ func init() { host = shortHostname(h) } - current, err := user.Current() - if err == nil { - userName = current.Username + if u := lookupUser(); u != "" { + userName = u } // Sanitize userName since it is used to construct file paths. userName = strings.Map(func(r rune) rune { @@ -118,32 +116,53 @@ var onceLogDirs sync.Once // contains tag ("INFO", "FATAL", etc.) and t. If the file is created // successfully, create also attempts to update the symlink for that tag, ignoring // errors. -func create(tag string, t time.Time) (f *os.File, filename string, err error) { +func create(tag string, t time.Time, dir string) (f *os.File, filename string, err error) { + if dir != "" { + f, name, err := createInDir(dir, tag, t) + if err == nil { + return f, name, err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", err) + } + onceLogDirs.Do(createLogDirs) if len(logDirs) == 0 { return nil, "", errors.New("log: no log dirs") } - name, link := logName(tag, t) var lastErr error for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := os.Create(fname) + f, name, err := createInDir(dir, tag, t) if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - if *logLink != "" { - lsymlink := filepath.Join(*logLink, link) - os.Remove(lsymlink) // ignore err - os.Symlink(fname, lsymlink) // ignore err - } - return f, fname, nil + return f, name, err } lastErr = err } return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) } +func createInDir(dir, tag string, t time.Time) (f *os.File, name string, err error) { + name, link := 
logName(tag, t) + fname := filepath.Join(dir, name) + // O_EXCL is important here, as it prevents a vulnerability. The general idea is that logs often + // live in an insecure directory (like /tmp), so an unprivileged attacker could create fname in + // advance as a symlink to a file the logging process can access, but the attacker cannot. O_EXCL + // fails the open if it already exists, thus prevent our this code from opening the existing file + // the attacker points us to. + f, err = os.OpenFile(fname, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + if *logLink != "" { + lsymlink := filepath.Join(*logLink, link) + os.Remove(lsymlink) // ignore err + os.Symlink(fname, lsymlink) // ignore err + } + return f, fname, nil + } + return nil, "", err +} + // flushSyncWriter is the interface satisfied by logging destinations. type flushSyncWriter interface { Flush() error @@ -160,7 +179,10 @@ var sinks struct { func init() { // Register stderr first: that way if we crash during file-writing at least // the log will have gone somewhere. - logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr, &sinks.file) + if shouldRegisterStderrSink() { + logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr) + } + logsink.TextSinks = append(logsink.TextSinks, &sinks.file) sinks.file.flushChan = make(chan logsink.Severity, 1) go sinks.file.flushDaemon() @@ -247,6 +269,7 @@ type syncBuffer struct { names []string sev logsink.Severity nbytes uint64 // The number of bytes written to this file + madeAt time.Time } func (sb *syncBuffer) Sync() error { @@ -254,9 +277,14 @@ func (sb *syncBuffer) Sync() error { } func (sb *syncBuffer) Write(p []byte) (n int, err error) { + // Rotate the file if it is too large, but ensure we only do so, + // if rotate doesn't create a conflicting filename. 
if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - return 0, err + now := timeNow() + if now.After(sb.madeAt.Add(1*time.Second)) || now.Second() != sb.madeAt.Second() { + if err := sb.rotateFile(now); err != nil { + return 0, err + } } } n, err = sb.Writer.Write(p) @@ -274,7 +302,8 @@ const footer = "\nCONTINUED IN NEXT FILE\n" func (sb *syncBuffer) rotateFile(now time.Time) error { var err error pn := "" - file, name, err := create(sb.sev.String(), now) + file, name, err := create(sb.sev.String(), now, "") + sb.madeAt = now if sb.file != nil { // The current log file becomes the previous log at the end of diff --git a/vendor/github.com/golang/glog/glog_file_nonwindows.go b/vendor/github.com/golang/glog/glog_file_nonwindows.go new file mode 100644 index 000000000..a0089ba4a --- /dev/null +++ b/vendor/github.com/golang/glog/glog_file_nonwindows.go @@ -0,0 +1,19 @@ +//go:build !windows + +package glog + +import "os/user" + +// shouldRegisterStderrSink determines whether we should register a log sink that writes to stderr. +// Today, this always returns true on non-Windows platforms, as it specifically checks for a +// condition that is only present on Windows. +func shouldRegisterStderrSink() bool { + return true +} + +func lookupUser() string { + if current, err := user.Current(); err == nil { + return current.Username + } + return "" +} diff --git a/vendor/github.com/golang/glog/glog_file_windows.go b/vendor/github.com/golang/glog/glog_file_windows.go new file mode 100644 index 000000000..2f032e19b --- /dev/null +++ b/vendor/github.com/golang/glog/glog_file_windows.go @@ -0,0 +1,43 @@ +//go:build windows + +package glog + +import ( + "os" + "syscall" +) + +// shouldRegisterStderrSink determines whether we should register a log sink that writes to stderr. +// Today, this checks if stderr is "valid", in that it maps to a non-NULL Handle. 
+// Windows Services are spawned without Stdout and Stderr, so any attempt to use them equates to +// referencing an invalid file Handle. +// os.Stderr's FD is derived from a call to `syscall.GetStdHandle(syscall.STD_ERROR_HANDLE)`. +// Documentation[1] for the GetStdHandle function indicates the return value may be NULL if the +// application lacks the standard handle, so consider Stderr valid if its FD is non-NULL. +// [1]: https://learn.microsoft.com/en-us/windows/console/getstdhandle +func shouldRegisterStderrSink() bool { + return os.Stderr.Fd() != 0 +} + +// This follows the logic in the standard library's user.Current() function, except +// that it leaves out the potentially expensive calls required to look up the user's +// display name in Active Directory. +func lookupUser() string { + token, err := syscall.OpenCurrentProcessToken() + if err != nil { + return "" + } + defer token.Close() + tokenUser, err := token.GetTokenUser() + if err != nil { + return "" + } + username, _, accountType, err := tokenUser.User.Sid.LookupAccount("") + if err != nil { + return "" + } + if accountType != syscall.SidTypeUser { + return "" + } + return username +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/clientset.go new file mode 100644 index 000000000..f4238c549 --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/clientset.go @@ -0,0 +1,97 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + + k8scnicncfiov1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + K8sCniCncfIoV1() k8scnicncfiov1.K8sCniCncfIoV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + k8sCniCncfIoV1 *k8scnicncfiov1.K8sCniCncfIoV1Client +} + +// K8sCniCncfIoV1 retrieves the K8sCniCncfIoV1Client +func (c *Clientset) K8sCniCncfIoV1() k8scnicncfiov1.K8sCniCncfIoV1Interface { + return c.k8sCniCncfIoV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.k8sCniCncfIoV1, err = k8scnicncfiov1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.k8sCniCncfIoV1 = k8scnicncfiov1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.k8sCniCncfIoV1 = k8scnicncfiov1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/doc.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/doc.go new file mode 100644 index 000000000..22485f354 --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..49f3510bf --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. 
+package scheme diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..93942f6cb --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + k8scnicncfiov1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + k8scnicncfiov1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1/doc.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1/doc.go new file mode 100644 index 000000000..32d02a1a0 --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1 diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1/generated_expansion.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1/generated_expansion.go new file mode 100644 index 000000000..245ff707b --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +type NetworkAttachmentDefinitionExpansion interface{} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1/k8s.cni.cncf.io_client.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1/k8s.cni.cncf.io_client.go new file mode 100644 index 000000000..9317b8034 --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1/k8s.cni.cncf.io_client.go @@ -0,0 +1,89 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type K8sCniCncfIoV1Interface interface { + RESTClient() rest.Interface + NetworkAttachmentDefinitionsGetter +} + +// K8sCniCncfIoV1Client is used to interact with features provided by the k8s.cni.cncf.io group. 
+type K8sCniCncfIoV1Client struct { + restClient rest.Interface +} + +func (c *K8sCniCncfIoV1Client) NetworkAttachmentDefinitions(namespace string) NetworkAttachmentDefinitionInterface { + return newNetworkAttachmentDefinitions(c, namespace) +} + +// NewForConfig creates a new K8sCniCncfIoV1Client for the given config. +func NewForConfig(c *rest.Config) (*K8sCniCncfIoV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &K8sCniCncfIoV1Client{client}, nil +} + +// NewForConfigOrDie creates a new K8sCniCncfIoV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *K8sCniCncfIoV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new K8sCniCncfIoV1Client for the given RESTClient. +func New(c rest.Interface) *K8sCniCncfIoV1Client { + return &K8sCniCncfIoV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *K8sCniCncfIoV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1/networkattachmentdefinition.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1/networkattachmentdefinition.go new file mode 100644 index 000000000..1f0ddac45 --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1/networkattachmentdefinition.go @@ -0,0 +1,178 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + scheme "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// NetworkAttachmentDefinitionsGetter has a method to return a NetworkAttachmentDefinitionInterface. +// A group's client should implement this interface. 
+type NetworkAttachmentDefinitionsGetter interface { + NetworkAttachmentDefinitions(namespace string) NetworkAttachmentDefinitionInterface +} + +// NetworkAttachmentDefinitionInterface has methods to work with NetworkAttachmentDefinition resources. +type NetworkAttachmentDefinitionInterface interface { + Create(ctx context.Context, networkAttachmentDefinition *v1.NetworkAttachmentDefinition, opts metav1.CreateOptions) (*v1.NetworkAttachmentDefinition, error) + Update(ctx context.Context, networkAttachmentDefinition *v1.NetworkAttachmentDefinition, opts metav1.UpdateOptions) (*v1.NetworkAttachmentDefinition, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NetworkAttachmentDefinition, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.NetworkAttachmentDefinitionList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkAttachmentDefinition, err error) + NetworkAttachmentDefinitionExpansion +} + +// networkAttachmentDefinitions implements NetworkAttachmentDefinitionInterface +type networkAttachmentDefinitions struct { + client rest.Interface + ns string +} + +// newNetworkAttachmentDefinitions returns a NetworkAttachmentDefinitions +func newNetworkAttachmentDefinitions(c *K8sCniCncfIoV1Client, namespace string) *networkAttachmentDefinitions { + return &networkAttachmentDefinitions{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the networkAttachmentDefinition, and returns the corresponding networkAttachmentDefinition object, and an error if there is any. 
+func (c *networkAttachmentDefinitions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkAttachmentDefinition, err error) { + result = &v1.NetworkAttachmentDefinition{} + err = c.client.Get(). + Namespace(c.ns). + Resource("network-attachment-definitions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of NetworkAttachmentDefinitions that match those selectors. +func (c *networkAttachmentDefinitions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkAttachmentDefinitionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.NetworkAttachmentDefinitionList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("network-attachment-definitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested networkAttachmentDefinitions. +func (c *networkAttachmentDefinitions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("network-attachment-definitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a networkAttachmentDefinition and creates it. Returns the server's representation of the networkAttachmentDefinition, and an error, if there is any. 
+func (c *networkAttachmentDefinitions) Create(ctx context.Context, networkAttachmentDefinition *v1.NetworkAttachmentDefinition, opts metav1.CreateOptions) (result *v1.NetworkAttachmentDefinition, err error) { + result = &v1.NetworkAttachmentDefinition{} + err = c.client.Post(). + Namespace(c.ns). + Resource("network-attachment-definitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(networkAttachmentDefinition). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a networkAttachmentDefinition and updates it. Returns the server's representation of the networkAttachmentDefinition, and an error, if there is any. +func (c *networkAttachmentDefinitions) Update(ctx context.Context, networkAttachmentDefinition *v1.NetworkAttachmentDefinition, opts metav1.UpdateOptions) (result *v1.NetworkAttachmentDefinition, err error) { + result = &v1.NetworkAttachmentDefinition{} + err = c.client.Put(). + Namespace(c.ns). + Resource("network-attachment-definitions"). + Name(networkAttachmentDefinition.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(networkAttachmentDefinition). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the networkAttachmentDefinition and deletes it. Returns an error if one occurs. +func (c *networkAttachmentDefinitions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("network-attachment-definitions"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *networkAttachmentDefinitions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("network-attachment-definitions"). 
+ VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched networkAttachmentDefinition. +func (c *networkAttachmentDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkAttachmentDefinition, err error) { + result = &v1.NetworkAttachmentDefinition{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("network-attachment-definitions"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go new file mode 100644 index 000000000..2476f9fe6 --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go @@ -0,0 +1,180 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned" + internalinterfaces "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/internalinterfaces" + k8scnicncfio "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. 
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + K8sCniCncfIo() k8scnicncfio.Interface +} + +func (f *sharedInformerFactory) K8sCniCncfIo() k8scnicncfio.Interface { + return k8scnicncfio.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/generic.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/generic.go new file mode 100644 index 000000000..649781164 --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/generic.go @@ -0,0 +1,62 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=k8s.cni.cncf.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("network-attachment-definitions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.K8sCniCncfIo().V1().NetworkAttachmentDefinitions().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 
index 000000000..edc84dd9b --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. 
+type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/interface.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/interface.go new file mode 100644 index 000000000..f4d418cb0 --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package k8s + +import ( + internalinterfaces "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1/interface.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1/interface.go new file mode 100644 index 000000000..aa1b0762f --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // NetworkAttachmentDefinitions returns a NetworkAttachmentDefinitionInformer. 
+ NetworkAttachmentDefinitions() NetworkAttachmentDefinitionInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// NetworkAttachmentDefinitions returns a NetworkAttachmentDefinitionInformer. +func (v *version) NetworkAttachmentDefinitions() NetworkAttachmentDefinitionInformer { + return &networkAttachmentDefinitionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1/networkattachmentdefinition.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1/networkattachmentdefinition.go new file mode 100644 index 000000000..35f2f931d --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1/networkattachmentdefinition.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + k8scnicncfiov1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + versioned "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned" + internalinterfaces "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// NetworkAttachmentDefinitionInformer provides access to a shared informer and lister for +// NetworkAttachmentDefinitions. +type NetworkAttachmentDefinitionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.NetworkAttachmentDefinitionLister +} + +type networkAttachmentDefinitionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewNetworkAttachmentDefinitionInformer constructs a new informer for NetworkAttachmentDefinition type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewNetworkAttachmentDefinitionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNetworkAttachmentDefinitionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredNetworkAttachmentDefinitionInformer constructs a new informer for NetworkAttachmentDefinition type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNetworkAttachmentDefinitionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sCniCncfIoV1().NetworkAttachmentDefinitions(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.K8sCniCncfIoV1().NetworkAttachmentDefinitions(namespace).Watch(context.TODO(), options) + }, + }, + &k8scnicncfiov1.NetworkAttachmentDefinition{}, + resyncPeriod, + indexers, + ) +} + +func (f *networkAttachmentDefinitionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNetworkAttachmentDefinitionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *networkAttachmentDefinitionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&k8scnicncfiov1.NetworkAttachmentDefinition{}, f.defaultInformer) +} + +func (f *networkAttachmentDefinitionInformer) Lister() v1.NetworkAttachmentDefinitionLister { + return v1.NewNetworkAttachmentDefinitionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1/expansion_generated.go 
b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1/expansion_generated.go new file mode 100644 index 000000000..cb9a39138 --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// NetworkAttachmentDefinitionListerExpansion allows custom methods to be added to +// NetworkAttachmentDefinitionLister. +type NetworkAttachmentDefinitionListerExpansion interface{} + +// NetworkAttachmentDefinitionNamespaceListerExpansion allows custom methods to be added to +// NetworkAttachmentDefinitionNamespaceLister. 
+type NetworkAttachmentDefinitionNamespaceListerExpansion interface{} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1/networkattachmentdefinition.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1/networkattachmentdefinition.go new file mode 100644 index 000000000..fb9b666ed --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1/networkattachmentdefinition.go @@ -0,0 +1,94 @@ +/* +Copyright 2021 The Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// NetworkAttachmentDefinitionLister helps list NetworkAttachmentDefinitions. +type NetworkAttachmentDefinitionLister interface { + // List lists all NetworkAttachmentDefinitions in the indexer. + List(selector labels.Selector) (ret []*v1.NetworkAttachmentDefinition, err error) + // NetworkAttachmentDefinitions returns an object that can list and get NetworkAttachmentDefinitions. 
+ NetworkAttachmentDefinitions(namespace string) NetworkAttachmentDefinitionNamespaceLister + NetworkAttachmentDefinitionListerExpansion +} + +// networkAttachmentDefinitionLister implements the NetworkAttachmentDefinitionLister interface. +type networkAttachmentDefinitionLister struct { + indexer cache.Indexer +} + +// NewNetworkAttachmentDefinitionLister returns a new NetworkAttachmentDefinitionLister. +func NewNetworkAttachmentDefinitionLister(indexer cache.Indexer) NetworkAttachmentDefinitionLister { + return &networkAttachmentDefinitionLister{indexer: indexer} +} + +// List lists all NetworkAttachmentDefinitions in the indexer. +func (s *networkAttachmentDefinitionLister) List(selector labels.Selector) (ret []*v1.NetworkAttachmentDefinition, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.NetworkAttachmentDefinition)) + }) + return ret, err +} + +// NetworkAttachmentDefinitions returns an object that can list and get NetworkAttachmentDefinitions. +func (s *networkAttachmentDefinitionLister) NetworkAttachmentDefinitions(namespace string) NetworkAttachmentDefinitionNamespaceLister { + return networkAttachmentDefinitionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// NetworkAttachmentDefinitionNamespaceLister helps list and get NetworkAttachmentDefinitions. +type NetworkAttachmentDefinitionNamespaceLister interface { + // List lists all NetworkAttachmentDefinitions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.NetworkAttachmentDefinition, err error) + // Get retrieves the NetworkAttachmentDefinition from the indexer for a given namespace and name. + Get(name string) (*v1.NetworkAttachmentDefinition, error) + NetworkAttachmentDefinitionNamespaceListerExpansion +} + +// networkAttachmentDefinitionNamespaceLister implements the NetworkAttachmentDefinitionNamespaceLister +// interface. 
+type networkAttachmentDefinitionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all NetworkAttachmentDefinitions in the indexer for a given namespace. +func (s networkAttachmentDefinitionNamespaceLister) List(selector labels.Selector) (ret []*v1.NetworkAttachmentDefinition, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.NetworkAttachmentDefinition)) + }) + return ret, err +} + +// Get retrieves the NetworkAttachmentDefinition from the indexer for a given namespace and name. +func (s networkAttachmentDefinitionNamespaceLister) Get(name string) (*v1.NetworkAttachmentDefinition, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("networkattachmentdefinition"), name) + } + return obj.(*v1.NetworkAttachmentDefinition), nil +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils/cniconfig.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils/cniconfig.go new file mode 100644 index 000000000..4b54909bb --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils/cniconfig.go @@ -0,0 +1,237 @@ +// Copyright (c) 2021 Kubernetes Network Plumbing Working Group +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "encoding/json" + "fmt" + "github.com/containernetworking/cni/libcni" + "io/ioutil" + "os" + "path/filepath" + "strings" + + v1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" +) + +const ( + baseDevInfoPath = "/var/run/k8s.cni.cncf.io/devinfo" + dpDevInfoSubDir = "dp" + cniDevInfoSubDir = "cni" +) + +// GetCNIConfig (from annotation string to CNI JSON bytes) +func GetCNIConfig(net *v1.NetworkAttachmentDefinition, confDir string) (config []byte, err error) { + emptySpec := v1.NetworkAttachmentDefinitionSpec{} + if net.Spec == emptySpec { + // Network Spec empty; generate delegate from CNI JSON config + // from the configuration directory that has the same network + // name as the custom resource + config, err = GetCNIConfigFromFile(net.Name, confDir) + if err != nil { + return nil, fmt.Errorf("GetCNIConfig: err in GetCNIConfigFromFile: %v", err) + } + } else { + // Config contains a standard JSON-encoded CNI configuration + // or configuration list which defines the plugin chain to + // execute. + config, err = GetCNIConfigFromSpec(net.Spec.Config, net.Name) + if err != nil { + return nil, fmt.Errorf("GetCNIConfig: err in getCNIConfigFromSpec: %v", err) + } + } + return config, nil +} + +// GetCNIConfigFromSpec reads a CNI JSON configuration from given directory (confDir) +func GetCNIConfigFromFile(name, confDir string) ([]byte, error) { + // In the absence of valid keys in a Spec, the runtime (or + // meta-plugin) should load and execute a CNI .configlist + // or .config (in that order) file on-disk whose JSON + // "name" key matches this Network object’s name. 
+ + // In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go#getDefaultCNINetwork + files, err := libcni.ConfFiles(confDir, []string{".conf", ".json", ".conflist"}) + switch { + case err != nil: + return nil, fmt.Errorf("No networks found in %s", confDir) + case len(files) == 0: + return nil, fmt.Errorf("No networks found in %s", confDir) + } + + for _, confFile := range files { + var confList *libcni.NetworkConfigList + if strings.HasSuffix(confFile, ".conflist") { + confList, err = libcni.ConfListFromFile(confFile) + if err != nil { + return nil, fmt.Errorf("Error loading CNI conflist file %s: %v", confFile, err) + } + + if confList.Name == name || name == "" { + return confList.Bytes, nil + } + + } else { + conf, err := libcni.ConfFromFile(confFile) + if err != nil { + return nil, fmt.Errorf("Error loading CNI config file %s: %v", confFile, err) + } + + if conf.Network.Name == name || name == "" { + // Ensure the config has a "type" so we know what plugin to run. + // Also catches the case where somebody put a conflist into a conf file. 
+ if conf.Network.Type == "" { + return nil, fmt.Errorf("Error loading CNI config file %s: no 'type'; perhaps this is a .conflist?", confFile) + } + return conf.Bytes, nil + } + } + } + + return nil, fmt.Errorf("no network available in the name %s in cni dir %s", name, confDir) +} + +// GetCNIConfigFromSpec reads a CNI JSON configuration from the NetworkAttachmentDefinition +// object's Spec.Config field and fills in any missing details like the network name +func GetCNIConfigFromSpec(configData, netName string) ([]byte, error) { + var rawConfig map[string]interface{} + var err error + + configBytes := []byte(configData) + err = json.Unmarshal(configBytes, &rawConfig) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal Spec.Config: %v", err) + } + + // Inject network name if missing from Config for the thick plugin case + if n, ok := rawConfig["name"]; !ok || n == "" { + rawConfig["name"] = netName + configBytes, err = json.Marshal(rawConfig) + if err != nil { + return nil, fmt.Errorf("failed to re-marshal Spec.Config: %v", err) + } + } + + return configBytes, nil +} + +// loadDeviceInfo loads a Device Information file +func loadDeviceInfo(path string) (*v1.DeviceInfo, error) { + var devInfo v1.DeviceInfo + + bytes, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + err = json.Unmarshal(bytes, &devInfo) + if err != nil { + return nil, err + } + + return &devInfo, nil +} + +// cleanDeviceInfo removes a Device Information file +func cleanDeviceInfo(path string) error { + if _, err := os.Stat(path); !os.IsNotExist(err) { + return os.Remove(path) + } + return nil +} + +// saveDeviceInfo writes a Device Information file +func saveDeviceInfo(devInfo *v1.DeviceInfo, path string) error { + if devInfo == nil { + return fmt.Errorf("Device Information is null") + } + + dir := filepath.Dir(path) + if _, err := os.Stat(dir); os.IsNotExist(err) { + if err := os.MkdirAll(dir, os.ModeDir); err != nil { + return err + } + } + + if _, err := 
os.Stat(path); !os.IsNotExist(err) { + return fmt.Errorf("Device Information file already exists: %s", path) + } + + devInfoJSON, err := json.Marshal(devInfo) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path, devInfoJSON, 0444); err != nil { + return err + } + return nil +} + +// getDPDeviceInfoPath returns the standard Device Plugin DevInfo filename +// This filename is fixed because Device Plugin and NPWG Implementation need +// to both access file and name is not passed between them. So name is generated +// from Resource Name and DeviceID. +func getDPDeviceInfoPath(resourceName string, deviceID string) string { + return filepath.Join(baseDevInfoPath, dpDevInfoSubDir, fmt.Sprintf("%s-%s-device.json", + strings.ReplaceAll(resourceName, "/", "-"), strings.ReplaceAll(deviceID, "/", "-"))) +} + +// GetCNIDeviceInfoPath returns the standard Device Plugin DevInfo filename +// The path is fixed but the filename is flexible and determined by the caller. +func GetCNIDeviceInfoPath(filename string) string { + return filepath.Join(baseDevInfoPath, cniDevInfoSubDir, strings.ReplaceAll(filename, "/", "-")) +} + +// LoadDeviceInfoFromDP loads a DeviceInfo structure from file created by a Device Plugin +// Returns an error if the device information is malformed and (nil, nil) if it does not exist +func LoadDeviceInfoFromDP(resourceName string, deviceID string) (*v1.DeviceInfo, error) { + return loadDeviceInfo(getDPDeviceInfoPath(resourceName, deviceID)) +} + +// SaveDeviceInfoForDP saves a DeviceInfo structure created by a Device Plugin +func SaveDeviceInfoForDP(resourceName string, deviceID string, devInfo *v1.DeviceInfo) error { + return saveDeviceInfo(devInfo, getDPDeviceInfoPath(resourceName, deviceID)) +} + +// CleanDeviceInfoForDP removes a DeviceInfo DP File. 
+func CleanDeviceInfoForDP(resourceName string, deviceID string) error { + return cleanDeviceInfo(getDPDeviceInfoPath(resourceName, deviceID)) +} + +// LoadDeviceInfoFromCNI loads a DeviceInfo structure from a file created by a CNI. +// Returns an error if the device information is malformed and (nil, nil) if it does not exist +func LoadDeviceInfoFromCNI(cniPath string) (*v1.DeviceInfo, error) { + return loadDeviceInfo(cniPath) +} + +// SaveDeviceInfoForCNI saves a DeviceInfo structure created by a CNI +func SaveDeviceInfoForCNI(cniPath string, devInfo *v1.DeviceInfo) error { + return saveDeviceInfo(devInfo, cniPath) +} + +// CopyDeviceInfoForCNIFromDP saves a DeviceInfo structure created by a DP to a CNI File. +func CopyDeviceInfoForCNIFromDP(cniPath string, resourceName string, deviceID string) error { + devInfo, err := loadDeviceInfo(getDPDeviceInfoPath(resourceName, deviceID)) + if err != nil { + return err + } + return saveDeviceInfo(devInfo, cniPath) +} + +// CleanDeviceInfoForCNI removes a DeviceInfo CNI File. +func CleanDeviceInfoForCNI(cniPath string) error { + return cleanDeviceInfo(cniPath) +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils/net-attach-def.go b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils/net-attach-def.go new file mode 100644 index 000000000..4bca1645f --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils/net-attach-def.go @@ -0,0 +1,267 @@ +// Copyright (c) 2021 Kubernetes Network Plumbing Working Group +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "context" + "encoding/json" + "fmt" + "net" + "regexp" + "strings" + + cnitypes "github.com/containernetworking/cni/pkg/types" + cni100 "github.com/containernetworking/cni/pkg/types/100" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + v1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/retry" +) + +// convertDNS converts CNI's DNS type to client DNS +func convertDNS(dns cnitypes.DNS) *v1.DNS { + var v1dns v1.DNS + + v1dns.Nameservers = append([]string{}, dns.Nameservers...) + v1dns.Domain = dns.Domain + v1dns.Search = append([]string{}, dns.Search...) + v1dns.Options = append([]string{}, dns.Options...) 
+ + return &v1dns +} + +// SetNetworkStatus updates the Pod status +func SetNetworkStatus(client kubernetes.Interface, pod *corev1.Pod, statuses []v1.NetworkStatus) error { + if client == nil { + return fmt.Errorf("no client set") + } + + if pod == nil { + return fmt.Errorf("no pod set") + } + + var networkStatus []string + if statuses != nil { + for _, status := range statuses { + data, err := json.MarshalIndent(status, "", " ") + if err != nil { + return fmt.Errorf("SetNetworkStatus: error with Marshal Indent: %v", err) + } + networkStatus = append(networkStatus, string(data)) + } + } + + err := setPodNetworkStatus(client, pod, fmt.Sprintf("[%s]", strings.Join(networkStatus, ","))) + if err != nil { + return fmt.Errorf("SetNetworkStatus: failed to update the pod %s in out of cluster comm: %v", pod.Name, err) + } + return nil +} + +func setPodNetworkStatus(client kubernetes.Interface, pod *corev1.Pod, networkstatus string) error { + if len(pod.Annotations) == 0 { + pod.Annotations = make(map[string]string) + } + + coreClient := client.CoreV1() + var err error + name := pod.Name + namespace := pod.Namespace + + resultErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + pod, err = coreClient.Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(pod.Annotations) == 0 { + pod.Annotations = make(map[string]string) + } + pod.Annotations[v1.NetworkStatusAnnot] = networkstatus + _, err = coreClient.Pods(namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}) + return err + }) + if resultErr != nil { + return fmt.Errorf("status update failed for pod %s/%s: %v", pod.Namespace, pod.Name, resultErr) + } + return nil +} + +// GetNetworkStatus returns pod's network status +func GetNetworkStatus(pod *corev1.Pod) ([]v1.NetworkStatus, error) { + if pod == nil { + return nil, fmt.Errorf("cannot find pod") + } + if pod.Annotations == nil { + return nil, fmt.Errorf("cannot find pod annotation") + } + + 
netStatusesJson, ok := pod.Annotations[v1.NetworkStatusAnnot] + if !ok { + return nil, fmt.Errorf("cannot find network status") + } + + var netStatuses []v1.NetworkStatus + err := json.Unmarshal([]byte(netStatusesJson), &netStatuses) + + return netStatuses, err +} + +// CreateNetworkStatus create NetworkStatus from CNI result +func CreateNetworkStatus(r cnitypes.Result, networkName string, defaultNetwork bool, dev *v1.DeviceInfo) (*v1.NetworkStatus, error) { + netStatus := &v1.NetworkStatus{} + netStatus.Name = networkName + netStatus.Default = defaultNetwork + + // Convert whatever the IPAM result was into the current Result type + result, err := cni100.NewResultFromResult(r) + if err != nil { + return netStatus, fmt.Errorf("error convert the type.Result to cni100.Result: %v", err) + } + + for _, ifs := range result.Interfaces { + // Only pod interfaces can have sandbox information + if ifs.Sandbox != "" { + netStatus.Interface = ifs.Name + netStatus.Mac = ifs.Mac + } + } + + for _, ipconfig := range result.IPs { + netStatus.IPs = append(netStatus.IPs, ipconfig.Address.IP.String()) + } + + for _, route := range result.Routes { + if isDefaultRoute(route) { + netStatus.Gateway = append(netStatus.Gateway, route.GW.String()) + } + } + + v1dns := convertDNS(result.DNS) + netStatus.DNS = *v1dns + + if dev != nil { + netStatus.DeviceInfo = dev + } + + return netStatus, nil +} + +func isDefaultRoute(route *cnitypes.Route) bool { + return route.Dst.IP == nil && route.Dst.Mask == nil || + route.Dst.IP.Equal(net.IPv4zero) || + route.Dst.IP.Equal(net.IPv6zero) +} + +// ParsePodNetworkAnnotation parses Pod annotation for net-attach-def and get NetworkSelectionElement +func ParsePodNetworkAnnotation(pod *corev1.Pod) ([]*v1.NetworkSelectionElement, error) { + netAnnot := pod.Annotations[v1.NetworkAttachmentAnnot] + defaultNamespace := pod.Namespace + + if len(netAnnot) == 0 { + return nil, &v1.NoK8sNetworkError{Message: "no kubernetes network found"} + } + + networks, err := 
ParseNetworkAnnotation(netAnnot, defaultNamespace) + if err != nil { + return nil, err + } + return networks, nil +} + +// ParseNetworkAnnotation parses actual annotation string and get NetworkSelectionElement +func ParseNetworkAnnotation(podNetworks, defaultNamespace string) ([]*v1.NetworkSelectionElement, error) { + var networks []*v1.NetworkSelectionElement + + if podNetworks == "" { + return nil, fmt.Errorf("parsePodNetworkAnnotation: pod annotation not having \"network\" as key") + } + + if strings.IndexAny(podNetworks, "[{\"") >= 0 { + if err := json.Unmarshal([]byte(podNetworks), &networks); err != nil { + return nil, fmt.Errorf("parsePodNetworkAnnotation: failed to parse pod Network Attachment Selection Annotation JSON format: %v", err) + } + } else { + // Comma-delimited list of network attachment object names + for _, item := range strings.Split(podNetworks, ",") { + // Remove leading and trailing whitespace. + item = strings.TrimSpace(item) + + // Parse network name (i.e. /@) + netNsName, networkName, netIfName, err := parsePodNetworkObjectText(item) + if err != nil { + return nil, fmt.Errorf("parsePodNetworkAnnotation: %v", err) + } + + networks = append(networks, &v1.NetworkSelectionElement{ + Name: networkName, + Namespace: netNsName, + InterfaceRequest: netIfName, + }) + } + } + + for _, net := range networks { + if net.Namespace == "" { + net.Namespace = defaultNamespace + } + } + + return networks, nil +} + +// parsePodNetworkObjectText parses annotation text and returns +// its triplet, (namespace, name, interface name). 
+func parsePodNetworkObjectText(podnetwork string) (string, string, string, error) { + var netNsName string + var netIfName string + var networkName string + + slashItems := strings.Split(podnetwork, "/") + if len(slashItems) == 2 { + netNsName = strings.TrimSpace(slashItems[0]) + networkName = slashItems[1] + } else if len(slashItems) == 1 { + networkName = slashItems[0] + } else { + return "", "", "", fmt.Errorf("Invalid network object (failed at '/')") + } + + atItems := strings.Split(networkName, "@") + networkName = strings.TrimSpace(atItems[0]) + if len(atItems) == 2 { + netIfName = strings.TrimSpace(atItems[1]) + } else if len(atItems) != 1 { + return "", "", "", fmt.Errorf("Invalid network object (failed at '@')") + } + + // Check and see if each item matches the specification for valid attachment name. + // "Valid attachment names must be comprised of units of the DNS-1123 label format" + // [a-z0-9]([-a-z0-9]*[a-z0-9])? + // And we allow at (@), and forward slash (/) (units separated by commas) + // It must start and end alphanumerically. + allItems := []string{netNsName, networkName, netIfName} + for i := range allItems { + matched, _ := regexp.MatchString("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$", allItems[i]) + if !matched && len([]rune(allItems[i])) > 0 { + return "", "", "", fmt.Errorf(fmt.Sprintf("Failed to parse: one or more items did not match comma-delimited format (must consist of lower case alphanumeric characters). 
Must start and end with an alphanumeric character), mismatch @ '%v'", allItems[i])) + } + } + + return netNsName, networkName, netIfName, nil +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/LICENSE b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/LICENSE new file mode 100644 index 000000000..5da767a79 --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2018 © Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/controlswitches/controlswitches.go b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/controlswitches/controlswitches.go new file mode 100644 index 000000000..eb1fe610b --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/controlswitches/controlswitches.go @@ -0,0 +1,191 @@ +// Copyright (c) 2021 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package controlswitches + +import ( + "encoding/json" + "flag" + "fmt" + "strings" + + "github.com/golang/glog" + corev1 "k8s.io/api/core/v1" + + "github.com/k8snetworkplumbingwg/network-resources-injector/pkg/types" +) + +const ( + // control switch keys + controlSwitchesMainKey = "features" + + // enableHugePageDownAPIKey feature name + enableHugePageDownAPIKey = "enableHugePageDownApi" + // enableHonorExistingResourcesKey feature name + enableHonorExistingResourcesKey = "enableHonorExistingResources" +) + +// controlSwitchesStates - depicts possible feature states +type controlSwitchesStates struct { + active bool + initial bool +} + +// setActiveToInitialState - set active state to the initial state set during initialization +func (state *controlSwitchesStates) setActiveToInitialState() { + state.active = state.initial +} + +// setActiveState - set active state to the passed value +func (state *controlSwitchesStates) setActiveState(value bool) { + state.active = value +} + +type ControlSwitches struct { + // pointers to command line arguments + injectHugepageDownAPI *bool + resourceNameKeysFlag *string + resourcesHonorFlag *bool + + configuration map[string]controlSwitchesStates + resourceNameKeys []string + isValid bool +} + +// SetupControlSwitchesFlags - setup all control switches flags that can be set as command line NRI arguments +// :return pointer to the structure that should be initialized with InitControlSwitches +func SetupControlSwitchesFlags() *ControlSwitches { + var initFlags ControlSwitches + + initFlags.injectHugepageDownAPI = flag.Bool("injectHugepageDownApi", false, "Enable hugepage requests and limits into Downward API.") + initFlags.resourceNameKeysFlag = flag.String("network-resource-name-keys", "k8s.v1.cni.cncf.io/resourceName", "comma separated resource name keys --network-resource-name-keys.") + initFlags.resourcesHonorFlag = flag.Bool("honor-resources", false, "Honor the existing requested resources requests & limits 
--honor-resources") + + return &initFlags +} + +// InitControlSwitches - initialize internal control switches structures based on command line arguments +func (switches *ControlSwitches) InitControlSwitches() { + switches.configuration = make(map[string]controlSwitchesStates) + + state := controlSwitchesStates{initial: *switches.injectHugepageDownAPI, active: *switches.injectHugepageDownAPI} + switches.configuration[enableHugePageDownAPIKey] = state + + state = controlSwitchesStates{initial: *switches.resourcesHonorFlag, active: *switches.resourcesHonorFlag} + switches.configuration[enableHonorExistingResourcesKey] = state + + switches.resourceNameKeys = setResourceNameKeys(*switches.resourceNameKeysFlag) + + switches.isValid = true +} + +// setResourceNameKeys extracts resources from a string and add them to resourceNameKeys array +func setResourceNameKeys(keys string) []string { + var resourceNameKeys []string + + for _, resourceNameKey := range strings.Split(keys, ",") { + resourceNameKey = strings.TrimSpace(resourceNameKey) + resourceNameKeys = append(resourceNameKeys, resourceNameKey) + } + + return resourceNameKeys +} + +func (switches *ControlSwitches) GetResourceNameKeys() []string { + return switches.resourceNameKeys +} + +func (switches *ControlSwitches) IsHugePagedownAPIEnabled() bool { + return switches.configuration[enableHugePageDownAPIKey].active +} + +func (switches *ControlSwitches) IsHonorExistingResourcesEnabled() bool { + return switches.configuration[enableHonorExistingResourcesKey].active +} + +func (switches *ControlSwitches) IsResourcesNameEnabled() bool { + return len(*switches.resourceNameKeysFlag) > 0 +} + +// IsValid returns true when ControlSwitches structure was initialized, false otherwise +func (switches *ControlSwitches) IsValid() bool { + return switches.isValid +} + +// GetAllFeaturesState returns string with information if feature is active or not +func (switches *ControlSwitches) GetAllFeaturesState() string { + var output 
string + + output = fmt.Sprintf("HugePageInject: %t", switches.IsHugePagedownAPIEnabled()) + output = output + " / " + fmt.Sprintf("HonorExistingResources: %t", switches.IsHonorExistingResourcesEnabled()) + output = output + " / " + fmt.Sprintf("EnableResourceNames: %t", switches.IsResourcesNameEnabled()) + + return output +} + +// setAllFeaturesToInitialState - reset feature state to initial one set during NRI initialization +func (switches *ControlSwitches) setAllFeaturesToInitialState() { + state := switches.configuration[enableHugePageDownAPIKey] + state.setActiveToInitialState() + switches.configuration[enableHugePageDownAPIKey] = state + + state = switches.configuration[enableHonorExistingResourcesKey] + state.setActiveToInitialState() + switches.configuration[enableHonorExistingResourcesKey] = state +} + +// setFeatureToState set given feature to the state defined in the map object +func (switches *ControlSwitches) setFeatureToState(featureName string, switchObj map[string]bool) { + if featureState, available := switchObj[featureName]; available { + state := switches.configuration[featureName] + state.setActiveState(featureState) + switches.configuration[featureName] = state + } else { + state := switches.configuration[featureName] + state.setActiveToInitialState() + switches.configuration[featureName] = state + } +} + +// ProcessControlSwitchesConfigMap sets on the fly control switches +// :param controlSwitchesCm - Kubernetes ConfigMap with control switches definition +func (switches *ControlSwitches) ProcessControlSwitchesConfigMap(controlSwitchesCm *corev1.ConfigMap) { + var err error + if v, fileExists := controlSwitchesCm.Data[types.ConfigMapMainFileKey]; fileExists { + var obj map[string]json.RawMessage + + if err = json.Unmarshal([]byte(v), &obj); err != nil { + glog.Warningf("Error during json unmarshal %v", err) + switches.setAllFeaturesToInitialState() + return + } + + if controlSwitches, mainExists := obj[controlSwitchesMainKey]; mainExists { + 
var switchObj map[string]bool + + if err = json.Unmarshal(controlSwitches, &switchObj); err != nil { + glog.Warningf("Unable to unmarshal [%s] from configmap, err: %v", controlSwitchesMainKey, err) + switches.setAllFeaturesToInitialState() + return + } + + switches.setFeatureToState(enableHugePageDownAPIKey, switchObj) + switches.setFeatureToState(enableHonorExistingResourcesKey, switchObj) + } else { + glog.Warningf("Map does not contains [%s]", controlSwitchesMainKey) + } + } else { + glog.Warningf("Map does not contains [%s]", types.ConfigMapMainFileKey) + } +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/controlswitches/controlswitchesaccessors.go b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/controlswitches/controlswitchesaccessors.go new file mode 100644 index 000000000..a22aecb2e --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/controlswitches/controlswitchesaccessors.go @@ -0,0 +1,30 @@ +// Copyright (c) 2021 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build unittests + +// This file should be only include in build system during unit tests execution. +// Special method allows to setup structure with values needed by test. 
+ +package controlswitches + +func SetupControlSwitchesUnitTests(downAPI, honor *bool, name *string) *ControlSwitches { + var initFlags ControlSwitches + + initFlags.injectHugepageDownAPI = downAPI + initFlags.resourceNameKeysFlag = name + initFlags.resourcesHonorFlag = honor + + return &initFlags +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/tools/cache.go b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/tools/cache.go new file mode 100644 index 000000000..d2d29d561 --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/tools/cache.go @@ -0,0 +1,145 @@ +// Copyright (c) 2021 Nordix Foundation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cache + +import ( + "sync" + "sync/atomic" + "time" + + "github.com/golang/glog" + cniv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned" + "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +type NetAttachDefCache struct { + networkAnnotationsMap map[string]map[string]string + networkAnnotationsMapMutex *sync.Mutex + stopper chan struct{} + isRunning int32 +} + +type NetAttachDefCacheService interface { + Start() + Stop() + Get(namespace string, networkName string) map[string]string +} + +func Create() NetAttachDefCacheService { + return &NetAttachDefCache{make(map[string]map[string]string), + &sync.Mutex{}, make(chan struct{}), 0} +} + +// Start creates informer for NetworkAttachmentDefinition events and populate the local cache +func (nc *NetAttachDefCache) Start() { + factory := externalversions.NewSharedInformerFactoryWithOptions(setupNetAttachDefClient(), 0, externalversions.WithNamespace("")) + informer := factory.K8sCniCncfIo().V1().NetworkAttachmentDefinitions().Informer() + // mutex to serialize the events. 
+ mutex := &sync.Mutex{} + + informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + mutex.Lock() + defer mutex.Unlock() + netAttachDef := obj.(*cniv1.NetworkAttachmentDefinition) + nc.put(netAttachDef.Namespace, netAttachDef.Name, netAttachDef.Annotations) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + mutex.Lock() + defer mutex.Unlock() + oldNetAttachDef := oldObj.(*cniv1.NetworkAttachmentDefinition) + newNetAttachDef := newObj.(*cniv1.NetworkAttachmentDefinition) + if oldNetAttachDef.GetResourceVersion() == newNetAttachDef.GetResourceVersion() { + glog.Infof("no change in net-attach-def %s, ignoring update event", nc.getKey(oldNetAttachDef.Namespace, newNetAttachDef.Name)) + return + } + nc.remove(oldNetAttachDef.Namespace, oldNetAttachDef.Name) + nc.put(newNetAttachDef.Namespace, newNetAttachDef.Name, newNetAttachDef.Annotations) + }, + DeleteFunc: func(obj interface{}) { + mutex.Lock() + defer mutex.Unlock() + netAttachDef := obj.(*cniv1.NetworkAttachmentDefinition) + nc.remove(netAttachDef.Namespace, netAttachDef.Name) + }, + }) + go func() { + atomic.StoreInt32(&(nc.isRunning), int32(1)) + // informer Run blocks until informer is stopped + glog.Infof("starting net-attach-def informer") + informer.Run(nc.stopper) + glog.Infof("net-attach-def informer is stopped") + atomic.StoreInt32(&(nc.isRunning), int32(0)) + }() +} + +// Stop teardown the NetworkAttachmentDefinition informer +func (nc *NetAttachDefCache) Stop() { + close(nc.stopper) + tEnd := time.Now().Add(3 * time.Second) + for tEnd.After(time.Now()) { + if atomic.LoadInt32(&nc.isRunning) == 0 { + glog.Infof("net-attach-def informer is no longer running, proceed to clean up nad cache") + break + } + time.Sleep(600 * time.Millisecond) + } + nc.networkAnnotationsMapMutex.Lock() + nc.networkAnnotationsMap = nil + nc.networkAnnotationsMapMutex.Unlock() +} + +func (nc *NetAttachDefCache) put(namespace, networkName string, annotations map[string]string) { + 
nc.networkAnnotationsMapMutex.Lock() + nc.networkAnnotationsMap[nc.getKey(namespace, networkName)] = annotations + nc.networkAnnotationsMapMutex.Unlock() +} + +// Get returns annotations map for the given namespace and network name, if it's not available +// return nil +func (nc *NetAttachDefCache) Get(namespace, networkName string) map[string]string { + nc.networkAnnotationsMapMutex.Lock() + defer nc.networkAnnotationsMapMutex.Unlock() + if annotationsMap, exists := nc.networkAnnotationsMap[nc.getKey(namespace, networkName)]; exists { + return annotationsMap + } + return nil +} + +func (nc *NetAttachDefCache) remove(namespace, networkName string) { + nc.networkAnnotationsMapMutex.Lock() + delete(nc.networkAnnotationsMap, nc.getKey(namespace, networkName)) + nc.networkAnnotationsMapMutex.Unlock() +} + +func (nc *NetAttachDefCache) getKey(namespace, networkName string) string { + return namespace + "/" + networkName +} + +// setupNetAttachDefClient creates K8s client for net-attach-def crd +func setupNetAttachDefClient() versioned.Interface { + config, err := rest.InClusterConfig() + if err != nil { + glog.Fatal(err) + } + clientset, err := versioned.NewForConfig(config) + if err != nil { + glog.Fatal(err) + } + return clientset +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/types/types.go b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/types/types.go new file mode 100644 index 000000000..6ab1ede7e --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/types/types.go @@ -0,0 +1,34 @@ +// Copyright (c) 2020 Red Hat, Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +const ( + DownwardAPIMountPath = "/etc/podnetinfo" + AnnotationsPath = "annotations" + LabelsPath = "labels" + EnvNameContainerName = "CONTAINER_NAME" + Hugepages1GRequestPath = "hugepages_1G_request" + Hugepages2MRequestPath = "hugepages_2M_request" + Hugepages1GLimitPath = "hugepages_1G_limit" + Hugepages2MLimitPath = "hugepages_2M_limit" + ConfigMapMainFileKey = "config.json" +) + +// JsonPatchOperation the JSON path operation +type JsonPatchOperation struct { + Operation string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value,omitempty"` +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/userdefinedinjections/userdefinedinjections.go b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/userdefinedinjections/userdefinedinjections.go new file mode 100644 index 000000000..963f92105 --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/userdefinedinjections/userdefinedinjections.go @@ -0,0 +1,112 @@ +package userdefinedinjections + +import ( + "encoding/json" + "reflect" + "strings" + "sync" + + "github.com/golang/glog" + corev1 "k8s.io/api/core/v1" + + "github.com/k8snetworkplumbingwg/network-resources-injector/pkg/types" +) + +const ( + userDefinedInjectionsMainKey = "user-defined-injections" +) + +// UserDefinedInjections user defined injections +type UserDefinedInjections struct { + sync.Mutex + Patchs map[string]types.JsonPatchOperation +} + +// CreateUserInjectionsStructure returns empty UserDefinedInjections 
structure +func CreateUserInjectionsStructure() *UserDefinedInjections { + var userDefinedInjects = UserDefinedInjections{Patchs: make(map[string]types.JsonPatchOperation)} + return &userDefinedInjects +} + +// SetUserDefinedInjections sets additional injections to be applied in Pod spec +func (userDefinedInjects *UserDefinedInjections) SetUserDefinedInjections(injectionsCm *corev1.ConfigMap) { + if v, fileExists := injectionsCm.Data[types.ConfigMapMainFileKey]; fileExists { + var obj map[string]json.RawMessage + var err error + if err = json.Unmarshal([]byte(v), &obj); err != nil { + glog.Warningf("Error during json unmarshal of main: %v", err) + return + } + + if userDefinedInjections, mainExists := obj[userDefinedInjectionsMainKey]; mainExists { + var userDefinedInjectionsObj map[string]json.RawMessage + if err = json.Unmarshal([]byte(userDefinedInjections), &userDefinedInjectionsObj); err != nil { + glog.Warningf("Error during json unmarshal of injections: %v", err) + return + } + + // lock for writing + userDefinedInjects.Lock() + defer userDefinedInjects.Unlock() + + var patch types.JsonPatchOperation + var userDefinedPatchs = userDefinedInjects.Patchs + + for k, value := range userDefinedInjectionsObj { + existValue, exists := userDefinedPatchs[k] + // unmarshal userDefined injection to json patch + err := json.Unmarshal([]byte(value), &patch) + if err != nil { + glog.Errorf("Failed to unmarshal user-defined injection: %v", v) + continue + } + // metadata.Annotations is the only supported field for user definition + // jsonPatchOperation.Path should be "/metadata/annotations" + if patch.Path != "/metadata/annotations" { + glog.Errorf("Path: %v is not supported, only /metadata/annotations can be defined by user", patch.Path) + continue + } + + if !exists || !reflect.DeepEqual(existValue, patch) { + glog.Infof("Initializing user-defined injections with key: %v, value: %v", k, v) + userDefinedPatchs[k] = patch + } + } + + // remove stale entries from 
userDefined configMap + for k := range userDefinedPatchs { + if _, ok := userDefinedInjectionsObj[k]; ok { + continue + } + glog.Infof("Removing stale entry: %v from user-defined injections", k) + delete(userDefinedPatchs, k) + } + } else { + glog.Warningf("Map does not contains [%s]. Clear old entries.", userDefinedInjectionsMainKey) + userDefinedInjects.Patchs = make(map[string]types.JsonPatchOperation) + } + } else { + glog.Warningf("Map does not contains [%s]. Clear old entries", types.ConfigMapMainFileKey) + userDefinedInjects.Patchs = make(map[string]types.JsonPatchOperation) + } +} + +// CreateUserDefinedPatch creates customized patch for the specified POD +func (userDefinedInjects *UserDefinedInjections) CreateUserDefinedPatch(pod corev1.Pod) ([]types.JsonPatchOperation, error) { + var userDefinedPatch []types.JsonPatchOperation + + // lock for reading + userDefinedInjects.Lock() + defer userDefinedInjects.Unlock() + + for k, v := range userDefinedInjects.Patchs { + // The userDefinedInjects will be injected when: + // 1. Pod labels contain the patch key defined in userDefinedInjects + // 2. The value of patch key in pod labels(not in userDefinedInjects) is "true" + if podValue, exists := pod.ObjectMeta.Labels[k]; exists && strings.ToLower(podValue) == "true" { + userDefinedPatch = append(userDefinedPatch, v) + } + } + + return userDefinedPatch, nil +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/webhook/tlsutils.go b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/webhook/tlsutils.go new file mode 100644 index 000000000..ac24a4c99 --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/webhook/tlsutils.go @@ -0,0 +1,140 @@ +// Copyright (c) 2019 Multus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package webhook + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "sync" + + "github.com/golang/glog" +) + +type tlsKeypairReloader struct { + certMutex sync.RWMutex + cert *tls.Certificate + certPath string + keyPath string +} + +type clientCertPool struct { + certPool *x509.CertPool + certPaths *ClientCAFlags + insecure bool +} + +type ClientCAFlags []string + +func (i *ClientCAFlags) String() string { + return "" +} + +func (i *ClientCAFlags) Set(path string) error { + *i = append(*i, path) + return nil +} + +func (keyPair *tlsKeypairReloader) Reload() error { + newCert, err := tls.LoadX509KeyPair(keyPair.certPath, keyPair.keyPath) + if err != nil { + return err + } + glog.V(2).Infof("cetificate reloaded") + keyPair.certMutex.Lock() + defer keyPair.certMutex.Unlock() + keyPair.cert = &newCert + return nil +} + +func (keyPair *tlsKeypairReloader) GetCertificateFunc() func(*tls.ClientHelloInfo) (*tls.Certificate, error) { + return func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + keyPair.certMutex.RLock() + defer keyPair.certMutex.RUnlock() + return keyPair.cert, nil + } +} + +// NewTlsKeypairReloader reload tlsKeypairReloader struct +func NewTlsKeypairReloader(certPath, keyPath string) (*tlsKeypairReloader, error) { + result := &tlsKeypairReloader{ + certPath: certPath, + keyPath: keyPath, + } + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, err + } + result.cert = &cert + + return result, nil +} + +//NewClientCertPool will load a single client CA +func 
NewClientCertPool(clientCaPaths *ClientCAFlags, insecure bool) (*clientCertPool, error) { + pool := &clientCertPool{ + certPaths: clientCaPaths, + insecure: insecure, + } + if !pool.insecure { + if err := pool.Load(); err != nil { + return nil, err + } + } + return pool, nil +} + +//Load a certificate into the client CA pool +func (pool *clientCertPool) Load() error { + if pool.insecure { + glog.Infof("can not load client CA pool. Remove --insecure flag to enable.") + return nil + } + + if len(*pool.certPaths) == 0 { + return fmt.Errorf("no client CA file path(s) found") + } + + pool.certPool = x509.NewCertPool() + for _, path := range *pool.certPaths { + caCertPem, err := ioutil.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to load client CA file from path '%s'", path) + } + if ok := pool.certPool.AppendCertsFromPEM(caCertPem); !ok { + return fmt.Errorf("failed to parse client CA file from path '%s'", path) + } + glog.Infof("added client CA to cert pool from path '%s'", path) + } + glog.Infof("added '%d' client CA(s) to cert pool", len(*pool.certPaths)) + return nil +} + +//GetCertPool returns a client CA pool +func (pool *clientCertPool) GetCertPool() *x509.CertPool { + if pool.insecure { + return nil + } + return pool.certPool +} + +//GetClientAuth determines the policy the http server will follow for TLS Client Authentication +func GetClientAuth(insecure bool) tls.ClientAuthType { + if insecure { + return tls.NoClientCert + } + return tls.RequireAndVerifyClientCert +} diff --git a/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/webhook/webhook.go b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/webhook/webhook.go new file mode 100644 index 000000000..2d8f05f5f --- /dev/null +++ b/vendor/github.com/k8snetworkplumbingwg/network-resources-injector/pkg/webhook/webhook.go @@ -0,0 +1,961 @@ +// Copyright (c) 2018 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// 
you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package webhook + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "strings" + + "github.com/golang/glog" + cniv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + "github.com/pkg/errors" + multus "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types" + + admissionv1 "k8s.io/api/admission/v1" + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "github.com/k8snetworkplumbingwg/network-resources-injector/pkg/controlswitches" + netcache "github.com/k8snetworkplumbingwg/network-resources-injector/pkg/tools" + "github.com/k8snetworkplumbingwg/network-resources-injector/pkg/types" + "github.com/k8snetworkplumbingwg/network-resources-injector/pkg/userdefinedinjections" +) + +type hugepageResourceData struct { + ResourceName string + ContainerName string + Path string +} + +const ( + networksAnnotationKey = "k8s.v1.cni.cncf.io/networks" + nodeSelectorKey = "k8s.v1.cni.cncf.io/nodeSelector" + defaultNetworkAnnotationKey = "v1.multus-cni.io/default-network" +) + +var ( + clientset kubernetes.Interface + nadCache netcache.NetAttachDefCacheService + userDefinedInjections *userdefinedinjections.UserDefinedInjections + controlSwitches 
*controlswitches.ControlSwitches +) + +func SetControlSwitches(activeConfiguration *controlswitches.ControlSwitches) { + controlSwitches = activeConfiguration +} + +func SetUserInjectionStructure(injections *userdefinedinjections.UserDefinedInjections) { + userDefinedInjections = injections +} + +func prepareAdmissionReviewResponse(allowed bool, message string, ar *admissionv1.AdmissionReview) error { + if ar.Request != nil { + ar.Response = &admissionv1.AdmissionResponse{ + UID: ar.Request.UID, + Allowed: allowed, + } + if message != "" { + ar.Response.Result = &metav1.Status{ + Message: message, + } + } + return nil + } + return errors.New("received empty AdmissionReview request") +} + +func readAdmissionReview(req *http.Request, w http.ResponseWriter) (*admissionv1.AdmissionReview, int, error) { + var body []byte + + if req.Body != nil { + req.Body = http.MaxBytesReader(w, req.Body, 1<<20) + if data, err := ioutil.ReadAll(req.Body); err == nil { + body = data + } + } + + if len(body) == 0 { + err := errors.New("Error reading HTTP request: empty body") + glog.Errorf("%s", err) + return nil, http.StatusBadRequest, err + } + + /* validate HTTP request headers */ + contentType := req.Header.Get("Content-Type") + if contentType != "application/json" { + err := errors.Errorf("Invalid Content-Type='%s', expected 'application/json'", contentType) + glog.Errorf("%v", err) + return nil, http.StatusUnsupportedMediaType, err + } + + /* read AdmissionReview from the request body */ + ar, err := deserializeAdmissionReview(body) + if err != nil { + err := errors.Wrap(err, "error deserializing AdmissionReview") + glog.Errorf("%v", err) + return nil, http.StatusBadRequest, err + } + + return ar, http.StatusOK, nil +} + +func deserializeAdmissionReview(body []byte) (*admissionv1.AdmissionReview, error) { + ar := &admissionv1.AdmissionReview{} + runtimeScheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(runtimeScheme) + deserializer := 
codecs.UniversalDeserializer() + _, _, err := deserializer.Decode(body, nil, ar) + + /* Decode() won't return an error if the data wasn't actual AdmissionReview */ + if err == nil && ar.TypeMeta.Kind != "AdmissionReview" { + err = errors.New("received object is not an AdmissionReview") + } + + return ar, err +} + +func deserializeNetworkAttachmentDefinition(ar *admissionv1.AdmissionReview) (cniv1.NetworkAttachmentDefinition, error) { + /* unmarshal NetworkAttachmentDefinition from AdmissionReview request */ + netAttachDef := cniv1.NetworkAttachmentDefinition{} + err := json.Unmarshal(ar.Request.Object.Raw, &netAttachDef) + return netAttachDef, err +} + +func deserializePod(ar *admissionv1.AdmissionReview) (corev1.Pod, error) { + /* unmarshal Pod from AdmissionReview request */ + pod := corev1.Pod{} + err := json.Unmarshal(ar.Request.Object.Raw, &pod) + if pod.ObjectMeta.Namespace != "" { + return pod, err + } + + // AdmissionRequest contains an optional Namespace field + if ar.Request.Namespace != "" { + pod.ObjectMeta.Namespace = ar.Request.Namespace + return pod, nil + } + + ownerRef := pod.ObjectMeta.OwnerReferences + if ownerRef != nil && len(ownerRef) > 0 { + namespace, err := getNamespaceFromOwnerReference(pod.ObjectMeta.OwnerReferences[0]) + if err != nil { + return pod, err + } + pod.ObjectMeta.Namespace = namespace + } + + // pod.ObjectMeta.Namespace may still be empty at this point, + // but there is a chance that net-attach-def annotation contains + // a valid namespace + return pod, err +} + +func getNamespaceFromOwnerReference(ownerRef metav1.OwnerReference) (namespace string, err error) { + namespace = "" + switch ownerRef.Kind { + case "ReplicaSet": + var replicaSets *v1.ReplicaSetList + replicaSets, err = clientset.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return + } + for _, replicaSet := range replicaSets.Items { + if replicaSet.ObjectMeta.Name == ownerRef.Name && replicaSet.ObjectMeta.UID == 
ownerRef.UID { + namespace = replicaSet.ObjectMeta.Namespace + err = nil + break + } + } + case "DaemonSet": + var daemonSets *v1.DaemonSetList + daemonSets, err = clientset.AppsV1().DaemonSets("").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return + } + for _, daemonSet := range daemonSets.Items { + if daemonSet.ObjectMeta.Name == ownerRef.Name && daemonSet.ObjectMeta.UID == ownerRef.UID { + namespace = daemonSet.ObjectMeta.Namespace + err = nil + break + } + } + case "StatefulSet": + var statefulSets *v1.StatefulSetList + statefulSets, err = clientset.AppsV1().StatefulSets("").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return + } + for _, statefulSet := range statefulSets.Items { + if statefulSet.ObjectMeta.Name == ownerRef.Name && statefulSet.ObjectMeta.UID == ownerRef.UID { + namespace = statefulSet.ObjectMeta.Namespace + err = nil + break + } + } + case "ReplicationController": + var replicationControllers *corev1.ReplicationControllerList + replicationControllers, err = clientset.CoreV1().ReplicationControllers("").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return + } + for _, replicationController := range replicationControllers.Items { + if replicationController.ObjectMeta.Name == ownerRef.Name && replicationController.ObjectMeta.UID == ownerRef.UID { + namespace = replicationController.ObjectMeta.Namespace + err = nil + break + } + } + default: + glog.Infof("owner reference kind is not supported: %v, using default namespace", ownerRef.Kind) + namespace = "default" + return + } + + if namespace == "" { + err = errors.New("pod namespace is not found") + } + + return + +} + +func toSafeJsonPatchKey(in string) string { + out := strings.Replace(in, "~", "~0", -1) + out = strings.Replace(out, "/", "~1", -1) + return out +} + +func parsePodNetworkSelections(podNetworks, defaultNamespace string) ([]*multus.NetworkSelectionElement, error) { + var networkSelections []*multus.NetworkSelectionElement + + if 
len(podNetworks) == 0 { + err := errors.New("empty string passed as network selection elements list") + glog.Error(err) + return nil, err + } + + /* try to parse as JSON array */ + err := json.Unmarshal([]byte(podNetworks), &networkSelections) + + /* if failed, try to parse as comma separated */ + if err != nil { + glog.Infof("'%s' is not in JSON format: %s... trying to parse as comma separated network selections list", podNetworks, err) + for _, networkSelection := range strings.Split(podNetworks, ",") { + networkSelection = strings.TrimSpace(networkSelection) + networkSelectionElement, err := parsePodNetworkSelectionElement(networkSelection, defaultNamespace) + if err != nil { + err := errors.Wrap(err, "error parsing network selection element") + glog.Error(err) + return nil, err + } + networkSelections = append(networkSelections, networkSelectionElement) + } + } + + /* fill missing namespaces with default value */ + for _, networkSelection := range networkSelections { + if networkSelection.Namespace == "" { + if defaultNamespace == "" { + // Ignore the AdmissionReview request when the following conditions are met: + // 1) net-attach-def annotation doesn't contain a valid namespace + // 2) defaultNamespace retrieved from admission request is empty + // Pod admission would fail in subsquent call "getNetworkAttachmentDefinition" + // if no namespace is specified. 
We don't want to fail the pod creation + // in such case since it is possible that pod is not a SR-IOV pod + glog.Warningf("The admission request doesn't contain a valid namespace, ignoring...") + return nil, nil + } else { + networkSelection.Namespace = defaultNamespace + } + } + } + + return networkSelections, nil +} + +func parsePodNetworkSelectionElement(selection, defaultNamespace string) (*multus.NetworkSelectionElement, error) { + var namespace, name, netInterface string + var networkSelectionElement *multus.NetworkSelectionElement + + units := strings.Split(selection, "/") + switch len(units) { + case 1: + namespace = defaultNamespace + name = units[0] + case 2: + namespace = units[0] + name = units[1] + default: + err := errors.Errorf("invalid network selection element - more than one '/' rune in: '%s'", selection) + glog.Info(err) + return networkSelectionElement, err + } + + units = strings.Split(name, "@") + switch len(units) { + case 1: + name = units[0] + netInterface = "" + case 2: + name = units[0] + netInterface = units[1] + default: + err := errors.Errorf("invalid network selection element - more than one '@' rune in: '%s'", selection) + glog.Info(err) + return networkSelectionElement, err + } + + validNameRegex, _ := regexp.Compile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`) + for _, unit := range []string{namespace, name, netInterface} { + ok := validNameRegex.MatchString(unit) + if !ok && len(unit) > 0 { + err := errors.Errorf("at least one of the network selection units is invalid: error found at '%s'", unit) + glog.Info(err) + return networkSelectionElement, err + } + } + + networkSelectionElement = &multus.NetworkSelectionElement{ + Namespace: namespace, + Name: name, + InterfaceRequest: netInterface, + } + + return networkSelectionElement, nil +} + +func getNetworkAttachmentDefinition(namespace, name string) (*cniv1.NetworkAttachmentDefinition, error) { + path := fmt.Sprintf("/apis/k8s.cni.cncf.io/v1/namespaces/%s/network-attachment-definitions/%s", 
namespace, name) + rawNetworkAttachmentDefinition, err := clientset.ExtensionsV1beta1().RESTClient().Get().AbsPath(path).DoRaw(context.TODO()) + if err != nil { + err := errors.Wrapf(err, "could not get Network Attachment Definition %s/%s", namespace, name) + glog.Error(err) + return nil, err + } + + networkAttachmentDefinition := cniv1.NetworkAttachmentDefinition{} + json.Unmarshal(rawNetworkAttachmentDefinition, &networkAttachmentDefinition) + + return &networkAttachmentDefinition, nil +} + +func parseNetworkAttachDefinition(net *multus.NetworkSelectionElement, reqs map[string]int64, nsMap map[string]string) (map[string]int64, map[string]string, error) { + /* for each network in annotation ask API server for network-attachment-definition */ + annotationsMap := nadCache.Get(net.Namespace, net.Name) + if annotationsMap == nil { + glog.Infof("cache entry not found, retrieving network attachment definition '%s/%s' from api server", net.Namespace, net.Name) + networkAttachmentDefinition, err := getNetworkAttachmentDefinition(net.Namespace, net.Name) + if err != nil { + /* if doesn't exist: deny pod */ + reason := errors.Wrapf(err, "could not find network attachment definition '%s/%s'", net.Namespace, net.Name) + glog.Error(reason) + return reqs, nsMap, reason + } + annotationsMap = networkAttachmentDefinition.GetAnnotations() + } + glog.Infof("network attachment definition '%s/%s' found", net.Namespace, net.Name) + + /* network object exists, so check if it contains resourceName annotation */ + for _, networkResourceNameKey := range controlSwitches.GetResourceNameKeys() { + if resourceName, exists := annotationsMap[networkResourceNameKey]; exists { + /* add resource to map/increment if it was already there */ + reqs[resourceName]++ + glog.Infof("resource '%s' needs to be requested for network '%s/%s'", resourceName, net.Namespace, net.Name) + } else { + glog.Infof("network '%s/%s' doesn't use custom resources, skipping...", net.Namespace, net.Name) + } + } + + /* 
parse the net-attach-def annotations for node selector label and add it to the desiredNsMap */ + if ns, exists := annotationsMap[nodeSelectorKey]; exists { + nsNameValue := strings.Split(ns, "=") + nsNameValueLen := len(nsNameValue) + if nsNameValueLen > 2 { + reason := fmt.Errorf("node selector in net-attach-def %s has more than one label", net.Name) + glog.Error(reason) + return reqs, nsMap, reason + } else if nsNameValueLen == 2 { + nsMap[strings.TrimSpace(nsNameValue[0])] = strings.TrimSpace(nsNameValue[1]) + } else { + nsMap[strings.TrimSpace(ns)] = "" + } + } + + return reqs, nsMap, nil +} + +func handleValidationError(w http.ResponseWriter, ar *admissionv1.AdmissionReview, orgErr error) { + err := prepareAdmissionReviewResponse(false, orgErr.Error(), ar) + if err != nil { + err := errors.Wrap(err, "error preparing AdmissionResponse") + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + writeResponse(w, ar) +} + +func writeResponse(w http.ResponseWriter, ar *admissionv1.AdmissionReview) { + glog.Infof("sending response to the Kubernetes API server") + resp, _ := json.Marshal(ar) + w.Write(resp) +} + +func patchEmptyResources(patch []types.JsonPatchOperation, containerIndex uint, key string) []types.JsonPatchOperation { + patch = append(patch, types.JsonPatchOperation{ + Operation: "add", + Path: "/spec/containers/" + fmt.Sprintf("%d", containerIndex) + "/resources/" + toSafeJsonPatchKey(key), + Value: corev1.ResourceList{}, + }) + return patch +} + +func addVolDownwardAPI(patch []types.JsonPatchOperation, hugepageResourceList []hugepageResourceData, pod *corev1.Pod) []types.JsonPatchOperation { + + if len(pod.Spec.Volumes) == 0 { + patch = append(patch, types.JsonPatchOperation{ + Operation: "add", + Path: "/spec/volumes", + Value: []corev1.Volume{}, + }) + } + + dAPIItems := []corev1.DownwardAPIVolumeFile{} + + if pod.Labels != nil && len(pod.Labels) > 0 { + labels := corev1.ObjectFieldSelector{ + FieldPath: "metadata.labels", + } + dAPILabels 
:= corev1.DownwardAPIVolumeFile{ + Path: types.LabelsPath, + FieldRef: &labels, + } + dAPIItems = append(dAPIItems, dAPILabels) + } + + if pod.Annotations != nil && len(pod.Annotations) > 0 { + annotations := corev1.ObjectFieldSelector{ + FieldPath: "metadata.annotations", + } + dAPIAnnotations := corev1.DownwardAPIVolumeFile{ + Path: types.AnnotationsPath, + FieldRef: &annotations, + } + dAPIItems = append(dAPIItems, dAPIAnnotations) + } + + for _, hugepageResource := range hugepageResourceList { + hugepageSelector := corev1.ResourceFieldSelector{ + Resource: hugepageResource.ResourceName, + ContainerName: hugepageResource.ContainerName, + Divisor: *resource.NewQuantity(1*1024*1024, resource.BinarySI), + } + dAPIHugepage := corev1.DownwardAPIVolumeFile{ + Path: hugepageResource.Path, + ResourceFieldRef: &hugepageSelector, + } + dAPIItems = append(dAPIItems, dAPIHugepage) + } + + dAPIVolSource := corev1.DownwardAPIVolumeSource{ + Items: dAPIItems, + } + volSource := corev1.VolumeSource{ + DownwardAPI: &dAPIVolSource, + } + vol := corev1.Volume{ + Name: "podnetinfo", + VolumeSource: volSource, + } + + patch = append(patch, types.JsonPatchOperation{ + Operation: "add", + Path: "/spec/volumes/-", + Value: vol, + }) + + return patch +} + +func addVolumeMount(patch []types.JsonPatchOperation, containers []corev1.Container) []types.JsonPatchOperation { + + vm := corev1.VolumeMount{ + Name: "podnetinfo", + ReadOnly: true, + MountPath: types.DownwardAPIMountPath, + } + for containerIndex, container := range containers { + if len(container.VolumeMounts) == 0 { + patch = append(patch, types.JsonPatchOperation{ + Operation: "add", + Path: "/spec/containers/" + strconv.Itoa(containerIndex) + "/volumeMounts", + Value: []corev1.VolumeMount{}, + }) + } + patch = append(patch, types.JsonPatchOperation{ + Operation: "add", + Path: "/spec/containers/" + strconv.Itoa(containerIndex) + "/volumeMounts/-", + Value: vm, + }) + } + + return patch +} + +func createVolPatch(patch 
[]types.JsonPatchOperation, hugepageResourceList []hugepageResourceData, pod *corev1.Pod) []types.JsonPatchOperation { + patch = addVolumeMount(patch, pod.Spec.Containers) + patch = addVolDownwardAPI(patch, hugepageResourceList, pod) + return patch +} + +func addEnvVar(patch []types.JsonPatchOperation, containerIndex int, firstElement bool, + envName string, envVal string) []types.JsonPatchOperation { + + env := corev1.EnvVar{ + Name: envName, + Value: envVal, + } + + if firstElement { + patch = append(patch, types.JsonPatchOperation{ + Operation: "add", + Path: "/spec/containers/" + fmt.Sprintf("%d", containerIndex) + "/env", + Value: []corev1.EnvVar{env}, + }) + } else { + patch = append(patch, types.JsonPatchOperation{ + Operation: "add", + Path: "/spec/containers/" + fmt.Sprintf("%d", containerIndex) + "/env/-", + Value: env, + }) + } + + return patch +} + +func createEnvPatch(patch []types.JsonPatchOperation, container *corev1.Container, + containerIndex int, envName string, envVal string) []types.JsonPatchOperation { + + // Determine if requested ENV already exists + found := false + firstElement := false + if len(container.Env) != 0 { + for _, env := range container.Env { + if env.Name == envName { + found = true + if env.Value != envVal { + glog.Warningf("Error, adding env '%s', name existed but value different: '%s' != '%s'", + envName, env.Value, envVal) + } + break + } + } + } else { + firstElement = true + } + + if !found { + patch = addEnvVar(patch, containerIndex, firstElement, envName, envVal) + } + return patch +} + +func createNodeSelectorPatch(patch []types.JsonPatchOperation, existing map[string]string, desired map[string]string) []types.JsonPatchOperation { + targetMap := make(map[string]string) + if existing != nil { + for k, v := range existing { + targetMap[k] = v + } + } + if desired != nil { + for k, v := range desired { + targetMap[k] = v + } + } + if len(targetMap) == 0 { + return patch + } + patch = append(patch, 
types.JsonPatchOperation{ + Operation: "add", + Path: "/spec/nodeSelector", + Value: targetMap, + }) + return patch +} + +func createResourcePatch(patch []types.JsonPatchOperation, Containers []corev1.Container, resourceRequests map[string]int64) []types.JsonPatchOperation { + /* check whether resources paths exists in the first container and add as the first patches if missing */ + if len(Containers[0].Resources.Requests) == 0 { + patch = patchEmptyResources(patch, 0, "requests") + } + if len(Containers[0].Resources.Limits) == 0 { + patch = patchEmptyResources(patch, 0, "limits") + } + + for resourceName := range resourceRequests { + for _, container := range Containers { + if _, exists := container.Resources.Limits[corev1.ResourceName(resourceName)]; exists { + delete(resourceRequests, resourceName) + } + if _, exists := container.Resources.Requests[corev1.ResourceName(resourceName)]; exists { + delete(resourceRequests, resourceName) + } + } + } + + resourceList := *getResourceList(resourceRequests) + + for resource, quantity := range resourceList { + patch = appendResource(patch, resource.String(), quantity, quantity) + } + + return patch +} + +func updateResourcePatch(patch []types.JsonPatchOperation, Containers []corev1.Container, resourceRequests map[string]int64) []types.JsonPatchOperation { + var existingrequestsMap map[corev1.ResourceName]resource.Quantity + var existingLimitsMap map[corev1.ResourceName]resource.Quantity + + if len(Containers[0].Resources.Requests) == 0 { + patch = patchEmptyResources(patch, 0, "requests") + } else { + existingrequestsMap = Containers[0].Resources.Requests + } + if len(Containers[0].Resources.Limits) == 0 { + patch = patchEmptyResources(patch, 0, "limits") + } else { + existingLimitsMap = Containers[0].Resources.Limits + } + + resourceList := *getResourceList(resourceRequests) + + for resourceName, quantity := range resourceList { + reqQuantity := quantity + limitQuantity := quantity + if value, ok := 
existingrequestsMap[resourceName]; ok { + reqQuantity.Add(value) + } + if value, ok := existingLimitsMap[resourceName]; ok { + limitQuantity.Add(value) + } + patch = appendResource(patch, resourceName.String(), reqQuantity, limitQuantity) + } + + return patch +} + +func appendResource(patch []types.JsonPatchOperation, resourceName string, reqQuantity, limitQuantity resource.Quantity) []types.JsonPatchOperation { + patch = append(patch, types.JsonPatchOperation{ + Operation: "add", + Path: "/spec/containers/0/resources/requests/" + toSafeJsonPatchKey(resourceName), + Value: reqQuantity, + }) + patch = append(patch, types.JsonPatchOperation{ + Operation: "add", + Path: "/spec/containers/0/resources/limits/" + toSafeJsonPatchKey(resourceName), + Value: limitQuantity, + }) + + return patch +} + +func getResourceList(resourceRequests map[string]int64) *corev1.ResourceList { + resourceList := corev1.ResourceList{} + for name, number := range resourceRequests { + resourceList[corev1.ResourceName(name)] = *resource.NewQuantity(number, resource.DecimalSI) + } + + return &resourceList +} + +func appendAddAnnotPatch(patch []types.JsonPatchOperation, pod corev1.Pod, userDefinedPatch []types.JsonPatchOperation) []types.JsonPatchOperation { + annotations := make(map[string]string) + patchOp := types.JsonPatchOperation{ + Operation: "add", + Path: "/metadata/annotations", + Value: annotations, + } + + for _, p := range userDefinedPatch { + if p.Path == "/metadata/annotations" && p.Operation == "add" { + //loop over user defined injected annotations key-value pairs + for k, v := range p.Value.(map[string]interface{}) { + if _, exists := annotations[k]; exists { + glog.Warningf("ignoring duplicate user defined injected annotation: %s: %s", k, v.(string)) + } else { + annotations[k] = v.(string) + } + } + } + } + + if len(annotations) > 0 { + // attempt to add existing pod annotation but do not override + for k, v := range pod.ObjectMeta.Annotations { + if _, exists := 
annotations[k]; !exists { + annotations[k] = v + } + } + patch = append(patch, patchOp) + } + + return patch +} + +func appendUserDefinedPatch(patch []types.JsonPatchOperation, pod corev1.Pod, userDefinedPatch []types.JsonPatchOperation) []types.JsonPatchOperation { + //Add operation for annotations is currently only supported + return appendAddAnnotPatch(patch, pod, userDefinedPatch) +} + +func getNetworkSelections(annotationKey string, pod corev1.Pod, userDefinedPatch []types.JsonPatchOperation) (string, bool) { + // User defined annotateKey takes precedence than userDefined injections + glog.Infof("search %s in original pod annotations", annotationKey) + nets, exists := pod.ObjectMeta.Annotations[annotationKey] + if exists { + glog.Infof("%s is defined in original pod annotations", annotationKey) + return nets, exists + } + + glog.Infof("search %s in user-defined injections", annotationKey) + // userDefinedPatch may contain user defined net-attach-defs + if len(userDefinedPatch) > 0 { + for _, p := range userDefinedPatch { + if p.Operation == "add" && p.Path == "/metadata/annotations" { + for k, v := range p.Value.(map[string]interface{}) { + if k == annotationKey { + glog.Infof("%s is found in user-defined annotations", annotationKey) + return v.(string), true + } + } + } + } + } + glog.Infof("%s is not found in either pod annotations or user-defined injections", annotationKey) + return "", false +} + +// MutateHandler handles AdmissionReview requests and sends responses back to the K8s API server +func MutateHandler(w http.ResponseWriter, req *http.Request) { + glog.Infof("Received mutation request. 
Features status: %s", controlSwitches.GetAllFeaturesState()) + var err error + + /* read AdmissionReview from the HTTP request */ + ar, httpStatus, err := readAdmissionReview(req, w) + if err != nil { + http.Error(w, err.Error(), httpStatus) + return + } + + /* read pod annotations */ + /* if networks missing skip everything */ + pod, err := deserializePod(ar) + if err != nil { + handleValidationError(w, ar, err) + return + } + glog.Infof("AdmissionReview request received for pod %s/%s", pod.ObjectMeta.Namespace, pod.ObjectMeta.Name) + + userDefinedPatch, err := userDefinedInjections.CreateUserDefinedPatch(pod) + if err != nil { + glog.Warningf("failed to create user-defined injection patch for pod %s/%s, err: %v", + pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, err) + } + + defaultNetSelection, defExist := getNetworkSelections(defaultNetworkAnnotationKey, pod, userDefinedPatch) + additionalNetSelections, addExists := getNetworkSelections(networksAnnotationKey, pod, userDefinedPatch) + + if defExist || addExists { + /* map of resources request needed by a pod and a number of them */ + resourceRequests := make(map[string]int64) + + /* map of node labels on which pod needs to be scheduled*/ + desiredNsMap := make(map[string]string) + + if defaultNetSelection != "" { + defNetwork, err := parsePodNetworkSelections(defaultNetSelection, pod.ObjectMeta.Namespace) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if len(defNetwork) == 1 { + resourceRequests, desiredNsMap, err = parseNetworkAttachDefinition(defNetwork[0], resourceRequests, desiredNsMap) + if err != nil { + err = prepareAdmissionReviewResponse(false, err.Error(), ar) + if err != nil { + glog.Errorf("error preparing AdmissionReview response for pod %s/%s, error: %v", + pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + writeResponse(w, ar) + return + } + } + } + if additionalNetSelections != "" { + /* 
unmarshal list of network selection objects */ + networks, err := parsePodNetworkSelections(additionalNetSelections, pod.ObjectMeta.Namespace) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + for _, n := range networks { + resourceRequests, desiredNsMap, err = parseNetworkAttachDefinition(n, resourceRequests, desiredNsMap) + if err != nil { + err = prepareAdmissionReviewResponse(false, err.Error(), ar) + if err != nil { + glog.Errorf("error preparing AdmissionReview response for pod %s/%s, error: %v", + pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + writeResponse(w, ar) + return + } + } + glog.Infof("pod %s/%s has resource requests: %v and node selectors: %v", pod.ObjectMeta.Namespace, + pod.ObjectMeta.Name, resourceRequests, desiredNsMap) + } + + /* patch with custom resources requests and limits */ + err = prepareAdmissionReviewResponse(true, "allowed", ar) + if err != nil { + glog.Errorf("error preparing AdmissionReview response for pod %s/%s, error: %v", + pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + var patch []types.JsonPatchOperation + if len(resourceRequests) == 0 { + glog.Infof("pod %s/%s doesn't need any custom network resources", pod.ObjectMeta.Namespace, pod.ObjectMeta.Name) + } else { + if controlSwitches.IsHonorExistingResourcesEnabled() { + patch = updateResourcePatch(patch, pod.Spec.Containers, resourceRequests) + } else { + patch = createResourcePatch(patch, pod.Spec.Containers, resourceRequests) + } + + // Determine if hugepages are being requested for a given container, + // and if so, expose the value to the container via Downward API. 
+ var hugepageResourceList []hugepageResourceData + if controlSwitches.IsHugePagedownAPIEnabled() { + for containerIndex, container := range pod.Spec.Containers { + found := false + if len(container.Resources.Requests) != 0 { + if quantity, exists := container.Resources.Requests["hugepages-1Gi"]; exists && quantity.IsZero() == false { + hugepageResource := hugepageResourceData{ + ResourceName: "requests.hugepages-1Gi", + ContainerName: container.Name, + Path: types.Hugepages1GRequestPath + "_" + container.Name, + } + hugepageResourceList = append(hugepageResourceList, hugepageResource) + found = true + } + if quantity, exists := container.Resources.Requests["hugepages-2Mi"]; exists && quantity.IsZero() == false { + hugepageResource := hugepageResourceData{ + ResourceName: "requests.hugepages-2Mi", + ContainerName: container.Name, + Path: types.Hugepages2MRequestPath + "_" + container.Name, + } + hugepageResourceList = append(hugepageResourceList, hugepageResource) + found = true + } + } + if len(container.Resources.Limits) != 0 { + if quantity, exists := container.Resources.Limits["hugepages-1Gi"]; exists && quantity.IsZero() == false { + hugepageResource := hugepageResourceData{ + ResourceName: "limits.hugepages-1Gi", + ContainerName: container.Name, + Path: types.Hugepages1GLimitPath + "_" + container.Name, + } + hugepageResourceList = append(hugepageResourceList, hugepageResource) + found = true + } + if quantity, exists := container.Resources.Limits["hugepages-2Mi"]; exists && quantity.IsZero() == false { + hugepageResource := hugepageResourceData{ + ResourceName: "limits.hugepages-2Mi", + ContainerName: container.Name, + Path: types.Hugepages2MLimitPath + "_" + container.Name, + } + hugepageResourceList = append(hugepageResourceList, hugepageResource) + found = true + } + } + + // If Hugepages are being added to Downward API, add the + // 'container.Name' as an environment variable to the container + // so container knows its name and can process hugepages 
properly. + if found { + patch = createEnvPatch(patch, &container, containerIndex, + types.EnvNameContainerName, container.Name) + } + } + } + patch = createVolPatch(patch, hugepageResourceList, &pod) + patch = appendUserDefinedPatch(patch, pod, userDefinedPatch) + } + patch = createNodeSelectorPatch(patch, pod.Spec.NodeSelector, desiredNsMap) + glog.Infof("patch after all mutations: %v for pod %s/%s", patch, pod.ObjectMeta.Namespace, pod.ObjectMeta.Name) + + patchBytes, _ := json.Marshal(patch) + ar.Response.Patch = patchBytes + ar.Response.PatchType = func() *admissionv1.PatchType { + pt := admissionv1.PatchTypeJSONPatch + return &pt + }() + } else { + /* network annotation not provided or empty */ + glog.Infof("pod %s/%s spec doesn't have network annotations. Skipping...", pod.ObjectMeta.Namespace, pod.ObjectMeta.Name) + err = prepareAdmissionReviewResponse(true, "Pod spec doesn't have network annotations. Skipping...", ar) + if err != nil { + glog.Errorf("error preparing AdmissionReview response for pod %s/%s, error: %v", + pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + } + + writeResponse(w, ar) +} + +// SetNetAttachDefCache sets up the net attach def cache service +func SetNetAttachDefCache(cache netcache.NetAttachDefCacheService) { + nadCache = cache +} + +// SetupInClusterClient setups K8s client to communicate with the API server +func SetupInClusterClient() kubernetes.Interface { + /* setup Kubernetes API client */ + config, err := rest.InClusterConfig() + if err != nil { + glog.Fatal(err) + } + clientset, err = kubernetes.NewForConfig(config) + if err != nil { + glog.Fatal(err) + } + return clientset +} diff --git a/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/LICENSE b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/LICENSE new file mode 100644 index 000000000..8f71f43fe --- /dev/null +++ b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/LICENSE @@ -0,0 +1,202 @@ + 
Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/NOTICE b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/NOTICE new file mode 100644 index 000000000..251a004bc --- /dev/null +++ b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/NOTICE @@ -0,0 +1,2 @@ +Copyright 2016 Intel Corporation +Copyright 2021 Multus Authors diff --git a/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging/doc.go b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging/doc.go new file mode 100644 index 000000000..977b06a41 --- /dev/null +++ b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging/doc.go @@ -0,0 +1,16 @@ +// Copyright (c) 2022 Multus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package logging is the package that contains logging library. +package logging diff --git a/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging/logging.go b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging/logging.go new file mode 100644 index 000000000..4cf001e8d --- /dev/null +++ b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging/logging.go @@ -0,0 +1,188 @@ +// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2021 Multus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + lumberjack "gopkg.in/natefinch/lumberjack.v2" +) + +// Level type +type Level uint32 + +// PanicLevel...MaxLevel indicates the logging level +const ( + PanicLevel Level = iota + ErrorLevel + VerboseLevel + DebugLevel + MaxLevel + UnknownLevel +) + +var loggingStderr bool +var loggingW io.Writer +var loggingLevel Level +var logger *lumberjack.Logger + +const defaultTimestampFormat = time.RFC3339 + +// LogOptions specifies the configuration of the log +type LogOptions struct { + MaxAge *int `json:"maxAge,omitempty"` + MaxSize *int `json:"maxSize,omitempty"` + MaxBackups *int `json:"maxBackups,omitempty"` + Compress *bool `json:"compress,omitempty"` +} + +// SetLogOptions set the LoggingOptions of NetConf +func SetLogOptions(options *LogOptions) { + // give some default value + logger.MaxSize = 100 + logger.MaxAge = 5 + logger.MaxBackups = 5 + logger.Compress = true + if options != nil { + if options.MaxAge != nil { + logger.MaxAge = *options.MaxAge + } + if options.MaxSize != nil { + logger.MaxSize = *options.MaxSize + } + if options.MaxBackups != nil { + logger.MaxBackups = *options.MaxBackups + } + if options.Compress != nil { + logger.Compress = *options.Compress + } + } + loggingW = logger +} + +func (l Level) String() string { + switch l { + case PanicLevel: + return "panic" + case VerboseLevel: + return "verbose" + case ErrorLevel: + return "error" + case DebugLevel: + return "debug" + } + return "unknown" +} + +func printf(level Level, format string, a 
...interface{}) { + header := "%s [%s] " + t := time.Now() + if level > loggingLevel { + return + } + + if loggingStderr { + fmt.Fprintf(os.Stderr, header, t.Format(defaultTimestampFormat), level) + fmt.Fprintf(os.Stderr, format, a...) + fmt.Fprintf(os.Stderr, "\n") + } + + if loggingW != nil { + fmt.Fprintf(loggingW, header, t.Format(defaultTimestampFormat), level) + fmt.Fprintf(loggingW, format, a...) + fmt.Fprintf(loggingW, "\n") + } +} + +// Debugf prints logging if logging level >= debug +func Debugf(format string, a ...interface{}) { + printf(DebugLevel, format, a...) +} + +// Verbosef prints logging if logging level >= verbose +func Verbosef(format string, a ...interface{}) { + printf(VerboseLevel, format, a...) +} + +// Errorf prints logging if logging level >= error +func Errorf(format string, a ...interface{}) error { + printf(ErrorLevel, format, a...) + return fmt.Errorf(format, a...) +} + +// Panicf prints logging plus stack trace. This should be used only for unrecoverable error +func Panicf(format string, a ...interface{}) { + printf(PanicLevel, format, a...) 
+ printf(PanicLevel, "========= Stack trace output ========") + printf(PanicLevel, "%+v", errors.New("Multus Panic")) + printf(PanicLevel, "========= Stack trace output end ========") +} + +// GetLoggingLevel gets current logging level +func GetLoggingLevel() Level { + return loggingLevel +} + +func getLoggingLevel(levelStr string) Level { + switch strings.ToLower(levelStr) { + case "debug": + return DebugLevel + case "verbose": + return VerboseLevel + case "error": + return ErrorLevel + case "panic": + return PanicLevel + } + fmt.Fprintf(os.Stderr, "multus logging: cannot set logging level to %s\n", levelStr) + return UnknownLevel +} + +// SetLogLevel sets logging level +func SetLogLevel(levelStr string) { + level := getLoggingLevel(levelStr) + if level < MaxLevel { + loggingLevel = level + } +} + +// SetLogStderr sets flag for logging stderr output +func SetLogStderr(enable bool) { + loggingStderr = enable +} + +// SetLogFile sets logging file +func SetLogFile(filename string) { + if filename == "" { + return + } + + logger.Filename = filename + loggingW = logger + +} + +func init() { + loggingStderr = true + loggingW = nil + loggingLevel = PanicLevel + logger = &lumberjack.Logger{} +} diff --git a/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types/conf.go b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types/conf.go new file mode 100644 index 000000000..36d8d6f9f --- /dev/null +++ b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types/conf.go @@ -0,0 +1,611 @@ +// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2021 Multus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding/json" + "fmt" + "net" + "os" + "strings" + "sync" + + "github.com/containernetworking/cni/libcni" + "github.com/containernetworking/cni/pkg/skel" + cni100 "github.com/containernetworking/cni/pkg/types/100" + "github.com/containernetworking/cni/pkg/version" + nadutils "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils" + "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging" +) + +const ( + defaultCNIDir = "/var/lib/cni/multus" + defaultConfDir = "/etc/cni/multus/net.d" + defaultBinDir = "/opt/cni/bin" + defaultReadinessIndicatorFile = "" + defaultMultusNamespace = "kube-system" + defaultNonIsolatedNamespace = "default" +) + +// ChrootMutex provides lock to access host filesystem +var ChrootMutex *sync.Mutex + +// LoadDelegateNetConfList reads DelegateNetConf from bytes +func LoadDelegateNetConfList(bytes []byte, delegateConf *DelegateNetConf) error { + logging.Debugf("LoadDelegateNetConfList: %s, %v", string(bytes), delegateConf) + + if err := json.Unmarshal(bytes, &delegateConf.ConfList); err != nil { + return logging.Errorf("LoadDelegateNetConfList: error unmarshalling delegate conflist: %v", err) + } + + if delegateConf.ConfList.Plugins == nil { + return logging.Errorf("LoadDelegateNetConfList: delegate must have the 'type' or 'plugin' field") + } + + if delegateConf.ConfList.Plugins[0].Type == "" { + return logging.Errorf("LoadDelegateNetConfList: a plugin delegate must have the 'type' field") + } + delegateConf.ConfListPlugin = true + delegateConf.Name = 
delegateConf.ConfList.Name + return nil +} + +// LoadDelegateNetConf converts raw CNI JSON into a DelegateNetConf structure +func LoadDelegateNetConf(bytes []byte, netElement *NetworkSelectionElement, deviceID string, resourceName string) (*DelegateNetConf, error) { + var err error + logging.Debugf("LoadDelegateNetConf: %s, %v, %s", string(bytes), netElement, deviceID) + + delegateConf := &DelegateNetConf{} + if err := json.Unmarshal(bytes, &delegateConf.Conf); err != nil { + return nil, logging.Errorf("LoadDelegateNetConf: error unmarshalling delegate config: %v", err) + } + delegateConf.Name = delegateConf.Conf.Name + + // Do some minimal validation + if delegateConf.Conf.Type == "" { + if err := LoadDelegateNetConfList(bytes, delegateConf); err != nil { + return nil, logging.Errorf("LoadDelegateNetConf: failed with: %v", err) + } + if deviceID != "" { + bytes, err = addDeviceIDInConfList(bytes, deviceID) + if err != nil { + return nil, logging.Errorf("LoadDelegateNetConf: failed to add deviceID in NetConfList bytes: %v", err) + } + delegateConf.ResourceName = resourceName + delegateConf.DeviceID = deviceID + } + if netElement != nil && netElement.CNIArgs != nil { + bytes, err = addCNIArgsInConfList(bytes, netElement.CNIArgs) + if err != nil { + return nil, logging.Errorf("LoadDelegateNetConf(): failed to add cni-args in NetConfList bytes: %v", err) + } + } + } else { + if deviceID != "" { + bytes, err = delegateAddDeviceID(bytes, deviceID) + if err != nil { + return nil, logging.Errorf("LoadDelegateNetConf: failed to add deviceID in NetConf bytes: %v", err) + } + // Save them for housekeeping + delegateConf.ResourceName = resourceName + delegateConf.DeviceID = deviceID + } + if netElement != nil && netElement.CNIArgs != nil { + bytes, err = addCNIArgsInConfig(bytes, netElement.CNIArgs) + if err != nil { + return nil, logging.Errorf("LoadDelegateNetConf(): failed to add cni-args in NetConfList bytes: %v", err) + } + } + } + + if netElement != nil { + if 
netElement.Name != "" { + // Overwrite CNI config name with net-attach-def name + delegateConf.Name = fmt.Sprintf("%s/%s", netElement.Namespace, netElement.Name) + } + if netElement.InterfaceRequest != "" { + delegateConf.IfnameRequest = netElement.InterfaceRequest + } + if netElement.MacRequest != "" { + delegateConf.MacRequest = netElement.MacRequest + } + if netElement.IPRequest != nil { + delegateConf.IPRequest = netElement.IPRequest + } + if netElement.BandwidthRequest != nil { + delegateConf.BandwidthRequest = netElement.BandwidthRequest + } + if netElement.PortMappingsRequest != nil { + delegateConf.PortMappingsRequest = netElement.PortMappingsRequest + } + if netElement.GatewayRequest != nil { + var list []net.IP + if delegateConf.GatewayRequest != nil { + list = append(*delegateConf.GatewayRequest, *netElement.GatewayRequest...) + } else { + list = *netElement.GatewayRequest + } + delegateConf.GatewayRequest = &list + } + if netElement.InfinibandGUIDRequest != "" { + delegateConf.InfinibandGUIDRequest = netElement.InfinibandGUIDRequest + } + if netElement.DeviceID != "" { + if deviceID != "" { + logging.Debugf("Warning: Both RuntimeConfig and ResourceMap provide deviceID. Ignoring RuntimeConfig") + } else { + delegateConf.DeviceID = netElement.DeviceID + } + } + } + + delegateConf.Bytes = bytes + + return delegateConf, nil +} + +// mergeCNIRuntimeConfig creates CNI runtimeconfig from delegate +func mergeCNIRuntimeConfig(runtimeConfig *RuntimeConfig, delegate *DelegateNetConf) *RuntimeConfig { + logging.Debugf("mergeCNIRuntimeConfig: %v %v", runtimeConfig, delegate) + var mergedRuntimeConfig RuntimeConfig + + if runtimeConfig == nil { + mergedRuntimeConfig = RuntimeConfig{} + } else { + mergedRuntimeConfig = *runtimeConfig + } + + // multus inject RuntimeConfig only in case of non MasterPlugin. 
+ if delegate.MasterPlugin != true { + logging.Debugf("mergeCNIRuntimeConfig: add runtimeConfig for net-attach-def: %v", mergedRuntimeConfig) + if delegate.PortMappingsRequest != nil { + mergedRuntimeConfig.PortMaps = delegate.PortMappingsRequest + } + if delegate.BandwidthRequest != nil { + mergedRuntimeConfig.Bandwidth = delegate.BandwidthRequest + } + if delegate.IPRequest != nil { + mergedRuntimeConfig.IPs = delegate.IPRequest + } + if delegate.MacRequest != "" { + mergedRuntimeConfig.Mac = delegate.MacRequest + } + if delegate.InfinibandGUIDRequest != "" { + mergedRuntimeConfig.InfinibandGUID = delegate.InfinibandGUIDRequest + } + if delegate.DeviceID != "" { + mergedRuntimeConfig.DeviceID = delegate.DeviceID + } + logging.Debugf("mergeCNIRuntimeConfig: add runtimeConfig for net-attach-def: %v", mergedRuntimeConfig) + } + return &mergedRuntimeConfig +} + +// CreateCNIRuntimeConf create CNI RuntimeConf for a delegate. If delegate configuration +// exists, merge data with the runtime config. +func CreateCNIRuntimeConf(args *skel.CmdArgs, k8sArgs *K8sArgs, ifName string, rc *RuntimeConfig, delegate *DelegateNetConf) (*libcni.RuntimeConf, string) { + podName := string(k8sArgs.K8S_POD_NAME) + podNamespace := string(k8sArgs.K8S_POD_NAMESPACE) + podUID := string(k8sArgs.K8S_POD_UID) + sandboxID := string(k8sArgs.K8S_POD_INFRA_CONTAINER_ID) + return newCNIRuntimeConf(args.ContainerID, sandboxID, podName, podNamespace, podUID, args.Netns, ifName, rc, delegate) +} + +// newCNIRuntimeConf creates the CNI `RuntimeConf` for the given ADD / DEL request. 
+func newCNIRuntimeConf(containerID, sandboxID, podName, podNamespace, podUID, netNs, ifName string, rc *RuntimeConfig, delegate *DelegateNetConf) (*libcni.RuntimeConf, string) { + logging.Debugf("LoadCNIRuntimeConf: %s, %v %v", ifName, rc, delegate) + + delegateRc := delegateRuntimeConfig(containerID, delegate, rc, ifName) + // In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go#buildCNIRuntimeConf + rt := createRuntimeConf(netNs, podNamespace, podName, containerID, sandboxID, podUID, ifName) + + var cniDeviceInfoFile string + + // Populate rt.Args with CNI_ARGS if the rt.Args value is not set + cniArgs := os.Getenv("CNI_ARGS") + if cniArgs != "" { + logging.Debugf("ARGS: %s", cniArgs) + for _, arg := range strings.Split(cniArgs, ";") { + // SplitN to handle = within values, like BLAH=foo=bar + keyval := strings.SplitN(arg, "=", 2) + if len(keyval) != 2 { + logging.Errorf("CreateCNIRuntimeConf: CNI_ARGS %s %s %d is not recognized as CNI arg, skipped", arg, keyval, len(keyval)) + continue + } + + envKey := string(keyval[0]) + envVal := string(keyval[1]) + found := false + for i := range rt.Args { + // Update existing key if its value is empty + if rt.Args[i][0] == envKey && rt.Args[i][1] == "" && envVal != "" { + logging.Debugf("CreateCNIRuntimeConf: add new val: %s", arg) + rt.Args[i][1] = envVal + found = true + break + } + } + if !found { + // Add the new key if it didn't exist yet + rt.Args = append(rt.Args, [2]string{envKey, envVal}) + } + } + } + + if delegateRc != nil { + cniDeviceInfoFile = delegateRc.CNIDeviceInfoFile + capabilityArgs := map[string]interface{}{} + if len(delegateRc.PortMaps) != 0 { + capabilityArgs["portMappings"] = delegateRc.PortMaps + } + if delegateRc.Bandwidth != nil { + capabilityArgs["bandwidth"] = delegateRc.Bandwidth + } + if len(delegateRc.IPs) != 0 { + capabilityArgs["ips"] = delegateRc.IPs + } + if len(delegateRc.Mac) != 0 { + capabilityArgs["mac"] = delegateRc.Mac + } + if len(delegateRc.InfinibandGUID) != 0 { 
+ capabilityArgs["infinibandGUID"] = delegateRc.InfinibandGUID + } + if delegateRc.DeviceID != "" { + capabilityArgs["deviceID"] = delegateRc.DeviceID + } + if delegateRc.CNIDeviceInfoFile != "" { + capabilityArgs["CNIDeviceInfoFile"] = delegateRc.CNIDeviceInfoFile + } + rt.CapabilityArgs = capabilityArgs + } + return rt, cniDeviceInfoFile +} + +// createRuntimeConf creates the CNI `RuntimeConf` for the given ADD / DEL request. +func createRuntimeConf(netNs, podNamespace, podName, containerID, sandboxID, podUID, ifName string) *libcni.RuntimeConf { + return &libcni.RuntimeConf{ + ContainerID: containerID, + NetNS: netNs, + IfName: ifName, + // NOTE: Verbose logging depends on this order, so please keep Args order. + Args: [][2]string{ + {"IgnoreUnknown", "true"}, + {"K8S_POD_NAMESPACE", podNamespace}, + {"K8S_POD_NAME", podName}, + {"K8S_POD_INFRA_CONTAINER_ID", sandboxID}, + {"K8S_POD_UID", podUID}, + }, + } +} + +// delegateRuntimeConfig creates the CNI `RuntimeConf` for the given ADD / DEL request. 
+func delegateRuntimeConfig(containerID string, delegate *DelegateNetConf, rc *RuntimeConfig, ifName string) *RuntimeConfig { + var delegateRc *RuntimeConfig + + if delegate != nil { + delegateRc = mergeCNIRuntimeConfig(rc, delegate) + if delegateRc.DeviceID != "" { + if delegateRc.CNIDeviceInfoFile != "" { + logging.Debugf("Warning: Existing value of CNIDeviceInfoFile will be overwritten %s", delegateRc.CNIDeviceInfoFile) + } + autoDeviceInfo := fmt.Sprintf("%s-%s_%s", delegate.Name, containerID, ifName) + delegateRc.CNIDeviceInfoFile = nadutils.GetCNIDeviceInfoPath(autoDeviceInfo) + logging.Debugf("Adding auto-generated CNIDeviceInfoFile: %s", delegateRc.CNIDeviceInfoFile) + } + } else { + delegateRc = rc + } + return delegateRc +} + +// GetGatewayFromResult retrieves gateway IP addresses from CNI result +func GetGatewayFromResult(result *cni100.Result) []net.IP { + var gateways []net.IP + + for _, route := range result.Routes { + if mask, _ := route.Dst.Mask.Size(); mask == 0 { + gateways = append(gateways, route.GW) + } + } + return gateways +} + +// GetDefaultNetConf returns NetConf with default variables +func GetDefaultNetConf() *NetConf { + // LogToStderr's default value set to true + return &NetConf{ + BinDir: defaultBinDir, + ConfDir: defaultConfDir, + CNIDir: defaultCNIDir, + LogToStderr: true, + MultusNamespace: defaultMultusNamespace, + NonIsolatedNamespaces: []string{defaultNonIsolatedNamespace}, + ReadinessIndicatorFile: defaultReadinessIndicatorFile, + SystemNamespaces: []string{"kube-system"}, + } + +} + +// LoadNetConf converts inputs (i.e. 
stdin) to NetConf +func LoadNetConf(bytes []byte) (*NetConf, error) { + netconf := GetDefaultNetConf() + + logging.Debugf("LoadNetConf: %s", string(bytes)) + if err := json.Unmarshal(bytes, netconf); err != nil { + return nil, logging.Errorf("LoadNetConf: failed to load netconf: %v", err) + } + + // Logging + logging.SetLogStderr(netconf.LogToStderr) + logging.SetLogOptions(netconf.LogOptions) + if netconf.LogFile != "" { + logging.SetLogFile(netconf.LogFile) + } + if netconf.LogLevel != "" { + logging.SetLogLevel(netconf.LogLevel) + } + + // Parse previous result + if netconf.RawPrevResult != nil { + resultBytes, err := json.Marshal(netconf.RawPrevResult) + if err != nil { + return nil, logging.Errorf("LoadNetConf: could not serialize prevResult: %v", err) + } + res, err := version.NewResult(netconf.CNIVersion, resultBytes) + if err != nil { + return nil, logging.Errorf("LoadNetConf: could not parse prevResult: %v", err) + } + netconf.RawPrevResult = nil + netconf.PrevResult, err = cni100.NewResultFromResult(res) + if err != nil { + return nil, logging.Errorf("LoadNetConf: could not convert result to current version: %v", err) + } + } + + // Delegates must always be set. If no kubeconfig is present, the + // delegates are executed in-order. If a kubeconfig is present, + // at least one delegate must be present and the first delegate is + // the master plugin. Kubernetes CRD delegates are then appended to + // the existing delegate list and all delegates executed in-order. 
+ + if len(netconf.RawDelegates) == 0 && netconf.ClusterNetwork == "" { + return nil, logging.Errorf("LoadNetConf: at least one delegate/clusterNetwork must be specified") + } + + // setup namespace isolation + if netconf.RawNonIsolatedNamespaces != "" { + // Parse the comma separated list + nonisolated := strings.Split(netconf.RawNonIsolatedNamespaces, ",") + // Cleanup the whitespace + for i, nonv := range nonisolated { + nonisolated[i] = strings.TrimSpace(nonv) + } + netconf.NonIsolatedNamespaces = nonisolated + } + + // get RawDelegates and put delegates field + if netconf.ClusterNetwork == "" { + // for Delegates + if len(netconf.RawDelegates) == 0 { + return nil, logging.Errorf("LoadNetConf: at least one delegate must be specified") + } + for idx, rawConf := range netconf.RawDelegates { + bytes, err := json.Marshal(rawConf) + if err != nil { + return nil, logging.Errorf("LoadNetConf: error marshalling delegate %d config: %v", idx, err) + } + delegateConf, err := LoadDelegateNetConf(bytes, nil, "", "") + if err != nil { + return nil, logging.Errorf("LoadNetConf: failed to load delegate %d config: %v", idx, err) + } + netconf.Delegates = append(netconf.Delegates, delegateConf) + } + netconf.RawDelegates = nil + + // First delegate is always the master plugin + netconf.Delegates[0].MasterPlugin = true + } + + return netconf, nil +} + +// AddDelegates appends the new delegates to the delegates list +func (n *NetConf) AddDelegates(newDelegates []*DelegateNetConf) error { + logging.Debugf("AddDelegates: %v", newDelegates) + n.Delegates = append(n.Delegates, newDelegates...) 
+ return nil +} + +// delegateAddDeviceID injects deviceID information in delegate bytes +func delegateAddDeviceID(inBytes []byte, deviceID string) ([]byte, error) { + var rawConfig map[string]interface{} + var err error + + err = json.Unmarshal(inBytes, &rawConfig) + if err != nil { + return nil, logging.Errorf("delegateAddDeviceID: failed to unmarshal inBytes: %v", err) + } + // Inject deviceID + rawConfig["deviceID"] = deviceID + rawConfig["pciBusID"] = deviceID + configBytes, err := json.Marshal(rawConfig) + if err != nil { + return nil, logging.Errorf("delegateAddDeviceID: failed to re-marshal Spec.Config: %v", err) + } + logging.Debugf("delegateAddDeviceID updated configBytes %s", string(configBytes)) + return configBytes, nil +} + +// addDeviceIDInConfList injects deviceID information in delegate bytes +func addDeviceIDInConfList(inBytes []byte, deviceID string) ([]byte, error) { + var rawConfig map[string]interface{} + var err error + + err = json.Unmarshal(inBytes, &rawConfig) + if err != nil { + return nil, logging.Errorf("addDeviceIDInConfList: failed to unmarshal inBytes: %v", err) + } + + pList, ok := rawConfig["plugins"] + if !ok { + return nil, logging.Errorf("addDeviceIDInConfList: unable to get plugin list") + } + + pMap, ok := pList.([]interface{}) + if !ok { + return nil, logging.Errorf("addDeviceIDInConfList: unable to typecast plugin list") + } + + for idx, plugin := range pMap { + currentPlugin, ok := plugin.(map[string]interface{}) + if !ok { + return nil, logging.Errorf("addDeviceIDInConfList: unable to typecast plugin #%d", idx) + } + // Inject deviceID + currentPlugin["deviceID"] = deviceID + currentPlugin["pciBusID"] = deviceID + } + + configBytes, err := json.Marshal(rawConfig) + if err != nil { + return nil, logging.Errorf("addDeviceIDInConfList: failed to re-marshal: %v", err) + } + logging.Debugf("addDeviceIDInConfList: updated configBytes %s", string(configBytes)) + return configBytes, nil +} + +// injectCNIArgs injects given args to 
cniConfig +func injectCNIArgs(cniConfig *map[string]interface{}, args *map[string]interface{}) error { + if argsval, ok := (*cniConfig)["args"]; ok { + argsvalmap := argsval.(map[string]interface{}) + if cnival, ok := argsvalmap["cni"]; ok { + cnivalmap := cnival.(map[string]interface{}) + // merge it if conf has args + for key, val := range *args { + cnivalmap[key] = val + } + } else { + argsvalmap["cni"] = *args + } + } else { + argsval := map[string]interface{}{} + argsval["cni"] = *args + (*cniConfig)["args"] = argsval + } + return nil +} + +// addCNIArgsInConfig injects given cniArgs to CNI config in inBytes +func addCNIArgsInConfig(inBytes []byte, cniArgs *map[string]interface{}) ([]byte, error) { + var rawConfig map[string]interface{} + var err error + + err = json.Unmarshal(inBytes, &rawConfig) + if err != nil { + return nil, logging.Errorf("addCNIArgsInConfig(): failed to unmarshal inBytes: %v", err) + } + + injectCNIArgs(&rawConfig, cniArgs) + + configBytes, err := json.Marshal(rawConfig) + if err != nil { + return nil, logging.Errorf("addCNIArgsInConfig(): failed to re-marshal: %v", err) + } + return configBytes, nil +} + +// addCNIArgsInConfList injects given cniArgs to CNI conflist in inBytes +func addCNIArgsInConfList(inBytes []byte, cniArgs *map[string]interface{}) ([]byte, error) { + var rawConfig map[string]interface{} + var err error + + err = json.Unmarshal(inBytes, &rawConfig) + if err != nil { + return nil, logging.Errorf("addCNIArgsInConfList(): failed to unmarshal inBytes: %v", err) + } + + pList, ok := rawConfig["plugins"] + if !ok { + return nil, logging.Errorf("addCNIArgsInConfList(): unable to get plugin list") + } + + pMap, ok := pList.([]interface{}) + if !ok { + return nil, logging.Errorf("addCNIArgsInConfList(): unable to typecast plugin list") + } + + for idx := range pMap { + valMap := pMap[idx].(map[string]interface{}) + injectCNIArgs(&valMap, cniArgs) + } + + configBytes, err := json.Marshal(rawConfig) + if err != nil { + return 
nil, logging.Errorf("addCNIArgsInConfList(): failed to re-marshal: %v", err) + } + return configBytes, nil +} + +// CheckGatewayConfig check gatewayRequest and mark IsFilter{V4,V6}Gateway flag if +// gw filtering is required +func CheckGatewayConfig(delegates []*DelegateNetConf) error { + + v4Gateways := 0 + v6Gateways := 0 + + // Check the gateway + for _, delegate := range delegates { + if delegate.GatewayRequest != nil { + for _, gw := range *delegate.GatewayRequest { + if gw.To4() != nil { + v4Gateways++ + } else { + v6Gateways++ + } + } + } + } + + if v4Gateways > 1 || v6Gateways > 1 { + return fmt.Errorf("multus does not support ECMP for default-route") + } + + // set filter flag for each delegate + for i, delegate := range delegates { + delegates[i].IsFilterV4Gateway = true + delegates[i].IsFilterV6Gateway = true + if delegate.GatewayRequest != nil { + for _, gw := range *delegate.GatewayRequest { + if gw.To4() != nil { + delegates[i].IsFilterV4Gateway = false + } else { + delegates[i].IsFilterV6Gateway = false + } + } + } + } + return nil +} + +// CheckSystemNamespaces checks whether given namespace is in systemNamespaces or not. +func CheckSystemNamespaces(namespace string, systemNamespaces []string) bool { + for _, nsname := range systemNamespaces { + if namespace == nsname { + return true + } + } + return false +} diff --git a/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types/doc.go b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types/doc.go new file mode 100644 index 000000000..fa4e51c5b --- /dev/null +++ b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types/doc.go @@ -0,0 +1,16 @@ +// Copyright (c) 2022 Multus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package types contains common types in the multus. +package types diff --git a/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types/types.go b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types/types.go new file mode 100644 index 000000000..1a410db59 --- /dev/null +++ b/vendor/gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types/types.go @@ -0,0 +1,183 @@ +// Copyright (c) 2018 Intel Corporation +// Copyright (c) 2021 Multus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +import ( + "net" + + "gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging" + + "github.com/containernetworking/cni/pkg/types" + cni100 "github.com/containernetworking/cni/pkg/types/100" + v1 "k8s.io/api/core/v1" +) + +// NetConf for cni config file written in json +type NetConf struct { + types.NetConf + + // support chaining for master interface and IP decisions + // occurring prior to running ipvlan plugin + RawPrevResult *map[string]interface{} `json:"prevResult"` + PrevResult *cni100.Result `json:"-"` + + ConfDir string `json:"confDir"` + CNIDir string `json:"cniDir"` + BinDir string `json:"binDir"` + // RawDelegates is private to the NetConf class; use Delegates instead + RawDelegates []map[string]interface{} `json:"delegates"` + // These parameters are exclusive in one config file: + // - Delegates (directly add delegate CNI config into multus CNI config) + // - ClusterNetwork+DefaultNetworks (add CNI config through CRD, directory or file) + Delegates []*DelegateNetConf `json:"-"` + ClusterNetwork string `json:"clusterNetwork"` + DefaultNetworks []string `json:"defaultNetworks"` + Kubeconfig string `json:"kubeconfig"` + LogFile string `json:"logFile"` + LogLevel string `json:"logLevel"` + LogToStderr bool `json:"logToStderr,omitempty"` + LogOptions *logging.LogOptions `json:"logOptions,omitempty"` + RuntimeConfig *RuntimeConfig `json:"runtimeConfig,omitempty"` + // Default network readiness options + ReadinessIndicatorFile string `json:"readinessindicatorfile"` + // Option to isolate the usage of CR's to the namespace in which a pod resides. 
+ NamespaceIsolation bool `json:"namespaceIsolation"` + RawNonIsolatedNamespaces string `json:"globalNamespaces"` + NonIsolatedNamespaces []string `json:"-"` + + // Option to set system namespaces (to avoid to add defaultNetworks) + SystemNamespaces []string `json:"systemNamespaces"` + // Option to set the namespace that multus-cni uses (clusterNetwork/defaultNetworks) + MultusNamespace string `json:"multusNamespace"` + + // Retry delegate DEL message to next when some error + RetryDeleteOnError bool `json:"retryDeleteOnError"` +} + +// RuntimeConfig specifies CNI RuntimeConfig +type RuntimeConfig struct { + PortMaps []*PortMapEntry `json:"portMappings,omitempty"` + Bandwidth *BandwidthEntry `json:"bandwidth,omitempty"` + IPs []string `json:"ips,omitempty"` + Mac string `json:"mac,omitempty"` + InfinibandGUID string `json:"infinibandGUID,omitempty"` + DeviceID string `json:"deviceID,omitempty"` + CNIDeviceInfoFile string `json:"CNIDeviceInfoFile,omitempty"` +} + +// PortMapEntry for CNI PortMapEntry +type PortMapEntry struct { + HostPort int `json:"hostPort"` + ContainerPort int `json:"containerPort"` + Protocol string `json:"protocol,omitempty"` + HostIP string `json:"hostIP,omitempty"` +} + +// BandwidthEntry for CNI BandwidthEntry +type BandwidthEntry struct { + IngressRate int `json:"ingressRate"` + IngressBurst int `json:"ingressBurst"` + + EgressRate int `json:"egressRate"` + EgressBurst int `json:"egressBurst"` +} + +// DelegateNetConf for net-attach-def for pod +type DelegateNetConf struct { + Conf types.NetConf + ConfList types.NetConfList + Name string + IfnameRequest string `json:"ifnameRequest,omitempty"` + MacRequest string `json:"macRequest,omitempty"` + InfinibandGUIDRequest string `json:"infinibandGUIDRequest,omitempty"` + IPRequest []string `json:"ipRequest,omitempty"` + PortMappingsRequest []*PortMapEntry `json:"-"` + BandwidthRequest *BandwidthEntry `json:"-"` + GatewayRequest *[]net.IP `json:"default-route,omitempty"` + IsFilterV4Gateway bool + 
IsFilterV6Gateway bool + // MasterPlugin is only used internal housekeeping + MasterPlugin bool `json:"-"` + // Conflist plugin is only used internal housekeeping + ConfListPlugin bool `json:"-"` + // DeviceID is only used internal housekeeping + DeviceID string `json:"deviceID,omitempty"` + // ResourceName is only used internal housekeeping + ResourceName string `json:"resourceName,omitempty"` + + // Raw JSON + Bytes []byte +} + +// NetworkSelectionElement represents one element of the JSON format +// Network Attachment Selection Annotation as described in section 4.1.2 +// of the CRD specification. +type NetworkSelectionElement struct { + // Name contains the name of the Network object this element selects + Name string `json:"name"` + // Namespace contains the optional namespace that the network referenced + // by Name exists in + Namespace string `json:"namespace,omitempty"` + // IPRequest contains an optional requested IP address for this network + // attachment + IPRequest []string `json:"ips,omitempty"` + // MacRequest contains an optional requested MAC address for this + // network attachment + MacRequest string `json:"mac,omitempty"` + // InfinibandGUID request contains an optional requested Infiniband GUID address + // for this network attachment + InfinibandGUIDRequest string `json:"infiniband-guid,omitempty"` + // InterfaceRequest contains an optional requested name for the + // network interface this attachment will create in the container + InterfaceRequest string `json:"interface,omitempty"` + // DeprecatedInterfaceRequest is obsolated parameter at pre 3.2. + // This will be removed in 4.0 release. 
+ DeprecatedInterfaceRequest string `json:"interfaceRequest,omitempty"` + // PortMappingsRequest contains an optional requested port mapping + // for the network + PortMappingsRequest []*PortMapEntry `json:"portMappings,omitempty"` + // BandwidthRequest contains an optional requested bandwidth for + // the network + BandwidthRequest *BandwidthEntry `json:"bandwidth,omitempty"` + // DeviceID contains an optional requested deviceID the network + DeviceID string `json:"deviceID,omitempty"` + // CNIArgs contains additional CNI arguments for the network interface + CNIArgs *map[string]interface{} `json:"cni-args"` + // GatewayRequest contains default route IP address for the pod + GatewayRequest *[]net.IP `json:"default-route,omitempty"` +} + +// K8sArgs is the valid CNI_ARGS used for Kubernetes +type K8sArgs struct { + types.CommonArgs + IP net.IP + K8S_POD_NAME types.UnmarshallableString //revive:disable-line + K8S_POD_NAMESPACE types.UnmarshallableString //revive:disable-line + K8S_POD_INFRA_CONTAINER_ID types.UnmarshallableString //revive:disable-line + K8S_POD_UID types.UnmarshallableString //revive:disable-line +} + +// ResourceInfo is struct to hold Pod device allocation information +type ResourceInfo struct { + Index int + DeviceIDs []string +} + +// ResourceClient provides a kubelet Pod resource handle +type ResourceClient interface { + // GetPodResourceMap returns an instance of a map of Pod ResourceInfo given a (Pod name, namespace) tuple + GetPodResourceMap(*v1.Pod) (map[string]*ResourceInfo, error) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 9670587d3..fe85a57cb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -71,6 +71,7 @@ github.com/chai2010/gettext-go/po github.com/clarketm/json # github.com/containernetworking/cni v1.2.3 ## explicit; go 1.21 +github.com/containernetworking/cni/libcni github.com/containernetworking/cni/pkg/invoke github.com/containernetworking/cni/pkg/ns github.com/containernetworking/cni/pkg/skel @@ -186,9 
+187,10 @@ github.com/evanphx/json-patch/v5/internal/json # github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d ## explicit github.com/exponent-io/jsonpath -# github.com/fsnotify/fsnotify v1.7.0 +# github.com/fsnotify/fsnotify v1.8.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify +github.com/fsnotify/fsnotify/internal # github.com/fxamacker/cbor/v2 v2.7.0 ## explicit; go 1.17 github.com/fxamacker/cbor/v2 @@ -232,7 +234,7 @@ github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys -# github.com/golang/glog v1.2.1 +# github.com/golang/glog v1.2.4 ## explicit; go 1.19 github.com/golang/glog github.com/golang/glog/internal/logsink @@ -370,6 +372,22 @@ github.com/k8snetworkplumbingwg/govdpa/pkg/kvdpa ## explicit; go 1.17 github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1 +github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned +github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/scheme +github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1 +github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions +github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/internalinterfaces +github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io +github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1 +github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1 
+github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils +# github.com/k8snetworkplumbingwg/network-resources-injector v1.7.1 +## explicit; go 1.21 +github.com/k8snetworkplumbingwg/network-resources-injector/pkg/controlswitches +github.com/k8snetworkplumbingwg/network-resources-injector/pkg/tools +github.com/k8snetworkplumbingwg/network-resources-injector/pkg/types +github.com/k8snetworkplumbingwg/network-resources-injector/pkg/userdefinedinjections +github.com/k8snetworkplumbingwg/network-resources-injector/pkg/webhook # github.com/k8snetworkplumbingwg/sriov-network-device-plugin v0.0.0-20221127172732-a5a7395122e3 ## explicit; go 1.17 github.com/k8snetworkplumbingwg/sriov-network-device-plugin/pkg/utils @@ -1042,6 +1060,10 @@ gopkg.in/inf.v0 # gopkg.in/ini.v1 v1.67.0 ## explicit gopkg.in/ini.v1 +# gopkg.in/k8snetworkplumbingwg/multus-cni.v4 v4.0.2 +## explicit; go 1.18 +gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/logging +gopkg.in/k8snetworkplumbingwg/multus-cni.v4/pkg/types # gopkg.in/natefinch/lumberjack.v2 v2.2.1 ## explicit; go 1.13 gopkg.in/natefinch/lumberjack.v2