diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml
index ebaf2d1f8..653c00038 100644
--- a/charts/postgres-operator/crds/postgresqls.yaml
+++ b/charts/postgres-operator/crds/postgresqls.yaml
@@ -173,6 +173,12 @@ spec:
                             pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                   schema:
                     type: string
+                  sidecars:
+                    type: array
+                    nullable: true
+                    items:
+                      type: object
+                      x-kubernetes-preserve-unknown-fields: true
                   user:
                     type: string
               databases:
diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md
index bf731be2e..be374e1ee 100644
--- a/docs/reference/cluster_manifest.md
+++ b/docs/reference/cluster_manifest.md
@@ -511,7 +511,8 @@ properties of the persistent storage that stores Postgres data.
 
 Those parameters are defined under the `sidecars` key. They consist of a list
 of dictionaries, each defining one sidecar (an extra container running
-along the main Postgres container on the same pod). The following keys can be
+along the main Postgres container on the same pod). Sidecars can likewise be
+defined for the connection pooler via the `connectionPooler.sidecars` key. The following keys can be
 defined in the sidecar dictionary:
 
 * **name**
@@ -601,6 +602,21 @@ for both master and replica pooler services (if `enableReplicaConnectionPooler`
 * **resources**
   Resource configuration for connection pooler deployment.
 
+* **sidecars**
+  Extra containers to run alongside the PgBouncer container in the same pod.
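+  Pooler sidecars use the same schema as the cluster-level `sidecars` key; a
+  minimal sketch (names and image are illustrative):
+
+  ```yaml
+  connectionPooler:
+    sidecars:
+      - name: log-shipper        # hypothetical sidecar
+        image: log-shipper:latest
+        env:
+          - name: LOG_LEVEL
+            value: debug
+  ```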
+
 ## Custom TLS certificates
 
 Those parameters are grouped under the `tls` top-level key. Note, you have to
diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml
index 9207c83d4..b849c619b 100644
--- a/manifests/postgresql.crd.yaml
+++ b/manifests/postgresql.crd.yaml
@@ -171,6 +171,12 @@ spec:
                             pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                   schema:
                     type: string
+                  sidecars:
+                    type: array
+                    nullable: true
+                    items:
+                      type: object
+                      x-kubernetes-preserve-unknown-fields: true
                   user:
                     type: string
               databases:
diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go
index da88b0855..4f4c75fe4 100644
--- a/pkg/apis/acid.zalan.do/v1/crds.go
+++ b/pkg/apis/acid.zalan.do/v1/crds.go
@@ -275,6 +275,16 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
							"schema": {
								Type: "string",
							},
+							"sidecars": {
+								Type:     "array",
+								Nullable: true,
+								Items: &apiextv1.JSONSchemaPropsOrArray{
+									Schema: &apiextv1.JSONSchemaProps{
+										Type:                   "object",
+										XPreserveUnknownFields: util.True(),
+									},
+								},
+							},
							"user": {
								Type: "string",
							},
diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go
index 3d731743f..a786abcd7 100644
--- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go
+++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go
@@ -241,12 +241,13 @@ type PostgresStatus struct {
 // makes sense to expose. E.g. pool size (min/max boundaries), max client
 // connections etc.
 type ConnectionPooler struct {
-	NumberOfInstances *int32 `json:"numberOfInstances,omitempty"`
-	Schema            string `json:"schema,omitempty"`
-	User              string `json:"user,omitempty"`
-	Mode              string `json:"mode,omitempty"`
-	DockerImage       string `json:"dockerImage,omitempty"`
-	MaxDBConnections  *int32 `json:"maxDBConnections,omitempty"`
+	NumberOfInstances *int32    `json:"numberOfInstances,omitempty"`
+	Schema            string    `json:"schema,omitempty"`
+	User              string    `json:"user,omitempty"`
+	Mode              string    `json:"mode,omitempty"`
+	DockerImage       string    `json:"dockerImage,omitempty"`
+	MaxDBConnections  *int32    `json:"maxDBConnections,omitempty"`
+	Sidecars          []Sidecar `json:"sidecars,omitempty"`
 
 	*Resources `json:"resources,omitempty"`
 }
diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go
index 557f8889c..0a7d91977 100644
--- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go
@@ -111,6 +111,13 @@ func (in *ConnectionPooler) DeepCopyInto(out *ConnectionPooler) {
		*out = new(int32)
		**out = **in
	}
+	if in.Sidecars != nil {
+		in, out := &in.Sidecars, &out.Sidecars
+		*out = make([]Sidecar, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = new(Resources)
diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go
index 25d4514d1..51045ed6b 100644
--- a/pkg/cluster/connection_pooler.go
+++ b/pkg/cluster/connection_pooler.go
@@ -342,6 +342,21 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
		},
	}
 
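+	// Generate container specs for any sidecars declared under
+	// connectionPooler.sidecars, reusing the cluster-level sidecar generation
+	// with the default pooler resources as the per-sidecar default.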
+	sidecars := []v1.Container{}
+	if len(connectionPoolerSpec.Sidecars) > 0 {
+		sidecars, err = c.generateSidecarContainers(
+			connectionPoolerSpec.Sidecars,
+			makeDefaultConnectionPoolerResources(&c.OpConfig),
+			0,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("could not generate pooler sidecar containers: %v", err)
+		}
+	}
+
	// If the cluster has custom TLS certificates configured, we do the following:
	// 1. Add environment variables to tell pgBouncer where to find the TLS certificates
	// 2. Reference the secret in a volume
@@ -403,7 +418,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
		},
		Spec: v1.PodSpec{
			TerminationGracePeriodSeconds: &gracePeriod,
-			Containers:                    []v1.Container{poolerContainer},
+			Containers:                    append([]v1.Container{poolerContainer}, sidecars...),
			Tolerations:                   tolerationsSpec,
			Volumes:                       poolerVolumes,
			SecurityContext:               &securityContext,
diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go
index e6472d017..ca547d51e 100644
--- a/pkg/cluster/connection_pooler_test.go
+++ b/pkg/cluster/connection_pooler_test.go
@@ -869,6 +869,65 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) {
	}
 }
 
+func TestConnectionPoolerSidecars(t *testing.T) {
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				ProtectedRoles: []string{"admin"},
+				Auth: config.Auth{
+					SuperUsername:       superUserName,
+					ReplicationUsername: replicationUserName,
+				},
+				ConnectionPooler: config.ConnectionPooler{
+					ConnectionPoolerDefaultCPURequest:    "100m",
+					ConnectionPoolerDefaultCPULimit:      "100m",
+					ConnectionPoolerDefaultMemoryRequest: "100Mi",
+					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
+				},
+			},
+		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+	cluster.Statefulset = &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-sts",
+		},
+	}
+	cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{
+		Master: {
+			Deployment:     nil,
+			Service:        nil,
+			LookupFunction: true,
+			Name:           "",
+			Role:           Master,
+		},
+	}
+	cluster.Spec = acidv1.PostgresSpec{
+		ConnectionPooler: &acidv1.ConnectionPooler{
+			Sidecars: []acidv1.Sidecar{
+				acidv1.Sidecar{
+					Name:        "sidecar",
+					DockerImage: "image",
+					Env: []v1.EnvVar{
+						{
+							Name:  "SOME_VAR",
+							Value: "some-value",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	deployment, err := cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[Master])
+	assert.NoError(t, err)
+
+	containers := deployment.Spec.Template.Spec.Containers
+	assert.Equal(t, 2, len(containers), "wrong number of containers")
+	assert.Equal(t, "sidecar", containers[1].Name, "wrong name of sidecar")
+	assert.Equal(t, "image", containers[1].Image, "wrong image of sidecar")
+	assert.Equal(t, "SOME_VAR", containers[1].Env[0].Name, "wrong name of env var in sidecar")
+	assert.Equal(t, "some-value", containers[1].Env[0].Value, "wrong value of env var in sidecar")
+}
+
 func testServiceAccount(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error {
	poolerServiceAccount := podSpec.Spec.ServiceAccountName