diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..1faf975
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,85 @@
+##############################
+## Java
+##############################
+.mtj.tmp/
+*.class
+*.jar
+*.war
+*.ear
+*.nar
+hs_err_pid*
+
+##############################
+## Maven
+##############################
+target/
+pom.xml.tag
+pom.xml.releaseBackup
+pom.xml.versionsBackup
+pom.xml.next
+pom.xml.bak
+pom.xml.asc
+release.properties
+dependency-reduced-pom.xml
+buildNumber.properties
+.mvn/timing.properties
+.mvn/wrapper/maven-wrapper.jar
+
+##############################
+## Gradle
+##############################
+bin/
+build/
+.gradle
+.gradletasknamecache
+gradle-app.setting
+!gradle-wrapper.jar
+
+##############################
+## IntelliJ
+##############################
+out/
+.idea/
+.idea_modules/
+*.iml
+*.ipr
+*.iws
+
+##############################
+## Eclipse
+##############################
+.settings/
+bin/
+tmp/
+.metadata
+.classpath
+.project
+*.tmp
+*.bak
+*.swp
+*~.nib
+sqrl-core/src/test/resources/local.properties
+.loadpath
+.factorypath
+
+##############################
+## NetBeans
+##############################
+nbproject/private/
+build/
+nbbuild/
+dist/
+nbdist/
+nbactions.xml
+nb-configuration.xml
+
+##############################
+## Visual Studio Code
+##############################
+.vscode/
+.code-workspace
+
+##############################
+## OS X
+##############################
+.DS_Store
diff --git a/Chart.yaml b/Chart.yaml
new file mode 100644
index 0000000..9276b39
--- /dev/null
+++ b/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v2
+name: sqrl-helm-charts
+description: A Helm chart for deploying SQRL pipeline services
+version: 0.1.0
+appVersion: "1.0"
diff --git a/README.md b/README.md
index b893268..275296a 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,252 @@
-# k8s
-A SQRL profile for getting started with k8s
+# SQRL Compiler Kubernetes Profile
+
+This project provides a profile for the SQRL compiler to generate Helm charts and a `values.yaml` file for deploying your SQRL pipeline to a Kubernetes cluster. Follow the instructions below to set up your Kubernetes cluster, compile your project, and deploy using Helm.
+
+## Table of Contents
+
+- [Prerequisites](#prerequisites)
+- [Setting Up a Local Kubernetes Cluster](#setting-up-a-local-kubernetes-cluster)
+  - [Using Minikube](#using-minikube)
+  - [Using Docker Desktop](#using-docker-desktop)
+- [Installing Required Operators](#installing-required-operators)
+- [Compiling with SQRL Compiler](#compiling-with-sqrl-compiler)
+- [Configuring Flink Upstream Image](#configuring-flink-upstream-image)
+  - [Supporting User-Defined Functions (UDFs)](#supporting-user-defined-functions-udfs)
+  - [Using the Default Upstream Flink Image](#using-the-default-upstream-flink-image)
+  - [Creating a Custom Upstream Docker Image](#creating-a-custom-upstream-docker-image)
+- [Deploying with Helm](#deploying-with-helm)
+- [Introspecting the Running Flink Web UI](#introspecting-the-running-flink-web-ui)
+- [Accessing the GraphQL UI](#accessing-the-graphql-ui)
+- [Limitations](#limitations)
+- [Structure](#structure)
+- [Support](#support)
+- [License](#license)
+
+## Prerequisites
+
+Ensure you have the following installed:
+
+- **Kubernetes Cluster**
+- **kubectl** command-line tool
+- **Helm**
+- **SQRL**
+- **Docker**
+
+## Setting Up a Local Kubernetes Cluster
+
+If you do not have access to a Kubernetes cluster, you can set up a local cluster using Minikube or Docker Desktop.
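+
+Whichever option you choose, you can verify that `kubectl` is pointing at the new cluster before continuing:
+
+```bash
+kubectl cluster-info
+kubectl get nodes
+```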
+
+### Using Minikube
+
+#### Install Minikube
+
+For **macOS**:
+
+```bash
+brew install minikube
+```
+
+For other platforms, follow the [Minikube installation guide](https://minikube.sigs.k8s.io/docs/start/).
+
+If you use Minikube, be aware that you must mount a local volume into the cluster so that local UDFs are accessible.
+
+#### Start Minikube
+
+```bash
+minikube start
+```
+
+### Using Docker Desktop
+
+Docker Desktop includes a built-in Kubernetes cluster.
+
+1. **Install Docker Desktop:**
+
+   [Download Docker Desktop](https://www.docker.com/products/docker-desktop) for your operating system.
+
+2. **Enable Kubernetes:**
+
+   - Open Docker Desktop Preferences.
+   - Navigate to the **Kubernetes** tab.
+   - Check **Enable Kubernetes**.
+   - Click **Apply & Restart**.
+
+## Installing Required Operators
+
+Install the required operators using the instructions linked below.
+
+**Note:** Install all operators in the same namespace (e.g., `sqrl`) to ensure proper service discovery and interaction.
+
+Create the namespace:
+
+```bash
+kubectl create namespace sqrl
+```
+
+### Strimzi (Kafka Operator)
+
+[Strimzi Documentation](https://strimzi.io/quickstarts/)
+
+### CloudNativePG (PostgreSQL Operator)
+
+[CloudNativePG Documentation](https://cloudnative-pg.io/documentation/1.16/quickstart/)
+
+### Apache Flink Operator
+
+[Flink Operator Documentation](https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-main/)
+
+## Compiling with SQRL Compiler
+
+Compile your project using the Kubernetes profile to generate the Helm charts and `values.yaml` file.
+
+```bash
+sqrl compile --profile k8s ...
+```
+
+This command generates Helm charts and a `values.yaml` file in the default `build/deploy` directory.
+
+## Configuring Flink Upstream Image
+
+### Supporting User-Defined Functions (UDFs)
+
+To include your custom UDFs in the Flink deployment, you have three options:
+
+1. **Download UDFs from a Repository:**
+
+   Configure your Flink job to download UDFs from the [DataSQRL repository](https://dev.datasqrl.com/), which must be accessible from your Kubernetes cluster.
+
+2. **Include UDFs in a Custom Upstream Image:**
+
+   Embed your UDFs directly into a custom Flink Docker image.
+
+3. **Build a Flink job JAR as an uberjar:**
+
+   Build your own Flink job JAR and place it in a location accessible from your Kubernetes environment. The default job JAR runner can be overridden.
+
+### Using the Default Upstream Flink Image
+
+We provide a default upstream Flink image at `datasqrl/flink-1.19-v0.5`, which includes all standard connectors that DataSQRL supports out of the box. Any UDFs can be resolved by the JAR launcher.
+
+In your `package.json`, specify the default image under the `values` object:
+
+```json
+{
+  "values": {
+    "k8s": {
+      "flink-upstream-image": "datasqrl/flink-1.19-v0.5"
+    }
+  }
+}
+```
+
+### Creating a Custom Upstream Docker Image
+
+If you need to include bespoke connectors or UDFs directly in the image, you can create a custom upstream Docker image.
+
+1. **Modify the Dockerfile:**
+
+   Navigate to the `flink-upstream-image` directory and edit the Dockerfile to include your UDFs or connectors.
+
+2. **Build and Publish the Image:**
+
+   ```bash
+   docker build -t your-repo/your-flink-image:latest .
+   docker push your-repo/your-flink-image:latest
+   ```
+
+   (If you are testing on a local cluster, see the note after these steps.)
+
+3. **Specify the Custom Image in `package.json`:**
+
+   Update your `package.json` under the `values` object:
+
+   ```json
+   {
+     "values": {
+       "k8s": {
+         "flink-upstream-image": "your-repo/your-flink-image:latest"
+       }
+     }
+   }
+   ```
+
+4. **Set UDF_PATH Argument (Optional):**
+
+   If you're not embedding UDFs in the image, specify the path using the Flink bootstrapper's `UDF_PATH` argument:
+
+   ```yaml
+   args:
+     - --UDF_PATH=/path/to/your/udfs
+   ```
+
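+If you are testing against a local cluster, you may not need to push the image to a remote registry at all: Docker Desktop's Kubernetes can use images from your local Docker daemon directly, and Minikube can load a locally built image into the cluster. The image name below is the placeholder from the steps above; you may also need `imagePullPolicy: IfNotPresent` so the cluster does not try to pull the image remotely.
+
+```bash
+# Optional: load a locally built image into Minikube instead of pushing it to a registry
+minikube image load your-repo/your-flink-image:latest
+```
+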
+## Deploying with Helm
+
+Deploy the generated Helm charts to your Kubernetes cluster:
+
+```bash
+helm install my-sqrl-project ./build/deploy -n sqrl
+```
+
+Replace `my-sqrl-project` with your desired release name. Ensure you're in the directory containing the `build/deploy` folder or provide the correct path.
+
+## Introspecting the Running Flink Web UI
+
+Access the Flink Web UI to monitor your Flink cluster:
+
+1. **Port-Forward the Flink Service:**
+
+   ```bash
+   kubectl port-forward svc/flink-jobmanager 8081:8081 -n sqrl
+   ```
+
+2. **Access the Web UI:**
+
+   Open your browser and navigate to `http://localhost:8081`.
+
+## Accessing the GraphQL UI
+
+1. **Port-Forward the GraphQL Service:**
+
+   ```bash
+   kubectl port-forward svc/vertx-server 8888:8888 -n sqrl
+   ```
+
+2. **Access the Web UI:**
+
+   Open your browser and navigate to `http://localhost:8888/graphiql/`.
+
+## Limitations
+
+- **Local UDFs:** must be uploaded to a repository or included in a custom Docker image.
+- **Connectors:** SQRL assumes all connectors are streams by default. If you need to support a connector with a different changelog stream, please [open an issue](https://github.com/your-repo/issues).
+
+## Structure
+
+The Helm charts have the following structure. Fork this repository, make your changes, and use it as your own default profile.
+
+```
+sqrl-helm-charts/
+├── Chart.yaml
+├── values.yaml
+└── templates/
+    ├── kafka/
+    │   └── kafka.yaml
+    ├── postgres/
+    │   ├── configmap.yaml
+    │   ├── postgrescluster.yaml
+    │   └── service.yaml
+    ├── flink/
+    │   ├── configmap.yaml
+    │   ├── deployment.yaml
+    │   └── service.yaml
+    └── vertx/
+        ├── configmap.yaml
+        ├── deployment.yaml
+        └── service.yaml
+```
+
+## Support
+
+For issues or questions, please [open a ticket](https://github.com/your-repo/issues).
+
+## License
+
+This project is licensed under the [MIT License](LICENSE).
+
+---
+
+**Note:** Replace placeholders like `your-repo` and `your-flink-image` with your actual repository names and paths.
diff --git a/k8s-local/flink.yaml b/k8s-local/flink.yaml
new file mode 100644
index 0000000..96043be
--- /dev/null
+++ b/k8s-local/flink.yaml
@@ -0,0 +1,117 @@
+###
+# Quickstart:
+# kubectl create -f https://github.com/jetstack/cert-manager/releases/download/v1.8.2/cert-manager.yaml
+# helm repo add flink-operator-repo https://downloads.apache.org/flink/flink-kubernetes-operator-1.10.0/
+# helm install flink-kubernetes-operator flink-operator-repo/flink-kubernetes-operator
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sql-scripts-configmap
+data:
+  flink.sql: |
+    CREATE TABLE GeneratedSource (
+      id INT,
+      name STRING
+    ) WITH (
+      'connector' = 'datagen',
+      'rows-per-second' = '10',
+      'fields.id.kind' = 'sequence',
+      'fields.id.start' = '1',
+      'fields.id.end' = '1000',
+      'fields.name.length' = '10'
+    );
+
+    CREATE TABLE PrintSink (
+      id INT,
+      name STRING
+    ) WITH (
+      'connector' = 'print'
+    );
+
+    INSERT INTO PrintSink
+    SELECT id, name FROM GeneratedSource;
+
+---
+apiVersion: flink.apache.org/v1beta1
+kind: FlinkDeployment
+metadata:
+  name: sql-example
+spec:
+  image: flink:1.19.0
+  flinkVersion: v1_19
+  jobManager:
+    resource:
+      memory: "2048m"
+      cpu: 1
+    podTemplate:
+      spec:
+        containers:
+          - name: flink-main-container
+            volumeMounts:
+              - name: sql-scripts
+                mountPath: /opt/flink/usrlib/sql-scripts
+        volumes:
+          - name: sql-scripts
+            configMap:
+              name: sql-scripts-configmap
+  taskManager:
+    resource:
+      memory: "2048m"
+      cpu: 1
+    podTemplate:
+      spec:
+        containers:
+          - name: flink-main-container
+            volumeMounts:
+              - name: sql-scripts
+                mountPath: /opt/flink/usrlib/sql-scripts
+        volumes:
+          - name: sql-scripts
+            configMap:
+              name: sql-scripts-configmap
+  serviceAccount: flink
+---
+apiVersion: flink.apache.org/v1beta1
+kind: FlinkSessionJob
+metadata:
+  name: sql-example
+spec:
+  deploymentName: sql-example
+  job:
+    jarURI: https://sqrl-k8s-cloud.s3.amazonaws.com/flink-sql-runner-example-1.jar
+    args: ["/opt/flink/usrlib/sql-scripts/flink.sql"]
+    parallelism: 1
+    upgradeMode: stateless
+
+
+---
+# Optional to add a lb
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager-ui
+spec:
+  type: LoadBalancer
+  ports:
+    - port: 8081
+      targetPort: 8081
+      protocol: TCP
+  selector:
+    app: sql-example
diff --git a/k8s-local/kafka-cluster.yaml b/k8s-local/kafka-cluster.yaml
new file mode 100644
index 0000000..2e99588
--- /dev/null
+++ b/k8s-local/kafka-cluster.yaml
@@ -0,0 +1,51 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kafdrop
+  namespace: kafka # Deploy in the same namespace
+  labels:
+    app: kafdrop
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: kafdrop
+  template:
+    metadata:
+      labels:
+        app: kafdrop
+    spec:
+      containers:
+        - name: kafdrop
+          image: obsidiandynamics/kafdrop:latest
+          ports:
+            - containerPort: 9000
+          env:
+            - name: KAFKA_BROKERCONNECT
+              value: "my-cluster-kafka-bootstrap:9092" # Replace with your Kafka bootstrap service
+          livenessProbe:
+            httpGet:
+              path: /
+              port: 9000
+            initialDelaySeconds: 15
+            timeoutSeconds: 2
+          readinessProbe:
+            httpGet:
+              path: /
+              port: 9000
+            initialDelaySeconds: 15
+            timeoutSeconds: 2
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kafdrop
+  namespace: kafka
+spec:
+  type: LoadBalancer # Use LoadBalancer, NodePort, or ClusterIP depending on your environment
+  ports:
+    - port: 9000
+      targetPort: 9000
+  selector:
+    app: kafdrop
diff --git a/k8s-local/kafka.yaml b/k8s-local/kafka.yaml
new file mode 100644
index 0000000..5b2d22d
--- /dev/null
+++ b/k8s-local/kafka.yaml
@@ -0,0 +1,30 @@
+
+###
+# Quickstart:
+# kubectl create namespace kafka
+# kubectl create -f 'https://strimzi.io/install/latest?namespace=kafka' -n kafka
+#
+# Deploy a single node cluster:
+# kubectl apply -f https://strimzi.io/examples/latest/kafka/kraft/kafka-single-node.yaml -n kafka
+#
+# Wait: kubectl wait kafka/my-cluster --for=condition=Ready --timeout=300s -n kafka
+#
+# Commands: kubectl -n kafka get kafkatopics
+---
+
+apiVersion: kafka.strimzi.io/v1beta2
+kind: KafkaTopic
+metadata:
+  name: notificationstream-1
+  namespace: kafka
+  labels:
+    strimzi.io/cluster: my-cluster
+  annotations:
+    "helm.sh/hook": post-install, post-upgrade
+    "helm.sh/hook-weight": "1"
+spec:
+  partitions: 1
+  replicas: 1
+  config:
+    retention.ms: 7200000
+    segment.bytes: 1073741824
diff --git a/k8s-local/linux-pod.yaml b/k8s-local/linux-pod.yaml
new file mode 100644
index 0000000..ab9f50d
--- /dev/null
+++ b/k8s-local/linux-pod.yaml
@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: local-pv
+spec:
+  storageClassName: manual
+  capacity:
+    storage: 1Gi
+  accessModes:
+    - ReadWriteOnce
+  hostPath:
+    path: /Users/henneberger/sqrl-k8s/
+  claimRef:
+    namespace: default
+    name: local-pvc
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: local-pvc
+spec:
+  storageClassName: manual # Added storageClassName
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: linux-pod
+spec:
+  volumes:
+    - name: my-volume
+      persistentVolumeClaim:
+        claimName: local-pvc
+  containers:
+    - name: basic-linux
+      image: ubuntu
+      command: ["/bin/sh", "-c", "while true; do sleep 3600; done"]
+      volumeMounts:
+        - mountPath: /mnt/host
+          name: my-volume
diff --git a/k8s-local/postgres.yaml b/k8s-local/postgres.yaml
new file mode 100644
index 0000000..efc87af
--- /dev/null
+++ b/k8s-local/postgres.yaml
@@ -0,0 +1,84 @@
+##
+# kubectl apply -f https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml --server-side
+# Note: --server-side is needed to bypass the error: The CustomResourceDefinition "poolers.postgresql.cnpg.io" is invalid: metadata.annotations: Too long: must have at most 262144 bytes
+##
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: post-init-sql-configmap
+data:
+  schema.sql: |
+    CREATE TABLE GeneratedSource (
+      id INT,
+      name varchar
+    );
+---
+
+apiVersion: v1
+stringData:
+  username: datasqrl
+  password: postgres
+kind: Secret
+metadata:
+  name: db-secret
+type: kubernetes.io/basic-auth
+
+---
+
+# install pgvector plugin using postInitTemplateSQL
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: database
+  labels:
+    app: database
+    component: db
+spec:
+  instances: 1
+  primaryUpdateStrategy: unsupervised
+
+  # superuserSecret:
+  #   name: superuser-secret
+
+  # TODO: solve -h localhost not working problem
+  # https://stackoverflow.com/questions/4328679/how-to-configure-postgresql-so-it-accepts-loginpassword-auth
+  postgresql:
+    parameters:
+      shared_buffers: 256MB
+      pg_stat_statements.max: '10000'
+      pg_stat_statements.track: all
+      auto_explain.log_min_duration: '10s'
+
+    pg_hba:
+      - local all all trust
+      - host all all all trust
+
+  # TODO: fix bug any password can login
+  bootstrap:
+    initdb:
+      database: datasqrl
+      owner: datasqrl
+      secret:
+        name: db-secret
+      postInitApplicationSQLRefs:
+        configMapRefs:
+          - name: post-init-sql-configmap
+            key: schema.sql
+  storage:
+    size: 1Gi
+
+---
+# Optional to add a lb
+apiVersion: v1
+kind: Service
+metadata:
+  name: postgres-port
+spec:
+  type: LoadBalancer
+  ports:
+    - port: 5432
+      targetPort: 5432
+      protocol: TCP
+  selector:
+    cnpg.io/cluster: database
+    cnpg.io/podRole: instance
diff --git a/k8s-local/server.yaml b/k8s-local/server.yaml
new file mode 100644
index 0000000..f88a2e9
--- /dev/null
+++ b/k8s-local/server.yaml
@@ -0,0 +1,57 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: server-deployment
+  labels:
+    app: server
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: server
+  template:
+    metadata:
+      labels:
+        app: server
+    spec:
+      containers:
+        - name: server
+          image: 'datasqrl/sqrl-server:latest'
+          workingDir: /opt/sqrl
+          command: ["java", "-jar", "vertx-server.jar"]
+          env:
+            - name: PGHOST
+              value: "database-rw"
+            - name: PGPORT
+              value: "5432"
+            - name: PGDATABASE
+              value: "datasqrl"
+            - name: PGUSER
+              value: "postgres"
+            - name: PGPASSWORD
+              value: "postgres"
+            - name: PROPERTIES_BOOTSTRAP_SERVERS
+              value: my-cluster-kafka-bootstrap:9092
+          ports:
+            - containerPort: 8888
+          readinessProbe:
+            httpGet:
+              path: /graphiql/
+              port: 8888
+            initialDelaySeconds: 30
+            periodSeconds: 10
+          imagePullPolicy: IfNotPresent
+
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: server-service
+spec:
+  selector:
+    app: server
+  ports:
+    - protocol: TCP
+      port: 8888
+      targetPort: 8888
\ No newline at end of file
diff --git a/k8s-local/values.yaml b/k8s-local/values.yaml
new file mode 100644
index 0000000..f5f87f8
--- /dev/null
+++ b/k8s-local/values.yaml
@@ -0,0 +1,3 @@
+# 1. Allow starting Kafka first, since it takes a while to come up
+# UI enabled for each engine is optional
+# Export ports for each service (optional)
\ No newline at end of file
diff --git a/templates/flink/configmap.yaml b/templates/flink/configmap.yaml
new file mode 100644
index 0000000..1370eac
--- /dev/null
+++ b/templates/flink/configmap.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: flink-sql
+data:
+  job.sql: |
+    -- Flink SQL job script
+    SELECT * FROM example_table;
diff --git a/templates/flink/deployment.yaml b/templates/flink/deployment.yaml
new file mode 100644
index 0000000..586f26f
--- /dev/null
+++ b/templates/flink/deployment.yaml
@@ -0,0 +1,29 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink
+  labels:
+    app: flink
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+  template:
+    metadata:
+      labels:
+        app: flink
+    spec:
+      containers:
+        - name: jobmanager
+          image: {{ .Values.flink.image }}
+          args: ["jobmanager"]
+          ports:
+            - containerPort: {{ .Values.flink.servicePort }}
+          volumeMounts:
+            - name: sql-config
+              mountPath: /opt/flink/sql
+      volumes:
+        - name: sql-config
+          configMap:
+            name: flink-sql
diff --git a/templates/flink/service.yaml b/templates/flink/service.yaml
new file mode 100644
index 0000000..d96d17c
--- /dev/null
+++ b/templates/flink/service.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink
+  labels:
+    app: flink
+spec:
+  selector:
+    app: flink
+  ports:
+    - protocol: TCP
+      port: {{ .Values.flink.servicePort }}
+      targetPort: {{ .Values.flink.servicePort }}
diff --git a/templates/kafka/kafka.yaml b/templates/kafka/kafka.yaml
new file mode 100644
index 0000000..d6481d1
--- /dev/null
+++ b/templates/kafka/kafka.yaml
@@ -0,0 +1,22 @@
+apiVersion: kafka.strimzi.io/v1beta2
+kind: Kafka
+metadata:
+  name: {{ .Values.kafka.kafkaClusterName }}
+spec:
+  kafka:
+    version: 3.0.0
+    replicas: {{ .Values.kafka.replicas }}
+    listeners:
+      - name: plain
+        port: 9092
+        type: internal
+        tls: false
+    storage:
+      type: ephemeral
+  zookeeper:
+    replicas: 3
+    storage:
+      type: ephemeral
+  entityOperator:
+    topicOperator: {}
+    userOperator: {}
diff --git a/templates/postgres/configmap.yaml b/templates/postgres/configmap.yaml
new file mode 100644
index 0000000..21e5400
--- /dev/null
+++ b/templates/postgres/configmap.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: postgres-schema
+data:
+  init.sql: |
+    -- PostgreSQL schema initialization script
+    CREATE TABLE example (
+      id SERIAL PRIMARY KEY,
+      data TEXT NOT NULL
+    );
diff --git a/templates/postgres/postgrescluster.yaml b/templates/postgres/postgrescluster.yaml
new file mode 100644
index 0000000..3290d9e
--- /dev/null
+++ b/templates/postgres/postgrescluster.yaml
@@ -0,0 +1,26 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: {{ .Values.postgres.postgresClusterName }}
+  namespace: {{ .Values.kafka.namespace }}
+spec:
+  instances: 1
+  primaryUpdateStrategy: unsupervised
+  storage:
+    size: 1Gi
+    storageClass: standard
+  postgresql:
+    version: "14"
+  bootstrap:
+    initdb:
+      options: "--data-checksums"
+  superUserSecret:
+    name: {{ .Values.postgres.postgresClusterName }}-secret
+  databases:
+    - name: appdb
+  users:
+    - name: appuser
+      databases:
+        - appdb
+  monitor:
+    enabled: true
diff --git a/templates/postgres/service.yaml b/templates/postgres/service.yaml
new file mode 100644
index 0000000..15af97a
--- /dev/null
+++ b/templates/postgres/service.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: postgres
+  labels:
+    app: postgres
+spec:
+  selector:
+    app: postgres
+  ports:
+    - protocol: TCP
+      port: {{ .Values.postgres.servicePort }}
+      targetPort: {{ .Values.postgres.servicePort }}
diff --git a/templates/vertx/configmap.yaml b/templates/vertx/configmap.yaml
new file mode 100644
index 0000000..0023f12
--- /dev/null
+++ b/templates/vertx/configmap.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: vertx-server-config
+data:
+  application.conf: |
+    # Application configuration
+    http.port = 8888
+    some.other.setting = "value"
+
+  graphql.conf: |
+    # GraphQL configuration
+    graphiql.enabled = true
diff --git a/templates/vertx/deployment.yaml b/templates/vertx/deployment.yaml
new file mode 100644
index 0000000..c8ffcf4
--- /dev/null
+++ b/templates/vertx/deployment.yaml
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: vertx-server
+  labels:
+    app: vertx-server
+spec:
+  replicas: {{ .Values.replicaCount }}
+  selector:
+    matchLabels:
+      app: vertx-server
+  template:
+    metadata:
+      labels:
+        app: vertx-server
+    spec:
+      containers:
+        - name: vertx-server
+          image: {{ .Values.vertxServer.image }}
+          ports:
+            - containerPort: {{ .Values.vertxServer.servicePort }}
+          volumeMounts:
+            - name: config
+              mountPath: /app/config
+      volumes:
+        - name: config
+          configMap:
+            name: vertx-server-config
diff --git a/templates/vertx/service.yaml b/templates/vertx/service.yaml
new file mode 100644
index 0000000..d70ea32
--- /dev/null
+++ b/templates/vertx/service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: vertx-server
+  labels:
+    app: vertx-server
+spec:
+  selector:
+    app: vertx-server
+  ports:
+    - protocol: TCP
+      port: {{ .Values.vertxServer.servicePort }}
+      targetPort: {{ .Values.vertxServer.servicePort }}
+  type: LoadBalancer
diff --git a/values.yaml b/values.yaml
new file mode 100644
index 0000000..db10b07
--- /dev/null
+++ b/values.yaml
@@ -0,0 +1,18 @@
+replicaCount: 1
+
+kafka:
+  kafkaClusterName: kafka
+  replicas: 3
+  namespace: sqrl
+
+postgres:
+  postgresClusterName: postgres
+  postgresPassword: "yourpassword"
+  servicePort: 5432
+
+flink:
+  image: datasqrl/flink-1.19-v0.5:latest
+  servicePort: 8081
+
+vertxServer:
+  image: datasqrl/vertx-server:latest
+  servicePort: 8888