# Default values for the Hypertrace collector Helm chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
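#
# Any value here can be overridden at install/upgrade time with standard Helm
# flags, e.g. (the release name "collector" is illustrative):
#   helm install collector . --set logLevel=DEBUG --set replicaCount=2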

###########
# Deployment and Service
###########
logLevel: INFO

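# Rollout tuning: a new pod must stay Ready for minReadySeconds before it is
# counted as available, the rollout is marked failed after
# progressDeadlineSeconds without progress, and maxUnavailable: 0 forces the
# replacement pod to come up before the old one is taken down.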
minReadySeconds: 5
progressDeadlineSeconds: 120
replicaCount: 1
maxUnavailable: 0

image:
  repository: hypertrace/hypertrace-collector
  pullPolicy: IfNotPresent
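  # Assumption: as in most charts, an optional image.tag pins a specific
  # collector version (the value below is illustrative only):
  # tag: "0.1.0"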

env:
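  # GOGC=80 makes the Go runtime garbage-collect more aggressively than the
  # default of 100, trading a little CPU for a smaller heap.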
  - name: GOGC
    value: "80"

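# One containerPort per wire protocol the collector listens on. Keep this list
# in sync with the receivers enabled in the configMap section below.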
containerPorts:
  - name: grpc-otlp
    containerPort: 4317
  - name: http-otlp
    containerPort: 55681
  - name: grpc-opencensus
    containerPort: 55678
  - name: http-jaeger
    containerPort: 14268
  - name: grpc-jaeger
    containerPort: 14250
  - name: http-zipkin
    containerPort: 9411

service:
  type: ClusterIP
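  # Each targetPort below must match the corresponding containerPort above.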
  ports:
    - name: grpc-otlp
      port: 4317
      targetPort: 4317
      protocol: TCP
    - name: http-otlp
      port: 55681
      targetPort: 55681
      protocol: TCP
    - name: grpc-opencensus
      port: 55678
      targetPort: 55678
      protocol: TCP
    - name: http-jaeger
      port: 14268
      targetPort: 14268
      protocol: TCP
    - name: grpc-jaeger
      port: 14250
      targetPort: 14250
      protocol: TCP
    - name: http-zipkin
      port: 9411
      targetPort: 9411
      protocol: TCP

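# Probe timing only; the probe endpoints come from the chart templates.
# Assumption: they target the health_check extension enabled below, which
# serves HTTP on port 13133 by default.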
livenessProbe:
  initialDelaySeconds: 5
  periodSeconds: 10

readinessProbe:
  initialDelaySeconds: 5
  periodSeconds: 5

resources:
  # Default resource requests and limits for the collector. These are a
  # starting point: tune them for your workload, and consider lowering or
  # removing them on resource-constrained environments such as Minikube.
  limits:
    cpu: 1
    memory: 2Gi
  requests:
    cpu: 200m
    memory: 400Mi

deploymentLabels:
  app: hypertrace-collector

podLabels:
  app: hypertrace-collector

podAnnotations: {}

# The Deployment selector match labels are different from the pod labels, and
# must be a subset of them. You can append new labels to the selector but not
# remove or modify existing ones; doing so requires deleting the existing
# Deployment of the same name and redeploying. That is why they are kept
# separate from the pod labels, which can be added and removed freely without
# affecting the Deployment. Also, use "apiVersion: apps/v1" for the Deployment
# instead of the deprecated "apiVersion: extensions/v1beta1".
deploymentSelectorMatchLabels:
  app: hypertrace-collector

serviceSelectorLabels:
  app: hypertrace-collector

# Volumes and volume mounts
volumeMounts:
  - name: hypertrace-collector-config-vol
    mountPath: /conf

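# The configMap name and key must match the configMap section below; the
# rendered config lands at /conf/hypertrace-collector-config.yaml in the pod.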
volumes:
  - configMap:
      name: hypertrace-collector-config
      items:
        - key: hypertrace-collector-config
          path: hypertrace-collector-config.yaml
    name: hypertrace-collector-config-vol

nodeSelector: {}

###########
# Config Maps
###########
configMap:
  name: hypertrace-collector-config
  data:
    extensions:
      health_check:
      pprof:
        endpoint: 0.0.0.0:1777
      zpages:
        endpoint: 0.0.0.0:55679

    receivers:
      otlp:
        protocols:
          grpc:
          http:
      opencensus:
      zipkin:
      jaeger:
        protocols:
          grpc:
          thrift_http:
      # Collect own metrics
      prometheus:
        config:
          scrape_configs:
            - job_name: "otel-collector"
              scrape_interval: 10s
              static_configs:
                - targets: ["0.0.0.0:8888"]
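                # 8888 is the collector's default internal telemetry port, so
                # this job scrapes the collector's own metrics.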

    processors:
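      # The batch processor buffers spans and forwards them in batches,
      # reducing the number of outgoing Kafka produce requests.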
      batch:

    exporters:
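      # Spans leave the collector on a Kafka topic in Jaeger protobuf
      # encoding; the topic is created by the kafka-topic-creator job below.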
      kafka:
        protocol_version: 2.0.0
        brokers:
          - bootstrap:9092
        topic: jaeger-spans
        encoding: jaeger_proto

    service:
      extensions: [health_check, pprof, zpages]
      pipelines:
        traces:
          receivers: [otlp, opencensus, jaeger, zipkin]
          processors: [batch]
          exporters: [kafka]

kafka-topic-creator:
  jobName: jaeger-spans-kafka-topic-creator
  helmHook: pre-install,pre-upgrade
  kafka:
    topics:
      - name: jaeger-spans
        replicationFactor: 1
        partitions: 8
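        # retention.bytes=4294967296 caps each partition at 4 GiB;
        # retention.ms=259200000 expires spans after 72 hours (3 days).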
        configs:
          - retention.bytes=4294967296
          - retention.ms=259200000
  zookeeper:
    address: zookeeper:2181
  imagePullSecrets: []
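  # Istio sidecars keep one-shot Jobs from ever reaching Completed, so
  # injection is disabled for the topic-creation pod.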
  podAnnotations:
    sidecar.istio.io/inject: "false"