@@ -67,7 +67,7 @@ timescaledb-single:
67
67
enabled : true
68
68
image :
69
69
repository : quay.io/prometheuscommunity/postgres-exporter
70
- tag: v0.11.1
70
+ tag: v0.17.1
71
71
args :
72
72
# Disabling collecting database size statistics as this can be expensive
73
73
# and some of this data is also provided via node_exporter.
@@ -82,7 +82,7 @@ promscale:
82
82
enabled : true
83
83
image :
84
84
repository : timescale/promscale
85
- tag: 0.16.0
85
+ tag: 0.17.0
86
86
pullPolicy : IfNotPresent
87
87
# to pass extra args
88
88
extraArgs :
@@ -120,7 +120,7 @@ kube-prometheus-stack:
120
120
image :
121
121
registry : quay.io
122
122
repository : prometheus/alertmanager
123
- tag : v0.25.0
123
+ tag : v0.28.1
124
124
replicas : 3
125
125
## AlertManager resource requests
126
126
resources :
@@ -134,15 +134,15 @@ kube-prometheus-stack:
134
134
image :
135
135
registry : quay.io
136
136
repository : prometheus-operator/prometheus-operator
137
- tag: v0.62.0
137
+ tag: v0.81.0
138
138
pullPolicy : IfNotPresent
139
139
## Prometheus config reloader configuration
140
140
prometheusConfigReloader :
141
141
# image to use for config and rule reloading
142
142
image :
143
143
registry : quay.io
144
144
repository : prometheus-operator/prometheus-config-reloader
145
- tag: v0.62.0
145
+ tag: v0.81.0
146
146
# resource config for prometheusConfigReloader
147
147
resources :
148
148
requests :
@@ -164,7 +164,7 @@ kube-prometheus-stack:
164
164
image :
165
165
registry : quay.io
166
166
repository : prometheus/prometheus
167
- tag : v2.41.0
167
+ tag : v2.55.1
168
168
scrapeInterval: "1m"
169
169
scrapeTimeout: "10s"
170
170
evaluationInterval: "1m"
@@ -273,7 +273,7 @@ kube-prometheus-stack:
273
273
# TODO(paulfantom): remove with kube-prometheus bump
274
274
image :
275
275
repository : grafana/grafana
276
- tag : 9.3.2
276
+ tag : 9.5.21
277
277
pullPolicy : IfNotPresent
278
278
resources :
279
279
limits :
@@ -346,7 +346,7 @@ kube-prometheus-stack:
346
346
kube-state-metrics :
347
347
image :
348
348
repository : registry.k8s.io/kube-state-metrics/kube-state-metrics
349
- tag: v2.7.0
349
+ tag: v2.15.0
350
350
pullPolicy : IfNotPresent
351
351
# By default kube-state-metrics are scraped using
352
352
# serviceMonitor disable annotation based scraping
@@ -361,7 +361,7 @@ kube-prometheus-stack:
361
361
prometheus-node-exporter :
362
362
image :
363
363
repository : quay.io/prometheus/node-exporter
364
- tag: v1.5.0
364
+ tag: v1.9.0
365
365
pullPolicy : IfNotPresent
366
366
# By default node-exporter are scraped using
367
367
# serviceMonitor disable annotation based scraping
@@ -385,7 +385,7 @@ opentelemetry-operator:
385
385
manager :
386
386
image :
387
387
repository : ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator
388
- tag: v0.67.0
388
+ tag: v0.120.0
389
389
resources :
390
390
limits :
391
391
cpu : 50m
@@ -399,8 +399,8 @@ opentelemetry-operator:
399
399
enabled : true
400
400
instrumentation :
401
401
pythonImage : ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.32b0
402
- javaImage : ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.21.0
403
- nodejsImage: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0
402
+ javaImage : ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.33.6
403
+ nodejsImage: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.56.0
404
404
collector :
405
405
# The default otel collector that will be deployed by helm once
406
406
# the otel operator is in running state
0 commit comments