diff --git a/config/config.md b/config/config.md
index b600ec0de689..aff39e8289f5 100644
--- a/config/config.md
+++ b/config/config.md
@@ -200,15 +200,7 @@
| `slow_query.record_type` | String | Unset | The record type of slow queries. It can be `system_table` or `log`. |
| `slow_query.threshold` | String | Unset | The threshold of slow query. |
| `slow_query.sample_ratio` | Float | Unset | The sampling ratio of slow query log. The value should be in the range of (0, 1]. |
-| `export_metrics` | -- | -- | The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.
This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.self_import` | -- | -- | For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
You must create the database before enabling it. |
-| `export_metrics.self_import.db` | String | Unset | -- |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
-| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
+| `tracing` | -- | -- | Exporting internal metrics via remote-write has been removed; use an external Prometheus-compatible collector (e.g. Prometheus, the OTel Collector, or VMA) to scrape the metrics instead. To keep the metrics queryable in GreptimeDB (the old `self_import` behavior), have the collector remote-write what it scrapes to `/v1/prometheus/write?db=greptime_metrics`.
The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.
When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
is set to "prof:true,prof_active:false". The official image adds this env variable.
Default is true. |
@@ -320,13 +312,7 @@
| `slow_query.threshold` | String | `30s` | The threshold of slow query. It can be human readable time string, for example: `10s`, `100ms`, `1s`. |
| `slow_query.sample_ratio` | Float | `1.0` | The sampling ratio of slow query log. The value should be in the range of (0, 1]. For example, `0.1` means 10% of the slow queries will be logged and `1.0` means all slow queries will be logged. |
| `slow_query.ttl` | String | `90d` | The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`. |
-| `export_metrics` | -- | -- | The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
-| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
+| `tracing` | -- | -- | Exporting internal metrics via remote-write has been removed; use an external Prometheus-compatible collector (e.g. Prometheus, the OTel Collector, or VMA) to scrape the metrics instead.
The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.
When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
is set to "prof:true,prof_active:false". The official image adds this env variable.
Default is true. |
@@ -416,13 +402,7 @@
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.
Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
-| `export_metrics` | -- | -- | The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
-| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
+| `tracing` | -- | -- | Exporting internal metrics via remote-write has been removed; use an external Prometheus-compatible collector (e.g. Prometheus, the OTel Collector, or VMA) to scrape the metrics instead.
The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.
When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
is set to "prof:true,prof_active:false". The official image adds this env variable.
Default is true. |
@@ -587,13 +567,7 @@
| `logging.otlp_headers` | -- | -- | Additional OTLP headers, only valid when using OTLP http |
| `logging.tracing_sample_ratio` | -- | Unset | The percentage of tracing will be sampled and exported.
Valid range `[0, 1]`, 1 means all traces are sampled, 0 means all traces are not sampled, the default value is 1.
ratio > 1 are treated as 1. Fractions < 0 are treated as 0 |
| `logging.tracing_sample_ratio.default_ratio` | Float | `1.0` | -- |
-| `export_metrics` | -- | -- | The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape. |
-| `export_metrics.enable` | Bool | `false` | whether enable export metrics. |
-| `export_metrics.write_interval` | String | `30s` | The interval of export metrics. |
-| `export_metrics.remote_write` | -- | -- | -- |
-| `export_metrics.remote_write.url` | String | `""` | The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`. |
-| `export_metrics.remote_write.headers` | InlineTable | -- | HTTP headers of Prometheus remote-write carry. |
-| `tracing` | -- | -- | The tracing options. Only effect when compiled with `tokio-console` feature. |
+| `tracing` | -- | -- | Exporting internal metrics via remote-write has been removed; use an external Prometheus-compatible collector (e.g. Prometheus, the OTel Collector, or VMA) to scrape the metrics instead.
The tracing options. Only takes effect when compiled with the `tokio-console` feature. |
| `tracing.tokio_console_addr` | String | Unset | The tokio console address. |
| `memory` | -- | -- | The memory options. |
| `memory.enable_heap_profiling` | Bool | `true` | Whether to enable heap profiling activation during startup.
When enabled, heap profiling will be activated if the `MALLOC_CONF` environment variable
is set to "prof:true,prof_active:false". The official image adds this env variable.
Default is true. |
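Context for the notes above: the pull path is unchanged. Each GreptimeDB component keeps serving its internal metrics in the Prometheus text exposition format over its HTTP server (the `MetricsHandler` still wired up in `src/meta-srv/src/bootstrap.rs` below is one example), so any Prometheus-compatible agent can scrape it. A minimal sketch of how such an endpoint renders the process-local registry with the `prometheus` crate; illustrative only, not the exact handler body:

```rust
// Minimal sketch: render the process-local Prometheus registry in the text
// exposition format that an external scraper consumes. Illustrative only;
// not the exact body of GreptimeDB's HTTP metrics handler.
use prometheus::{Encoder, TextEncoder};

fn render_metrics_text() -> prometheus::Result<String> {
    // Collect every metric family registered in the default registry.
    let metric_families = prometheus::gather();
    let mut buf = Vec::new();
    TextEncoder::new().encode(&metric_families, &mut buf)?;
    // The exposition format is plain UTF-8 text.
    Ok(String::from_utf8(buf).expect("exposition output is valid UTF-8"))
}
```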
diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 7e04748059d2..2372798fd388 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -672,20 +672,7 @@ otlp_export_protocol = "http"
[logging.tracing_sample_ratio]
default_ratio = 1.0
-## The datanode can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }
+## Exporting internal metrics via remote-write has been removed; use an external Prometheus-compatible collector (e.g. Prometheus, the OTel Collector, or VMA) to scrape the metrics instead.
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
#+ [tracing]
diff --git a/config/frontend.example.toml b/config/frontend.example.toml
index 933e82e43188..3b376f0c40f0 100644
--- a/config/frontend.example.toml
+++ b/config/frontend.example.toml
@@ -305,20 +305,7 @@ sample_ratio = 1.0
## The TTL of the `slow_queries` system table. Default is `90d` when `record_type` is `system_table`.
ttl = "90d"
-## The frontend can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }
+## Exporting internal metrics via remote-write has been removed; use an external Prometheus-compatible collector (e.g. Prometheus, the OTel Collector, or VMA) to scrape the metrics instead.
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
#+ [tracing]
diff --git a/config/metasrv.example.toml b/config/metasrv.example.toml
index cf40ddc0b1d8..cf3b0f3fa86e 100644
--- a/config/metasrv.example.toml
+++ b/config/metasrv.example.toml
@@ -325,20 +325,7 @@ otlp_export_protocol = "http"
[logging.tracing_sample_ratio]
default_ratio = 1.0
-## The metasrv can export its metrics and send to Prometheus compatible service (e.g. `greptimedb` itself) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }
+## Exporting internal metrics via remote-write has been removed; use an external Prometheus-compatible collector (e.g. Prometheus, the OTel Collector, or VMA) to scrape the metrics instead.
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
#+ [tracing]
diff --git a/config/standalone.example.toml b/config/standalone.example.toml
index 22dd8105a97f..2963ff306777 100644
--- a/config/standalone.example.toml
+++ b/config/standalone.example.toml
@@ -768,26 +768,7 @@ default_ratio = 1.0
## @toml2docs:none-default
#+ sample_ratio = 1.0
-## The standalone can export its metrics and send to Prometheus compatible service (e.g. `greptimedb`) from remote-write API.
-## This is only used for `greptimedb` to export its own metrics internally. It's different from prometheus scrape.
-[export_metrics]
-## whether enable export metrics.
-enable = false
-## The interval of export metrics.
-write_interval = "30s"
-
-## For `standalone` mode, `self_import` is recommended to collect metrics generated by itself
-## You must create the database before enabling it.
-[export_metrics.self_import]
-## @toml2docs:none-default
-db = "greptime_metrics"
-
-[export_metrics.remote_write]
-## The prometheus remote write endpoint that the metrics send to. The url example can be: `http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics`.
-url = ""
-
-## HTTP headers of Prometheus remote-write carry.
-headers = { }
+## Exporting internal metrics via remote-write has been removed; use an external Prometheus-compatible collector (e.g. Prometheus, the OTel Collector, or VMA) to scrape the metrics instead. To keep the metrics queryable in GreptimeDB (the old `self_import` behavior), have the collector remote-write what it scrapes to `/v1/prometheus/write?db=greptime_metrics`.
## The tracing options. Only takes effect when compiled with the `tokio-console` feature.
#+ [tracing]
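What was removed is only the push loop: the task protobuf-encoded the gathered metrics as a remote-write request, snappy-compressed the payload, and POSTed it every `write_interval`. External agents speak the same protocol, which is what the replacement guidance relies on. A hedged sketch of that push path, not GreptimeDB code, assuming the `prost`, `snap`, and `reqwest` crates as dependencies:

```rust
// Hedged sketch (not GreptimeDB code) of the push path the removed task
// implemented: protobuf-encode a remote-write request, snappy-compress it,
// and POST it with the standard remote-write headers.
use prost::Message;

async fn push_remote_write(
    client: &reqwest::Client,
    url: &str,
    // Any prost-generated message; in the removed task this was the
    // `WriteRequest` type from the Prometheus remote-write spec.
    req: &impl Message,
) -> Result<(), Box<dyn std::error::Error>> {
    let body = snap::raw::Encoder::new().compress_vec(&req.encode_to_vec())?;
    client
        .post(url) // e.g. http://127.0.0.1:4000/v1/prometheus/write?db=greptime_metrics
        .header("Content-Type", "application/x-protobuf")
        .header("Content-Encoding", "snappy")
        .header("X-Prometheus-Remote-Write-Version", "0.1.0")
        .body(body)
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}
```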
diff --git a/src/catalog/src/system_schema/information_schema.rs b/src/catalog/src/system_schema/information_schema.rs
index 00313a78e650..811b5055a7c8 100644
--- a/src/catalog/src/system_schema/information_schema.rs
+++ b/src/catalog/src/system_schema/information_schema.rs
@@ -22,7 +22,6 @@ mod procedure_info;
pub mod process_list;
pub mod region_peers;
mod region_statistics;
-mod runtime_metrics;
pub mod schemata;
mod table_constraints;
mod table_names;
@@ -64,7 +63,6 @@ use crate::system_schema::information_schema::information_memory_table::get_sche
use crate::system_schema::information_schema::key_column_usage::InformationSchemaKeyColumnUsage;
use crate::system_schema::information_schema::partitions::InformationSchemaPartitions;
use crate::system_schema::information_schema::region_peers::InformationSchemaRegionPeers;
-use crate::system_schema::information_schema::runtime_metrics::InformationSchemaMetrics;
use crate::system_schema::information_schema::schemata::InformationSchemaSchemata;
use crate::system_schema::information_schema::table_constraints::InformationSchemaTableConstraints;
use crate::system_schema::information_schema::tables::InformationSchemaTables;
@@ -214,7 +212,6 @@ impl SystemSchemaProviderInner for InformationSchemaProvider {
self.catalog_name.clone(),
self.catalog_manager.clone(),
)) as _),
- RUNTIME_METRICS => Some(Arc::new(InformationSchemaMetrics::new())),
PARTITIONS => Some(Arc::new(InformationSchemaPartitions::new(
self.catalog_name.clone(),
self.catalog_manager.clone(),
@@ -300,10 +297,6 @@ impl InformationSchemaProvider {
// authentication details, and other critical information.
// Only put these tables under `greptime` catalog to prevent info leak.
if self.catalog_name == DEFAULT_CATALOG_NAME {
- tables.insert(
- RUNTIME_METRICS.to_string(),
- self.build_table(RUNTIME_METRICS).unwrap(),
- );
tables.insert(
BUILD_INFO.to_string(),
self.build_table(BUILD_INFO).unwrap(),
diff --git a/src/catalog/src/system_schema/information_schema/runtime_metrics.rs b/src/catalog/src/system_schema/information_schema/runtime_metrics.rs
deleted file mode 100644
index 5ccb871321f0..000000000000
--- a/src/catalog/src/system_schema/information_schema/runtime_metrics.rs
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::sync::Arc;
-
-use arrow_schema::SchemaRef as ArrowSchemaRef;
-use common_catalog::consts::INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID;
-use common_error::ext::BoxedError;
-use common_recordbatch::adapter::RecordBatchStreamAdapter;
-use common_recordbatch::{RecordBatch, SendableRecordBatchStream};
-use common_time::util::current_time_millis;
-use datafusion::execution::TaskContext;
-use datafusion::physical_plan::SendableRecordBatchStream as DfSendableRecordBatchStream;
-use datafusion::physical_plan::stream::RecordBatchStreamAdapter as DfRecordBatchStreamAdapter;
-use datafusion::physical_plan::streaming::PartitionStream as DfPartitionStream;
-use datatypes::prelude::{ConcreteDataType, MutableVector};
-use datatypes::scalars::ScalarVectorBuilder;
-use datatypes::schema::{ColumnSchema, Schema, SchemaRef};
-use datatypes::vectors::{
- ConstantVector, Float64VectorBuilder, StringVectorBuilder, TimestampMillisecondVector,
- VectorRef,
-};
-use itertools::Itertools;
-use snafu::ResultExt;
-use store_api::storage::{ScanRequest, TableId};
-
-use crate::error::{CreateRecordBatchSnafu, InternalSnafu, Result};
-use crate::system_schema::information_schema::{InformationTable, RUNTIME_METRICS};
-
-#[derive(Debug)]
-pub(super) struct InformationSchemaMetrics {
- schema: SchemaRef,
-}
-
-const METRIC_NAME: &str = "metric_name";
-const METRIC_VALUE: &str = "value";
-const METRIC_LABELS: &str = "labels";
-const PEER_ADDR: &str = "peer_addr";
-const PEER_TYPE: &str = "peer_type";
-const TIMESTAMP: &str = "timestamp";
-
-/// The `information_schema.runtime_metrics` virtual table.
-/// It provides the GreptimeDB runtime metrics for the users by SQL.
-impl InformationSchemaMetrics {
- pub(super) fn new() -> Self {
- Self {
- schema: Self::schema(),
- }
- }
-
- fn schema() -> SchemaRef {
- Arc::new(Schema::new(vec![
- ColumnSchema::new(METRIC_NAME, ConcreteDataType::string_datatype(), false),
- ColumnSchema::new(METRIC_VALUE, ConcreteDataType::float64_datatype(), false),
- ColumnSchema::new(METRIC_LABELS, ConcreteDataType::string_datatype(), true),
- ColumnSchema::new(PEER_ADDR, ConcreteDataType::string_datatype(), true),
- ColumnSchema::new(PEER_TYPE, ConcreteDataType::string_datatype(), false),
- ColumnSchema::new(
- TIMESTAMP,
- ConcreteDataType::timestamp_millisecond_datatype(),
- false,
- ),
- ]))
- }
-
- fn builder(&self) -> InformationSchemaMetricsBuilder {
- InformationSchemaMetricsBuilder::new(self.schema.clone())
- }
-}
-
-impl InformationTable for InformationSchemaMetrics {
- fn table_id(&self) -> TableId {
- INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID
- }
-
- fn table_name(&self) -> &'static str {
- RUNTIME_METRICS
- }
-
- fn schema(&self) -> SchemaRef {
- self.schema.clone()
- }
-
- fn to_stream(&self, request: ScanRequest) -> Result<SendableRecordBatchStream> {
- let schema = self.schema.arrow_schema().clone();
- let mut builder = self.builder();
- let stream = Box::pin(DfRecordBatchStreamAdapter::new(
- schema,
- futures::stream::once(async move {
- builder
- .make_metrics(Some(request))
- .await
- .map(|x| x.into_df_record_batch())
- .map_err(Into::into)
- }),
- ));
-
- Ok(Box::pin(
- RecordBatchStreamAdapter::try_new(stream)
- .map_err(BoxedError::new)
- .context(InternalSnafu)?,
- ))
- }
-}
-
-struct InformationSchemaMetricsBuilder {
- schema: SchemaRef,
-
- metric_names: StringVectorBuilder,
- metric_values: Float64VectorBuilder,
- metric_labels: StringVectorBuilder,
- peer_addrs: StringVectorBuilder,
- peer_types: StringVectorBuilder,
-}
-
-impl InformationSchemaMetricsBuilder {
- fn new(schema: SchemaRef) -> Self {
- Self {
- schema,
- metric_names: StringVectorBuilder::with_capacity(42),
- metric_values: Float64VectorBuilder::with_capacity(42),
- metric_labels: StringVectorBuilder::with_capacity(42),
- peer_addrs: StringVectorBuilder::with_capacity(42),
- peer_types: StringVectorBuilder::with_capacity(42),
- }
- }
-
- fn add_metric(
- &mut self,
- metric_name: &str,
- labels: String,
- metric_value: f64,
- peer: Option<&str>,
- peer_type: &str,
- ) {
- self.metric_names.push(Some(metric_name));
- self.metric_values.push(Some(metric_value));
- self.metric_labels.push(Some(&labels));
- self.peer_addrs.push(peer);
- self.peer_types.push(Some(peer_type));
- }
-
- async fn make_metrics(&mut self, _request: Option<ScanRequest>) -> Result<RecordBatch> {
- let metric_families = prometheus::gather();
-
- let write_request =
- common_telemetry::metric::convert_metric_to_write_request(metric_families, None, 0);
-
- for ts in write_request.timeseries {
- //Safety: always has `__name__` label
- let metric_name = ts
- .labels
- .iter()
- .find_map(|label| {
- if label.name == "__name__" {
- Some(label.value.clone())
- } else {
- None
- }
- })
- .unwrap();
-
- self.add_metric(
- &metric_name,
- ts.labels
- .into_iter()
- .filter_map(|label| {
- if label.name == "__name__" {
- None
- } else {
- Some(format!("{}={}", label.name, label.value))
- }
- })
- .join(", "),
- // Safety: always has a sample
- ts.samples[0].value,
- // The peer column is always `None` for standalone
- None,
- "STANDALONE",
- );
- }
-
- // FIXME(dennis): fetching other peers metrics
- self.finish()
- }
-
- fn finish(&mut self) -> Result<RecordBatch> {
- let rows_num = self.metric_names.len();
-
- let timestamps = Arc::new(ConstantVector::new(
- Arc::new(TimestampMillisecondVector::from_slice([
- current_time_millis(),
- ])),
- rows_num,
- ));
-
- let columns: Vec<VectorRef> = vec![
- Arc::new(self.metric_names.finish()),
- Arc::new(self.metric_values.finish()),
- Arc::new(self.metric_labels.finish()),
- Arc::new(self.peer_addrs.finish()),
- Arc::new(self.peer_types.finish()),
- timestamps,
- ];
-
- RecordBatch::new(self.schema.clone(), columns).context(CreateRecordBatchSnafu)
- }
-}
-
-impl DfPartitionStream for InformationSchemaMetrics {
- fn schema(&self) -> &ArrowSchemaRef {
- self.schema.arrow_schema()
- }
-
- fn execute(&self, _: Arc<TaskContext>) -> DfSendableRecordBatchStream {
- let schema = self.schema.arrow_schema().clone();
- let mut builder = self.builder();
- Box::pin(DfRecordBatchStreamAdapter::new(
- schema,
- futures::stream::once(async move {
- builder
- .make_metrics(None)
- .await
- .map(|x| x.into_df_record_batch())
- .map_err(Into::into)
- }),
- ))
- }
-}
-
-#[cfg(test)]
-mod tests {
- use common_recordbatch::RecordBatches;
-
- use super::*;
-
- #[tokio::test]
- async fn test_make_metrics() {
- let metrics = InformationSchemaMetrics::new();
-
- let stream = metrics.to_stream(ScanRequest::default()).unwrap();
-
- let batches = RecordBatches::try_collect(stream).await.unwrap();
-
- let result_literal = batches.pretty_print().unwrap();
-
- assert!(result_literal.contains(METRIC_NAME));
- assert!(result_literal.contains(METRIC_VALUE));
- assert!(result_literal.contains(METRIC_LABELS));
- assert!(result_literal.contains(PEER_ADDR));
- assert!(result_literal.contains(PEER_TYPE));
- assert!(result_literal.contains(TIMESTAMP));
- }
-}
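For users who relied on `SELECT * FROM information_schema.runtime_metrics`, equivalent rows can be rebuilt on the client side from a scrape. A hedged sketch of the flattening the deleted table performed, with the metric name pulled out and the remaining labels joined as `k=v` pairs; it reads the default registry via the `prometheus` and `itertools` crates and covers counters and gauges only:

```rust
// Hedged sketch: reproduce the (metric_name, labels, value) rows that
// information_schema.runtime_metrics used to expose, directly from the
// default Prometheus registry. Only counter and gauge families are shown;
// histograms and summaries are skipped for brevity.
use itertools::Itertools;
use prometheus::proto::MetricType;

fn flatten_metrics() -> Vec<(String, String, f64)> {
    let mut rows = Vec::new();
    for family in prometheus::gather() {
        for metric in family.get_metric() {
            // Join labels as "k=v, k=v", as the deleted table did.
            let labels = metric
                .get_label()
                .iter()
                .map(|pair| format!("{}={}", pair.get_name(), pair.get_value()))
                .join(", ");
            let value = match family.get_field_type() {
                MetricType::COUNTER => metric.get_counter().get_value(),
                MetricType::GAUGE => metric.get_gauge().get_value(),
                _ => continue,
            };
            rows.push((family.get_name().to_string(), labels, value));
        }
    }
    rows
}
```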
diff --git a/src/catalog/src/system_schema/information_schema/table_names.rs b/src/catalog/src/system_schema/information_schema/table_names.rs
index 95e8fe74f44c..fefec9221b06 100644
--- a/src/catalog/src/system_schema/information_schema/table_names.rs
+++ b/src/catalog/src/system_schema/information_schema/table_names.rs
@@ -38,7 +38,6 @@ pub const TABLE_PRIVILEGES: &str = "table_privileges";
pub const TRIGGERS: &str = "triggers";
pub const GLOBAL_STATUS: &str = "global_status";
pub const SESSION_STATUS: &str = "session_status";
-pub const RUNTIME_METRICS: &str = "runtime_metrics";
pub const PARTITIONS: &str = "partitions";
pub const REGION_PEERS: &str = "region_peers";
pub const TABLE_CONSTRAINTS: &str = "table_constraints";
diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs
index ed9dda22d30b..d9271b43d742 100644
--- a/src/cmd/src/error.rs
+++ b/src/cmd/src/error.rs
@@ -99,13 +99,6 @@ pub enum Error {
source: flow::Error,
},
- #[snafu(display("Servers error"))]
- Servers {
- #[snafu(implicit)]
- location: Location,
- source: servers::error::Error,
- },
-
#[snafu(display("Failed to start frontend"))]
StartFrontend {
#[snafu(implicit)]
@@ -315,7 +308,6 @@ impl ErrorExt for Error {
Error::ShutdownFrontend { source, .. } => source.status_code(),
Error::StartMetaServer { source, .. } => source.status_code(),
Error::ShutdownMetaServer { source, .. } => source.status_code(),
- Error::Servers { source, .. } => source.status_code(),
Error::BuildMetaServer { source, .. } => source.status_code(),
Error::UnsupportedSelectorType { source, .. } => source.status_code(),
Error::BuildCli { source, .. } => source.status_code(),
diff --git a/src/cmd/src/frontend.rs b/src/cmd/src/frontend.rs
index d147fc648ee6..e5cd894cc2a3 100644
--- a/src/cmd/src/frontend.rs
+++ b/src/cmd/src/frontend.rs
@@ -40,7 +40,6 @@ use frontend::instance::builder::FrontendBuilder;
use frontend::server::Services;
use meta_client::{MetaClientOptions, MetaClientType};
use servers::addrs;
-use servers::export_metrics::ExportMetricsTask;
use servers::grpc::GrpcOptions;
use servers::tls::{TlsMode, TlsOption};
use snafu::{OptionExt, ResultExt};
@@ -445,9 +444,6 @@ impl StartCommand {
.context(error::StartFrontendSnafu)?;
let instance = Arc::new(instance);
- let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
- .context(error::ServersSnafu)?;
-
let servers = Services::new(opts, instance.clone(), plugins)
.build()
.context(error::StartFrontendSnafu)?;
@@ -456,7 +452,6 @@ impl StartCommand {
instance,
servers,
heartbeat_task,
- export_metrics_task,
};
Ok(Instance::new(frontend, guard))
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 9ee7d6b72891..f99bd7b7f7f2 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -77,7 +77,6 @@ use meta_srv::metasrv::{FLOW_ID_SEQ, TABLE_ID_SEQ};
use mito2::config::MitoConfig;
use query::options::QueryOptions;
use serde::{Deserialize, Serialize};
-use servers::export_metrics::{ExportMetricsOption, ExportMetricsTask};
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -155,7 +154,6 @@ pub struct StandaloneOptions {
pub user_provider: Option<String>,
/// Options for different store engines.
pub region_engine: Vec<RegionEngineConfig>,
- pub export_metrics: ExportMetricsOption,
pub tracing: TracingOptions,
pub init_regions_in_background: bool,
pub init_regions_parallelism: usize,
@@ -184,7 +182,6 @@ impl Default for StandaloneOptions {
procedure: ProcedureConfig::default(),
flow: FlowConfig::default(),
logging: LoggingOptions::default(),
- export_metrics: ExportMetricsOption::default(),
user_provider: None,
region_engine: vec![
RegionEngineConfig::Mito(MitoConfig::default()),
@@ -233,8 +230,6 @@ impl StandaloneOptions {
meta_client: None,
logging: cloned_opts.logging,
user_provider: cloned_opts.user_provider,
- // Handle the export metrics task run by standalone to frontend for execution
- export_metrics: cloned_opts.export_metrics,
max_in_flight_write_bytes: cloned_opts.max_in_flight_write_bytes,
slow_query: cloned_opts.slow_query,
..Default::default()
@@ -697,9 +692,6 @@ impl StartCommand {
.context(StartFlownodeSnafu)?;
flow_streaming_engine.set_frontend_invoker(invoker).await;
- let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
- .context(error::ServersSnafu)?;
-
let servers = Services::new(opts, fe_instance.clone(), plugins.clone())
.build()
.context(error::StartFrontendSnafu)?;
@@ -708,7 +700,6 @@ impl StartCommand {
instance: fe_instance,
servers,
heartbeat_task: None,
- export_metrics_task,
};
#[cfg(feature = "enterprise")]
diff --git a/src/cmd/tests/load_config_test.rs b/src/cmd/tests/load_config_test.rs
index da2dfd456c4c..29e9774d7b42 100644
--- a/src/cmd/tests/load_config_test.rs
+++ b/src/cmd/tests/load_config_test.rs
@@ -31,7 +31,6 @@ use meta_srv::selector::SelectorType;
use metric_engine::config::EngineConfig as MetricEngineConfig;
use mito2::config::MitoConfig;
use query::options::QueryOptions;
-use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use servers::tls::{TlsMode, TlsOption};
@@ -88,11 +87,7 @@ fn test_load_datanode_example_config() {
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
- export_metrics: ExportMetricsOption {
- self_import: None,
- remote_write: Some(Default::default()),
- ..Default::default()
- },
grpc: GrpcOptions::default()
.with_bind_addr("127.0.0.1:3001")
.with_server_addr("127.0.0.1:3001"),
@@ -138,11 +133,7 @@ fn test_load_frontend_example_config() {
..Default::default()
},
},
- export_metrics: ExportMetricsOption {
- self_import: None,
- remote_write: Some(Default::default()),
- ..Default::default()
- },
grpc: GrpcOptions::default()
.with_bind_addr("127.0.0.1:4001")
.with_server_addr("127.0.0.1:4001"),
@@ -187,11 +178,7 @@ fn test_load_metasrv_example_config() {
tcp_nodelay: true,
},
},
- export_metrics: ExportMetricsOption {
- self_import: None,
- remote_write: Some(Default::default()),
- ..Default::default()
- },
backend_tls: Some(TlsOption {
mode: TlsMode::Prefer,
cert_path: String::new(),
@@ -300,11 +287,7 @@ fn test_load_standalone_example_config() {
tracing_sample_ratio: Some(Default::default()),
..Default::default()
},
- export_metrics: ExportMetricsOption {
- self_import: Some(Default::default()),
- remote_write: Some(Default::default()),
- ..Default::default()
- },
http: HttpOptions {
cors_allowed_origins: vec!["https://example.com".to_string()],
..Default::default()
diff --git a/src/common/catalog/src/consts.rs b/src/common/catalog/src/consts.rs
index ebaccf076c33..ee184e5df1d3 100644
--- a/src/common/catalog/src/consts.rs
+++ b/src/common/catalog/src/consts.rs
@@ -84,8 +84,6 @@ pub const INFORMATION_SCHEMA_TRIGGERS_TABLE_ID: u32 = 24;
pub const INFORMATION_SCHEMA_GLOBAL_STATUS_TABLE_ID: u32 = 25;
/// id for information_schema.SESSION_STATUS
pub const INFORMATION_SCHEMA_SESSION_STATUS_TABLE_ID: u32 = 26;
-/// id for information_schema.RUNTIME_METRICS
-pub const INFORMATION_SCHEMA_RUNTIME_METRICS_TABLE_ID: u32 = 27;
/// id for information_schema.PARTITIONS
pub const INFORMATION_SCHEMA_PARTITIONS_TABLE_ID: u32 = 28;
/// id for information_schema.REGION_PEERS
diff --git a/src/datanode/src/config.rs b/src/datanode/src/config.rs
index 9829c1a98215..9fcc9acfe12d 100644
--- a/src/datanode/src/config.rs
+++ b/src/datanode/src/config.rs
@@ -28,7 +28,6 @@ use mito2::config::MitoConfig;
pub(crate) use object_store::config::ObjectStoreConfig;
use query::options::QueryOptions;
use serde::{Deserialize, Serialize};
-use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::heartbeat_options::HeartbeatOptions;
use servers::http::HttpOptions;
@@ -83,7 +82,6 @@ pub struct DatanodeOptions {
pub region_engine: Vec<RegionEngineConfig>,
pub logging: LoggingOptions,
pub enable_telemetry: bool,
- pub export_metrics: ExportMetricsOption,
pub tracing: TracingOptions,
pub query: QueryOptions,
pub memory: MemoryOptions,
@@ -130,7 +128,6 @@ impl Default for DatanodeOptions {
logging: LoggingOptions::default(),
heartbeat: HeartbeatOptions::datanode_default(),
enable_telemetry: true,
- export_metrics: ExportMetricsOption::default(),
tracing: TracingOptions::default(),
query: QueryOptions::default(),
memory: MemoryOptions::default(),
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index 5c9e86848f2e..b61ccbc7e79d 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -45,7 +45,6 @@ use object_store::manager::{ObjectStoreManager, ObjectStoreManagerRef};
use object_store::util::normalize_dir;
use query::QueryEngineFactory;
use query::dummy_catalog::{DummyCatalogManager, TableProviderFactoryRef};
-use servers::export_metrics::ExportMetricsTask;
use servers::server::ServerHandlers;
use snafu::{OptionExt, ResultExt, ensure};
use store_api::path_utils::WAL_DIR;
@@ -81,7 +80,6 @@ pub struct Datanode {
greptimedb_telemetry_task: Arc<GreptimeDBTelemetryTask>,
leases_notifier: Option<Arc<Notify>>,
plugins: Plugins,
- export_metrics_task: Option<ExportMetricsTask>,
}
impl Datanode {
@@ -93,10 +91,6 @@ impl Datanode {
self.start_telemetry();
- if let Some(t) = self.export_metrics_task.as_ref() {
- t.start(None).context(StartServerSnafu)?
- }
-
self.services.start_all().await.context(StartServerSnafu)
}
@@ -309,10 +303,6 @@ impl DatanodeBuilder {
None
};
- let export_metrics_task =
- ExportMetricsTask::try_new(&self.opts.export_metrics, Some(&self.plugins))
- .context(StartServerSnafu)?;
-
Ok(Datanode {
services: ServerHandlers::default(),
heartbeat_task,
@@ -321,7 +311,6 @@ impl DatanodeBuilder {
region_event_receiver,
leases_notifier,
plugins: self.plugins.clone(),
- export_metrics_task,
})
}
diff --git a/src/frontend/src/frontend.rs b/src/frontend/src/frontend.rs
index bf2e7a055851..4f6a657c89c2 100644
--- a/src/frontend/src/frontend.rs
+++ b/src/frontend/src/frontend.rs
@@ -23,7 +23,6 @@ use common_telemetry::logging::{LoggingOptions, SlowQueryOptions, TracingOptions
use meta_client::MetaClientOptions;
use query::options::QueryOptions;
use serde::{Deserialize, Serialize};
-use servers::export_metrics::{ExportMetricsOption, ExportMetricsTask};
use servers::grpc::GrpcOptions;
use servers::heartbeat_options::HeartbeatOptions;
use servers::http::HttpOptions;
@@ -34,7 +33,6 @@ use crate::error;
use crate::error::Result;
use crate::heartbeat::HeartbeatTask;
use crate::instance::Instance;
-use crate::instance::prom_store::ExportMetricHandler;
use crate::service_config::{
InfluxdbOptions, JaegerOptions, MysqlOptions, OpentsdbOptions, OtlpOptions, PostgresOptions,
PromStoreOptions,
@@ -62,7 +60,6 @@ pub struct FrontendOptions {
pub logging: LoggingOptions,
pub datanode: DatanodeClientOptions,
pub user_provider: Option<String>,
- pub export_metrics: ExportMetricsOption,
pub tracing: TracingOptions,
pub query: QueryOptions,
pub max_in_flight_write_bytes: Option<ReadableSize>,
@@ -92,7 +89,6 @@ impl Default for FrontendOptions {
logging: LoggingOptions::default(),
datanode: DatanodeClientOptions::default(),
user_provider: None,
- export_metrics: ExportMetricsOption::default(),
tracing: TracingOptions::default(),
query: QueryOptions::default(),
max_in_flight_write_bytes: None,
@@ -115,7 +111,6 @@ pub struct Frontend {
pub instance: Arc,
pub servers: ServerHandlers,
pub heartbeat_task: Option<HeartbeatTask>,
- pub export_metrics_task: Option<ExportMetricsTask>,
}
impl Frontend {
@@ -124,17 +119,6 @@ impl Frontend {
t.start().await?;
}
- if let Some(t) = self.export_metrics_task.as_ref() {
- if t.send_by_handler {
- let inserter = self.instance.inserter().clone();
- let statement_executor = self.instance.statement_executor().clone();
- let handler = ExportMetricHandler::new_handler(inserter, statement_executor);
- t.start(Some(handler)).context(error::StartServerSnafu)?
- } else {
- t.start(None).context(error::StartServerSnafu)?;
- }
- }
-
self.servers
.start_all()
.await
diff --git a/src/meta-srv/src/bootstrap.rs b/src/meta-srv/src/bootstrap.rs
index e349a0f61de5..ef57c0adbe45 100644
--- a/src/meta-srv/src/bootstrap.rs
+++ b/src/meta-srv/src/bootstrap.rs
@@ -43,7 +43,6 @@ use deadpool_postgres::{Config, Runtime};
use either::Either;
use etcd_client::{Client, ConnectOptions};
use servers::configurator::ConfiguratorRef;
-use servers::export_metrics::ExportMetricsTask;
use servers::http::{HttpServer, HttpServerBuilder};
use servers::metrics_handler::MetricsHandler;
use servers::server::Server;
@@ -95,8 +94,6 @@ pub struct MetasrvInstance {
plugins: Plugins,
- export_metrics_task: Option<ExportMetricsTask>,
-
/// gRPC serving state receiver. Only present if the gRPC server is started.
serve_state: Arc>>>>,
@@ -120,15 +117,12 @@ impl MetasrvInstance {
// put metasrv into plugins for later use
plugins.insert::>(metasrv.clone());
- let export_metrics_task = ExportMetricsTask::try_new(&opts.export_metrics, Some(&plugins))
- .context(error::InitExportMetricsTaskSnafu)?;
Ok(MetasrvInstance {
metasrv,
http_server: Either::Left(Some(builder)),
opts,
signal_sender: None,
plugins,
- export_metrics_task,
serve_state: Default::default(),
bind_addr: None,
})
@@ -156,10 +150,6 @@ impl MetasrvInstance {
self.metasrv.try_start().await?;
- if let Some(t) = self.export_metrics_task.as_ref() {
- t.start(None).context(error::InitExportMetricsTaskSnafu)?
- }
-
let (tx, rx) = mpsc::channel::<()>(1);
self.signal_sender = Some(tx);
diff --git a/src/meta-srv/src/error.rs b/src/meta-srv/src/error.rs
index f35497b60d76..b964427c01fb 100644
--- a/src/meta-srv/src/error.rs
+++ b/src/meta-srv/src/error.rs
@@ -304,13 +304,6 @@ pub enum Error {
source: servers::error::Error,
},
- #[snafu(display("Failed to init export metrics task"))]
- InitExportMetricsTask {
- #[snafu(implicit)]
- location: Location,
- source: servers::error::Error,
- },
-
#[snafu(display("Failed to parse address {}", addr))]
ParseAddr {
addr: String,
@@ -1053,7 +1046,6 @@ impl ErrorExt for Error {
| Error::ParseAddr { .. }
| Error::UnsupportedSelectorType { .. }
| Error::InvalidArguments { .. }
- | Error::InitExportMetricsTask { .. }
| Error::ProcedureNotFound { .. }
| Error::TooManyPartitions { .. }
| Error::TomlFormat { .. }
diff --git a/src/meta-srv/src/metasrv.rs b/src/meta-srv/src/metasrv.rs
index 82780ea721a5..5aa97efcc953 100644
--- a/src/meta-srv/src/metasrv.rs
+++ b/src/meta-srv/src/metasrv.rs
@@ -51,7 +51,6 @@ use common_telemetry::logging::{LoggingOptions, TracingOptions};
use common_telemetry::{error, info, warn};
use common_wal::config::MetasrvWalConfig;
use serde::{Deserialize, Serialize};
-use servers::export_metrics::ExportMetricsOption;
use servers::grpc::GrpcOptions;
use servers::http::HttpOptions;
use servers::tls::TlsOption;
@@ -168,8 +167,6 @@ pub struct MetasrvOptions {
pub data_home: String,
/// The WAL options.
pub wal: MetasrvWalConfig,
- /// The metrics export options.
- pub export_metrics: ExportMetricsOption,
/// The store key prefix. If it is not empty, all keys in the store will be prefixed with it.
/// This is useful when multiple metasrv clusters share the same store.
pub store_key_prefix: String,
@@ -233,7 +230,6 @@ impl fmt::Debug for MetasrvOptions {
.field("enable_telemetry", &self.enable_telemetry)
.field("data_home", &self.data_home)
.field("wal", &self.wal)
- .field("export_metrics", &self.export_metrics)
.field("store_key_prefix", &self.store_key_prefix)
.field("max_txn_ops", &self.max_txn_ops)
.field("flush_stats_factor", &self.flush_stats_factor)
@@ -291,7 +287,6 @@ impl Default for MetasrvOptions {
enable_telemetry: true,
data_home: DEFAULT_DATA_HOME.to_string(),
wal: MetasrvWalConfig::default(),
- export_metrics: ExportMetricsOption::default(),
store_key_prefix: String::new(),
max_txn_ops: 128,
flush_stats_factor: 3,
diff --git a/src/servers/src/error.rs b/src/servers/src/error.rs
index d36bdd1494f1..78f4f1d256f0 100644
--- a/src/servers/src/error.rs
+++ b/src/servers/src/error.rs
@@ -256,21 +256,6 @@ pub enum Error {
error: std::io::Error,
},
- #[snafu(display("Failed to send prometheus remote request"))]
- SendPromRemoteRequest {
- #[snafu(implicit)]
- location: Location,
- #[snafu(source)]
- error: reqwest::Error,
- },
-
- #[snafu(display("Invalid export metrics config, msg: {}", msg))]
- InvalidExportMetricsConfig {
- msg: String,
- #[snafu(implicit)]
- location: Location,
- },
-
#[snafu(display("Failed to compress prometheus remote request"))]
CompressPromRemoteRequest {
#[snafu(implicit)]
@@ -648,7 +633,6 @@ impl ErrorExt for Error {
| StartHttp { .. }
| StartGrpc { .. }
| TcpBind { .. }
- | SendPromRemoteRequest { .. }
| BuildHttpResponse { .. }
| Arrow { .. }
| FileWatch { .. } => StatusCode::Internal,
@@ -685,7 +669,6 @@ impl ErrorExt for Error {
| DecompressSnappyPromRemoteRequest { .. }
| DecompressZstdPromRemoteRequest { .. }
| InvalidPromRemoteRequest { .. }
- | InvalidExportMetricsConfig { .. }
| InvalidFlightTicket { .. }
| InvalidPrepareStatement { .. }
| DataFrame { .. }
diff --git a/src/servers/src/export_metrics.rs b/src/servers/src/export_metrics.rs
deleted file mode 100644
index aac7e8dda497..000000000000
--- a/src/servers/src/export_metrics.rs
+++ /dev/null
@@ -1,369 +0,0 @@
-// Copyright 2023 Greptime Team
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::collections::HashMap;
-use std::sync::Arc;
-use std::time::Duration;
-
-use common_base::Plugins;
-use common_telemetry::metric::{MetricFilter, convert_metric_to_write_request};
-use common_telemetry::{error, info};
-use common_time::Timestamp;
-use prost::Message;
-use reqwest::header::{HeaderMap, HeaderName, HeaderValue};
-use serde::{Deserialize, Serialize};
-use session::context::QueryContextBuilder;
-use snafu::{ResultExt, ensure};
-use tokio::time::{self, Interval};
-
-use crate::error::{InvalidExportMetricsConfigSnafu, Result, SendPromRemoteRequestSnafu};
-use crate::prom_store::{snappy_compress, to_grpc_row_insert_requests};
-use crate::query_handler::PromStoreProtocolHandlerRef;
-
-/// Use to export the metrics generated by greptimedb.
-///
-/// Encoded to Prometheus [RemoteWrite format](https://prometheus.io/docs/concepts/remote_write_spec/),
-/// and send to Prometheus remote-write compatible receiver (e.g. send to `greptimedb` itself)
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
-#[serde(default)]
-pub struct ExportMetricsOption {
- pub enable: bool,
- #[serde(with = "humantime_serde")]
- pub write_interval: Duration,
- pub self_import: Option<SelfImportOption>,
- pub remote_write: Option<RemoteWriteOption>,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Default)]
-#[serde(default)]
-pub struct RemoteWriteOption {
- pub url: String,
- pub headers: HashMap<String, String>,
-}
-
-#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
-#[serde(default)]
-pub struct SelfImportOption {
- pub db: String,
-}
-
-impl Default for SelfImportOption {
- fn default() -> Self {
- Self {
- db: "greptime_metrics".to_string(),
- }
- }
-}
-
-impl Default for ExportMetricsOption {
- fn default() -> Self {
- Self {
- enable: false,
- write_interval: Duration::from_secs(30),
- self_import: None,
- remote_write: None,
- }
- }
-}
-
-#[derive(Default, Clone)]
-pub struct ExportMetricsTask {
- config: ExportMetricsOption,
- filter: Option,
- headers: HeaderMap,
- pub send_by_handler: bool,
-}
-
-impl ExportMetricsTask {
- pub fn try_new(
- config: &ExportMetricsOption,
- plugins: Option<&Plugins>,
- ) -> Result<Option<Self>> {