From 6e19fe198bedf6a9595a1278400a527bdf8930c2 Mon Sep 17 00:00:00 2001 From: Christopher Kolstad Date: Wed, 5 Feb 2025 16:03:27 +0100 Subject: [PATCH] More work for dealing with prometheus data --- server/src/cli.rs | 6 + server/src/client_api.rs | 30 ++++- server/src/http/instance_data.rs | 47 +++++++ server/src/http/mod.rs | 3 +- server/src/http/unleash_client.rs | 83 ++++++++++-- server/src/internal_backstage.rs | 34 ++++- server/src/main.rs | 20 ++- server/src/metrics/edge_metrics.rs | 207 +++++++++++++++++++++++++++++ server/src/metrics/mod.rs | 1 + server/src/prom_metrics.rs | 10 ++ server/src/urls.rs | 12 +- 11 files changed, 426 insertions(+), 27 deletions(-) create mode 100644 server/src/http/instance_data.rs create mode 100644 server/src/metrics/edge_metrics.rs diff --git a/server/src/cli.rs b/server/src/cli.rs index 649a63b0..6552801b 100644 --- a/server/src/cli.rs +++ b/server/src/cli.rs @@ -312,6 +312,12 @@ pub struct InternalBackstageArgs { /// Used to show tokens used to refresh feature caches, but also tokens already validated/invalidated against upstream #[clap(long, env, global = true)] pub disable_tokens_endpoint: bool, + + /// Disables /internal-backstage/instancedata endpoint + /// + /// Used to show instance data for the edge instance. 
+ #[clap(long, env, global = true)] + pub disable_instance_data_endpoint: bool, } #[derive(Args, Debug, Clone)] diff --git a/server/src/client_api.rs b/server/src/client_api.rs index 8b331110..1bc488f7 100644 --- a/server/src/client_api.rs +++ b/server/src/client_api.rs @@ -1,3 +1,5 @@ +use std::sync::RwLock; + use crate::cli::{EdgeArgs, EdgeMode}; use crate::error::EdgeError; use crate::feature_cache::FeatureCache; @@ -7,14 +9,16 @@ use crate::filters::{ use crate::http::broadcaster::Broadcaster; use crate::http::refresher::feature_refresher::FeatureRefresher; use crate::metrics::client_metrics::MetricsCache; +use crate::metrics::edge_metrics::EdgeInstanceData; use crate::tokens::cache_key; use crate::types::{ self, BatchMetricsRequestBody, EdgeJsonResult, EdgeResult, EdgeToken, FeatureFilters, }; -use actix_web::web::{self, Data, Json, Query}; +use actix_web::web::{self, post, Data, Json, Query}; use actix_web::Responder; use actix_web::{get, post, HttpRequest, HttpResponse}; use dashmap::DashMap; +use tracing::{info, instrument}; use unleash_types::client_features::{ClientFeature, ClientFeatures}; use unleash_types::client_metrics::{ClientApplication, ClientMetrics, ConnectVia}; @@ -272,6 +276,27 @@ pub async fn post_bulk_metrics( ); Ok(HttpResponse::Accepted().finish()) } + +#[utoipa::path(context_path = "/api/client", responses((status = 202, description = "Accepted Instance data"), (status = 403, description = "Was not allowed to post instance data")), request_body = EdgeInstanceData, security( ("Authorization" = []) ) )] +#[post("/metrics/edge")] +#[instrument(skip(_edge_token, instance_data, connected_instances))] +pub async fn post_edge_instance_data( + _edge_token: EdgeToken, + instance_data: Json<EdgeInstanceData>, + connected_instances: Data<RwLock<Vec<EdgeInstanceData>>>, +) -> EdgeResult<HttpResponse> { + tracing::info!("Accepted {instance_data:?}"); + connected_instances + .write() + .unwrap() + .push(instance_data.into_inner()); + info!("Adding to {connected_instances:?}"); + 
Ok(HttpResponse::Accepted().finish()) } + pub fn configure_client_api(cfg: &mut web::ServiceConfig) { let client_scope = web::scope("/client") .wrap(crate::middleware::as_async_middleware::as_async_middleware( @@ -282,7 +307,8 @@ pub fn configure_client_api(cfg: &mut web::ServiceConfig) { .service(register) .service(metrics) .service(post_bulk_metrics) - .service(stream_features); + .service(stream_features) + .service(post_edge_instance_data); cfg.service(client_scope); } diff --git a/server/src/http/instance_data.rs b/server/src/http/instance_data.rs new file mode 100644 index 00000000..f6591ecc --- /dev/null +++ b/server/src/http/instance_data.rs @@ -0,0 +1,47 @@ +use std::sync::{Arc, RwLock}; + +use prometheus::Registry; +use tracing::{debug, info, trace}; + +use crate::metrics::edge_metrics::EdgeInstanceData; + +use super::refresher::feature_refresher::FeatureRefresher; + +pub async fn send_instance_data( + feature_refresher: Arc<FeatureRefresher>, + prometheus_registry: &Registry, + our_instance_data: Arc<EdgeInstanceData>, + downstream_instance_data: Arc<RwLock<Vec<EdgeInstanceData>>>, +) { + loop { + trace!("Looping instance data sending"); + let mut observed_data = our_instance_data.observe(prometheus_registry); + { + let downstream_instance_data = downstream_instance_data.read().unwrap().clone(); + for downstream in downstream_instance_data { + observed_data = observed_data.add_downstream(downstream); + } + } + { + downstream_instance_data.write().unwrap().clear(); + } + let status = feature_refresher + .unleash_client + .send_instance_data( + observed_data, + &feature_refresher + .tokens_to_refresh + .iter() + .next() + .map(|t| t.value().clone()) + .map(|t| t.token.token.clone()) + .expect("No token to refresh, cowardly panic'ing"), + ) + .await; + match status { + Ok(_) => info!("Posted instance data"), + Err(_) => info!("Failed to post instance data"), + } + tokio::time::sleep(std::time::Duration::from_secs(15)).await; + } +} diff --git a/server/src/http/mod.rs b/server/src/http/mod.rs index c223d055..d6f5e8c0 
100644 --- a/server/src/http/mod.rs +++ b/server/src/http/mod.rs @@ -2,5 +2,6 @@ pub mod background_send_metrics; pub mod broadcaster; pub(crate) mod headers; -pub mod unleash_client; +pub mod instance_data; pub mod refresher; +pub mod unleash_client; diff --git a/server/src/http/unleash_client.rs b/server/src/http/unleash_client.rs index e1829e34..e7379d6a 100644 --- a/server/src/http/unleash_client.rs +++ b/server/src/http/unleash_client.rs @@ -24,6 +24,7 @@ use crate::http::headers::{ UNLEASH_APPNAME_HEADER, UNLEASH_CLIENT_SPEC_HEADER, UNLEASH_INSTANCE_ID_HEADER, }; use crate::metrics::client_metrics::MetricsBatch; +use crate::metrics::edge_metrics::EdgeInstanceData; use crate::tls::build_upstream_certificate; use crate::types::{ ClientFeaturesDeltaResponse, ClientFeaturesResponse, EdgeResult, EdgeToken, @@ -55,6 +56,20 @@ lazy_static! { vec![1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0, 5000.0] ) .unwrap(); + pub static ref METRICS_UPLOAD: HistogramVec = register_histogram_vec!( + "client_metrics_upload", + "Timings for uploading client metrics in milliseconds", + &["status_code"], + vec![1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0] + ) + .unwrap(); + pub static ref INSTANCE_DATA_UPLOAD: HistogramVec = register_histogram_vec!( + "instance_data_upload", + "Timings for uploading Edge instance data in milliseconds", + &["status_code"], + vec![1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0] + ) + .unwrap(); pub static ref CLIENT_FEATURE_FETCH_FAILURES: IntGaugeVec = register_int_gauge_vec!( Opts::new( "client_feature_fetch_failures", @@ -243,7 +258,6 @@ impl UnleashClient { #[cfg(test)] pub fn new_insecure(server_url: &str) -> Result<Self, EdgeError> { - Ok(Self { urls: UnleashUrls::from_str(server_url)?, backing_client: new_reqwest_client( @@ -518,6 +532,7 @@ impl UnleashClient { token: &str, ) -> EdgeResult<()> { trace!("Sending metrics to bulk endpoint"); + let started_at = Utc::now(); let result = self .backing_client 
.post(self.urls.client_bulk_metrics_url.to_string()) @@ -529,6 +544,48 @@ impl UnleashClient { info!("Failed to send metrics to /api/client/metrics/bulk endpoint {e:?}"); EdgeError::EdgeMetricsError })?; + let ended = Utc::now(); + METRICS_UPLOAD + .with_label_values(&[result.status().as_str()]) + .observe(ended.signed_duration_since(started_at).num_milliseconds() as f64); + if result.status().is_success() { + Ok(()) + } else { + match result.status() { + StatusCode::BAD_REQUEST => Err(EdgeMetricsRequestError( + result.status(), + result.json().await.ok(), + )), + _ => Err(EdgeMetricsRequestError(result.status(), None)), + } + } + } + + pub async fn send_instance_data( + &self, + instance_data: EdgeInstanceData, + token: &str, + ) -> EdgeResult<()> { + let started_at = Utc::now(); + let result = self + .backing_client + .post(self.urls.edge_instance_data_url.to_string()) + .headers(self.header_map(Some(token.into()))) + .json(&instance_data) + .send() + .await + .map_err(|e| { + info!("Failed to send instance data: {e:?}"); + EdgeError::EdgeMetricsError + })?; + let ended_at = Utc::now(); + INSTANCE_DATA_UPLOAD + .with_label_values(&[result.status().as_str()]) + .observe( + ended_at + .signed_duration_since(started_at) + .num_milliseconds() as f64, + ); if result.status().is_success() { Ok(()) } else { @@ -609,17 +666,6 @@ mod tests { use std::path::PathBuf; use std::str::FromStr; - use actix_http::{body::MessageBody, HttpService, TlsAcceptorConfig}; - use actix_http_test::{test_server, TestServer}; - use actix_middleware_etag::Etag; - use actix_service::map_config; - use actix_web::{ - dev::{AppConfig, ServiceRequest, ServiceResponse}, - http::header::EntityTag, - web, App, HttpResponse, - }; - use chrono::Duration; - use unleash_types::client_features::{ClientFeature, ClientFeatures}; use crate::cli::ClientIdentity; use crate::http::unleash_client::new_reqwest_client; use crate::{ @@ -631,8 +677,19 @@ mod tests { ValidateTokensRequest, }, }; + use 
actix_http::{body::MessageBody, HttpService, TlsAcceptorConfig}; + use actix_http_test::{test_server, TestServer}; + use actix_middleware_etag::Etag; + use actix_service::map_config; + use actix_web::{ + dev::{AppConfig, ServiceRequest, ServiceResponse}, + http::header::EntityTag, + web, App, HttpResponse, + }; + use chrono::Duration; + use unleash_types::client_features::{ClientFeature, ClientFeatures}; - use super::{EdgeTokens, UnleashClient, ClientMetaInformation}; + use super::{ClientMetaInformation, EdgeTokens, UnleashClient}; impl ClientFeaturesRequest { pub(crate) fn new(api_key: String, etag: Option<EntityTag>) -> Self { diff --git a/server/src/internal_backstage.rs b/server/src/internal_backstage.rs index ed55f703..84bfcd64 100644 --- a/server/src/internal_backstage.rs +++ b/server/src/internal_backstage.rs @@ -10,13 +10,15 @@ use serde::{Deserialize, Serialize}; use unleash_types::client_features::ClientFeatures; use unleash_types::client_metrics::ClientApplication; -use crate::http::refresher::feature_refresher::FeatureRefresher; use crate::metrics::actix_web_metrics::PrometheusMetricsHandler; use crate::metrics::client_metrics::MetricsCache; use crate::types::{BuildInfo, EdgeJsonResult, EdgeToken, TokenInfo, TokenRefresh}; use crate::types::{ClientMetric, MetricsInfo, Status}; use crate::{auth::token_validator::TokenValidator, cli::InternalBackstageArgs}; use crate::{error::EdgeError, feature_cache::FeatureCache}; +use crate::{ + http::refresher::feature_refresher::FeatureRefresher, metrics::edge_metrics::EdgeInstanceData, +}; #[derive(Debug, Serialize, Deserialize)] pub struct EdgeStatus { @@ -146,24 +148,44 @@ pub async fn features( Ok(Json(features)) } +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct DebugEdgeInstanceData { + pub this_instance: EdgeInstanceData, + pub connected_instances: Vec<EdgeInstanceData>, +} + +#[get("/instancedata")] +pub async fn instance_data( + this_instance: web::Data<EdgeInstanceData>, + downstream_instance_data: web::Data<RwLock<Vec<EdgeInstanceData>>>, +) -> EdgeJsonResult<DebugEdgeInstanceData> { + 
Ok(Json(DebugEdgeInstanceData { + this_instance: this_instance.get_ref().clone(), + connected_instances: downstream_instance_data.read().unwrap().clone(), + })) +} + pub fn configure_internal_backstage( cfg: &mut web::ServiceConfig, metrics_handler: PrometheusMetricsHandler, - internal_backtage_args: InternalBackstageArgs, + internal_backstage_args: InternalBackstageArgs, ) { cfg.service(health).service(info).service(ready); - if !internal_backtage_args.disable_tokens_endpoint { + if !internal_backstage_args.disable_tokens_endpoint { cfg.service(tokens); } - if !internal_backtage_args.disable_metrics_endpoint { + if !internal_backstage_args.disable_metrics_endpoint { cfg.service(web::resource("/metrics").route(web::get().to(metrics_handler))); } - if !internal_backtage_args.disable_metrics_batch_endpoint { + if !internal_backstage_args.disable_metrics_batch_endpoint { cfg.service(metrics_batch); } - if !internal_backtage_args.disable_features_endpoint { + if !internal_backstage_args.disable_features_endpoint { cfg.service(features); } + if !internal_backstage_args.disable_instance_data_endpoint { + cfg.service(instance_data); + } } #[cfg(test)] diff --git a/server/src/main.rs b/server/src/main.rs index 31a90161..8d355938 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -29,11 +29,12 @@ use unleash_edge::{internal_backstage, tls}; #[cfg(not(tarpaulin_include))] #[actix_web::main] async fn main() -> Result<(), anyhow::Error> { + use std::sync::RwLock; + use unleash_edge::{ http::{broadcaster::Broadcaster, unleash_client::ClientMetaInformation}, - metrics::metrics_pusher, + metrics::{edge_metrics::EdgeInstanceData, metrics_pusher}, }; - let args = CliArgs::parse(); let disable_all_endpoint = args.disable_all_endpoint; if args.markdown_help { @@ -62,6 +63,8 @@ async fn main() -> Result<(), anyhow::Error> { instance_id: args.clone().instance_id, }; let app_name = args.app_name.clone(); + let our_instance_data_for_app_context = 
Arc::new(EdgeInstanceData::new(&app_name)); + let our_instance_data = our_instance_data_for_app_context.clone(); let instance_id = args.instance_id.clone(); let custom_headers = match args.mode { cli::EdgeMode::Edge(ref edge) => edge.custom_client_headers.clone(), @@ -89,6 +92,9 @@ async fn main() -> Result<(), anyhow::Error> { let openapi = openapi::ApiDoc::openapi(); let refresher_for_app_data = feature_refresher.clone(); let prom_registry_for_write = metrics_handler.registry.clone(); + let instances_observed_for_app_context: Arc<RwLock<Vec<EdgeInstanceData>>> = + Arc::new(RwLock::new(Vec::new())); + let downstream_instance_data = instances_observed_for_app_context.clone(); let broadcaster = Broadcaster::new(features_cache.clone()); @@ -111,7 +117,9 @@ async fn main() -> Result<(), anyhow::Error> { .app_data(web::Data::from(token_cache.clone())) .app_data(web::Data::from(features_cache.clone())) .app_data(web::Data::from(engine_cache.clone())) - .app_data(web::Data::from(broadcaster.clone())); + .app_data(web::Data::from(broadcaster.clone())) + .app_data(web::Data::from(our_instance_data_for_app_context.clone())) + .app_data(web::Data::from(instances_observed_for_app_context.clone())); app = match token_validator.clone() { Some(v) => app.app_data(web::Data::from(v)), @@ -173,6 +181,7 @@ async fn main() -> Result<(), anyhow::Error> { let custom_headers = custom_headers.clone(); tokio::spawn(async move { let _ = refresher_for_background + .clone() .start_streaming_features_background_task( ClientMetaInformation { app_name, @@ -209,9 +218,12 @@ async fn main() -> Result<(), anyhow::Error> { _ = validator.schedule_revalidation_of_startup_tokens(edge.tokens, lazy_feature_refresher) => { tracing::info!("Token validator validation of startup tokens was unexpectedly shut down"); } - _ = metrics_pusher::prometheus_remote_write(prom_registry_for_write, edge.prometheus_remote_write_url, edge.prometheus_push_interval, edge.prometheus_username, edge.prometheus_password, app_name) => { + _ = 
metrics_pusher::prometheus_remote_write(prom_registry_for_write.clone(), edge.prometheus_remote_write_url, edge.prometheus_push_interval, edge.prometheus_username, edge.prometheus_password, app_name) => { tracing::info!("Prometheus push unexpectedly shut down"); } + _ = unleash_edge::http::instance_data::send_instance_data(refresher.clone(), &prom_registry_for_write, our_instance_data.clone(), downstream_instance_data.clone()) => { + tracing::info!("Instance data pusher unexpectedly quit"); + } } } cli::EdgeMode::Offline(offline_args) if offline_args.reload_interval > 0 => { diff --git a/server/src/metrics/edge_metrics.rs b/server/src/metrics/edge_metrics.rs new file mode 100644 index 00000000..5a86aac6 --- /dev/null +++ b/server/src/metrics/edge_metrics.rs @@ -0,0 +1,207 @@ +use ahash::{HashMap, HashSet}; +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use tracing::debug; +use ulid::Ulid; +use utoipa::ToSchema; + +use crate::types::BuildInfo; + +#[derive(Debug, Default, Clone, Copy, Deserialize, Serialize, ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct LatencyMetrics { + pub avg: f64, + pub count: f64, +} + +impl LatencyMetrics { + pub fn new() -> Self { + Self { + avg: 0.0, + count: 0.0, + } + } +} + +#[derive(Debug, Default, Clone, Copy, Deserialize, Serialize, ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct ProcessMetrics { + pub cpu_usage: f64, + pub memory_usage: f64, +} + +#[derive(Debug, Default, Clone, Deserialize, Serialize, ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct InstanceTraffic { + pub get: HashMap<String, LatencyMetrics>, + pub post: HashMap<String, LatencyMetrics>, +} + +#[derive(Debug, Default, Clone, Deserialize, Serialize, ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct UpstreamLatency { + pub features: LatencyMetrics, + pub metrics: LatencyMetrics, + pub edge: LatencyMetrics, +} + +#[derive(Debug, Clone, Deserialize, Serialize, ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct EdgeInstanceData { + pub identifier: 
String, + pub app_name: String, + pub region: Option<String>, + pub edge_version: String, + pub process_metrics: Option<ProcessMetrics>, + pub started: DateTime<Utc>, + pub traffic: InstanceTraffic, + pub latency_upstream: UpstreamLatency, + pub connected_streaming_clients: u64, + pub connected_edges: Vec<EdgeInstanceData>, +} + +impl EdgeInstanceData { + pub fn new(app_name: &str) -> Self { + let build_info = BuildInfo::default(); + Self { + identifier: Ulid::new().to_string(), + app_name: app_name.to_string(), + region: std::env::var("AWS_REGION").ok(), + edge_version: build_info.package_version.clone(), + process_metrics: None, + started: Utc::now(), + traffic: InstanceTraffic::default(), + latency_upstream: UpstreamLatency::default(), + connected_edges: Vec::new(), + connected_streaming_clients: 0, + } + } + + pub fn add_downstream(&mut self, downstream_edge: EdgeInstanceData) -> Self { + self.connected_edges.push(downstream_edge); + self.clone() + } + + pub fn observe(&self, registry: &prometheus::Registry) -> Self { + let mut observed = self.clone(); + let mut cpu_seconds = 0; + let mut resident_memory = 0; + let mut desired_urls = HashSet::default(); + desired_urls.insert("/api/client/features"); + desired_urls.insert("/api/client/metrics"); + desired_urls.insert("/api/client/metrics/bulk"); + desired_urls.insert("/api/client/metrics/edge"); + desired_urls.insert("/api/frontend"); + let mut get_requests = HashMap::default(); + let mut post_requests = HashMap::default(); + + for family in registry.gather().iter() { + match family.get_name() { + "http_server_duration_milliseconds" => { + family + .get_metric() + .iter() + .filter(|m| { + m.has_histogram() + && m.get_label().iter().any(|l| { + l.get_name() == "url_path" + && desired_urls.contains(l.get_value()) + }) + && m.get_label().iter().any(|l| { + l.get_name() == "http_response_status_code" + && l.get_value() == "200" + }) + }) + .for_each(|m| { + let labels = m.get_label(); + + let path = labels + .iter() + .find(|l| l.get_name() == "url_path") + 
.unwrap() + .get_value(); + let method = labels + .iter() + .find(|l| l.get_name() == "http_request_method") + .unwrap() + .get_value(); + + let latency = if method == "GET" { + get_requests + .entry(path.to_string()) + .or_insert(LatencyMetrics::new()) + } else { + post_requests + .entry(path.to_string()) + .or_insert(LatencyMetrics::new()) + }; + let total = m.get_histogram().get_sample_sum(); + let count = m.get_histogram().get_sample_count() as f64; + *latency = LatencyMetrics { + avg: total / count, + count, + }; + }); + } + "process_cpu_seconds_total" => { + if let Some(cpu_second_metric) = family.get_metric().last() { + cpu_seconds = cpu_second_metric.get_counter().get_value() as u64; + } + } + "process_resident_memory_bytes" => { + if let Some(resident_memory_metric) = family.get_metric().last() { + resident_memory = resident_memory_metric.get_gauge().get_value() as u64; + } + } + "client_metrics_upload" => { + if let Some(metrics_upload_metric) = family.get_metric().last() { + let count = metrics_upload_metric.get_histogram().get_sample_count(); + observed.latency_upstream.metrics = LatencyMetrics { + avg: metrics_upload_metric.get_histogram().get_sample_sum() + / count as f64, + count: count as f64, + } + } + } + "instance_data_upload" => { + if let Some(instance_data_upload_metric) = family.get_metric().last() { + let count = instance_data_upload_metric + .get_histogram() + .get_sample_count(); + observed.latency_upstream.edge = LatencyMetrics { + avg: instance_data_upload_metric.get_histogram().get_sample_sum() + / count as f64, + count: count as f64, + } + } + } + "client_feature_fetch" => { + if let Some(feature_fetch_metric) = family.get_metric().last() { + let count = feature_fetch_metric.get_histogram().get_sample_count(); + observed.latency_upstream.features = LatencyMetrics { + avg: feature_fetch_metric.get_histogram().get_sample_sum() + / count as f64, + count: count as f64, + } + } + } + "connected_streaming_clients" => { + if let 
Some(connected_streaming_clients) = family.get_metric().last() { + observed.connected_streaming_clients = + connected_streaming_clients.get_gauge().get_value() as u64; + } + } + _ => {} + } + } + observed.traffic = InstanceTraffic { + get: get_requests, + post: post_requests, + }; + observed.process_metrics = Some(ProcessMetrics { + cpu_usage: cpu_seconds as f64, + memory_usage: resident_memory as f64, + }); + observed + } +} diff --git a/server/src/metrics/mod.rs b/server/src/metrics/mod.rs index 887fb965..e3759dfd 100644 --- a/server/src/metrics/mod.rs +++ b/server/src/metrics/mod.rs @@ -6,6 +6,7 @@ use tracing::trace; pub mod actix_web_metrics; pub mod client_metrics; +pub mod edge_metrics; pub mod metrics_pusher; pub mod route_formatter; diff --git a/server/src/prom_metrics.rs b/server/src/prom_metrics.rs index 028357e1..d1c25533 100644 --- a/server/src/prom_metrics.rs +++ b/server/src/prom_metrics.rs @@ -148,6 +148,16 @@ fn register_custom_metrics(registry: &prometheus::Registry) { crate::http::broadcaster::CONNECTED_STREAMING_CLIENTS.clone(), )) .unwrap(); + registry + .register(Box::new( + crate::http::unleash_client::METRICS_UPLOAD.clone(), + )) + .unwrap(); + registry + .register(Box::new( + crate::http::unleash_client::INSTANCE_DATA_UPLOAD.clone(), + )) + .unwrap(); } #[cfg(test)] diff --git a/server/src/urls.rs b/server/src/urls.rs index 12336c4c..56a2e8d1 100644 --- a/server/src/urls.rs +++ b/server/src/urls.rs @@ -17,6 +17,7 @@ pub struct UnleashUrls { pub edge_api_url: Url, pub edge_validate_url: Url, pub edge_metrics_url: Url, + pub edge_instance_data_url: Url, pub new_api_token_url: Url, pub client_features_stream_url: Url, } @@ -100,6 +101,11 @@ impl UnleashUrls { .path_segments_mut() .expect("Could not create /api/client/metrics/bulk") .push("bulk"); + let mut edge_instance_data_url = client_metrics_url.clone(); + edge_instance_data_url + .path_segments_mut() + .expect("Could not create /api/client/metrics/instance-data") + .push("edge"); 
UnleashUrls { base_url, api_url, @@ -114,6 +120,7 @@ impl UnleashUrls { edge_metrics_url, new_api_token_url, client_features_stream_url, + edge_instance_data_url, } } } @@ -138,6 +145,9 @@ mod tests { assert_eq!(urls.api_url.to_string(), api_url); assert_eq!(urls.client_api_url.to_string(), client_url); assert_eq!(urls.client_features_url.to_string(), client_features_url); - assert_eq!(urls.client_features_delta_url.to_string(), client_features_delta_url); + assert_eq!( + urls.client_features_delta_url.to_string(), + client_features_delta_url + ); } }