diff --git a/Cargo.lock b/Cargo.lock index 93efb0ba1..9d69b6927 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2039,6 +2039,26 @@ version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" +[[package]] +name = "deepsize" +version = "0.2.0" +source = "git+https://github.com/chirino/deepsize?branch=main#c6a982656a57cf32efa345372c2955f1b8f68f92" +dependencies = [ + "cpe", + "deepsize_derive", + "petgraph 0.7.1", +] + +[[package]] +name = "deepsize_derive" +version = "0.1.2" +source = "git+https://github.com/chirino/deepsize?branch=main#c6a982656a57cf32efa345372c2955f1b8f68f92" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "deflate64" version = "0.1.9" @@ -2780,6 +2800,19 @@ dependencies = [ "url", ] +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -3388,7 +3421,7 @@ dependencies = [ "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core", + "windows-core 0.52.0", ] [[package]] @@ -4157,6 +4190,19 @@ version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -4373,6 +4419,25 @@ dependencies = [ "tokio", ] +[[package]] +name = "moka" +version = "0.12.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "loom", + "parking_lot 0.12.3", + "portable-atomic", + "rustc_version", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "uuid", +] + [[package]] name = "multer" version = "3.1.0" @@ -6517,6 +6582,12 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.2.0" @@ -7677,6 +7748,12 @@ dependencies = [ "libc", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tap" version = "1.0.1" @@ -8248,6 +8325,7 @@ dependencies = [ "once_cell", "regex", "sharded-slab", + "smallvec", "thread_local", "tracing", "tracing-core", @@ -8306,6 +8384,7 @@ dependencies = [ "chrono", "clap", "cpe", + "deepsize", "hex", "human-date-parser", "itertools 0.13.0", @@ -8361,6 +8440,7 @@ dependencies = [ "anyhow", "async-graphql", "cpe", + "deepsize", "log", "rstest", "schemars", @@ -8444,15 +8524,18 @@ dependencies = [ "bytes", "bytesize", "chrono", + "clap", "cpe", "criterion", "csaf", + "deepsize", "fixedbitset 0.5.7", "hex", "humantime", "itertools 0.13.0", "jsonpath-rust", "log", + 
"moka", "packageurl", "parking_lot 0.12.3", "petgraph 0.7.1", @@ -9450,6 +9533,16 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-core" version = "0.52.0" @@ -9459,6 +9552,41 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "windows-registry" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index 5b32b9b1b..11e4e7c93 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -54,12 +54,14 @@ bytes = "1.5" bytesize = "1.3" chrono = { version = "0.4.35", default-features = false } clap = "4" +moka = "0.12.10" concat-idents = "1" cpe = "0.1.5" criterion = "0.5.1" csaf = { version = "0.5.0", default-features = false } csaf-walker = { version = "0.10.0", default-features = false } cve = "0.3.1" +deepsize = "0.2.0" env_logger = "0.11.0" fixedbitset = "0.5.7" futures = "0.3.30" @@ -207,3 +209,6 @@ langchain-rust = { git = "https://github.com/chirino/langchain-rust", branch = " # to pickup fix: https://github.com/eikendev/sectxt/issues/21 sectxtlib = { git = "https://github.com/ctron/sectxt", branch = "feature/fix_deps_1" } + +# to pickup feat: https://github.com/Aeledfyr/deepsize/pull/41 +deepsize = { git = "https://github.com/chirino/deepsize", branch = "main" } diff --git a/common/Cargo.toml b/common/Cargo.toml index a12ff4a8d..70aa3226e 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -18,6 +18,7 @@ bytesize = { workspace = true, features = ["serde"] } chrono = { workspace = true } clap = { workspace = true, features = ["derive", "env"] } cpe = { workspace = true } +deepsize = { workspace = true } hex = { workspace = true } human-date-parser = { workspace = true } itertools = { workspace = true } diff --git a/common/src/cpe.rs b/common/src/cpe.rs index 73259e205..444b87c25 100644 --- a/common/src/cpe.rs +++ b/common/src/cpe.rs @@ -2,6 +2,7 @@ use cpe::{ cpe::Cpe as _, uri::{OwnedUri, Uri}, }; +use deepsize::DeepSizeOf; use serde::{ de::{Error, Visitor}, Deserialize, Deserializer, Serialize, Serializer, @@ -17,7 +18,7 @@ use utoipa::{ }; use uuid::Uuid; -#[derive(Clone, Hash, Eq, PartialEq)] +#[derive(Clone, Hash, Eq, PartialEq, DeepSizeOf)] pub struct Cpe { uri: OwnedUri, } diff --git a/common/src/purl.rs b/common/src/purl.rs index d5ca52e08..2017739ff 100644 --- a/common/src/purl.rs +++ 
b/common/src/purl.rs @@ -1,3 +1,4 @@ +use deepsize::DeepSizeOf; use packageurl::PackageUrl; use percent_encoding::{utf8_percent_encode, AsciiSet, CONTROLS}; use serde::{ @@ -25,7 +26,7 @@ pub enum PurlErr { Package(#[from] packageurl::Error), } -#[derive(Clone, PartialEq, Eq, Hash)] +#[derive(Clone, PartialEq, Eq, Hash, DeepSizeOf)] pub struct Purl { pub ty: String, pub namespace: Option, diff --git a/docs/env-vars.md b/docs/env-vars.md index aba0056c7..3aae3a2c4 100644 --- a/docs/env-vars.md +++ b/docs/env-vars.md @@ -1,56 +1,57 @@ # ENVIRONMENT VARIABLES -| Environment Variable | Description | Default Value | -|----------------------|--------------------------------|---------------| -| `AUTHENTICATION_DISABLED` | Disable authentication | `false` | -| `AUTHENTICATOR_OIDC_CLIENT_IDS` | Set allowed client IDs (comma separated) | | -| `AUTHENTICATOR_OIDC_ISSUER_URL` | Issuer URL of the clients | | -| `AUTHENTICATOR_OIDC_REQUIRED_AUDIENCE` | Enforce an "audience" to be present in the access token | | -| `AUTHENTICATOR_OIDC_TLS_CA_CERTIFICATES` | Enable additional TLS certificates for communication with the SSO server | | -| `AUTHENTICATOR_OIDC_TLS_INSECURE` | Allow insecure TLS connections with the SSO server | | -| `AUTH_CONFIGURATION` | Location of the AuthNZ configuration file | | -| `AUTH_DISABLED` | Disable authentication and authorization | `false` | -| `CLIENT_TLS_CA_CERTIFICATES` | Additional certificates which will be added as trust anchors | | -| `CLIENT_TLS_INSECURE` | Make the TLS client insecure, disabling all validation | `false` | -| `HTTP_SERVER_BIND_ADDR` | Address to listen on | `::1` | -| `HTTP_SERVER_DISABLE_LOG` | Disable the request log | `false` | -| `HTTP_SERVER_JSON_LIMIT` | JSON request limit | `2 MiB` | -| `HTTP_SERVER_REQUEST_LIMIT` | Overall request limit | `256 KiB` | -| `HTTP_SERVER_TLS_CERTIFICATE_FILE` | Path to the TLS certificate in PEM format | | -| `HTTP_SERVER_TLS_ENABLED` | Enable TLS | `false` | -| `HTTP_SERVER_TLS_KEY_FILE` | Path to the TLS key file in PEM format | | -| `HTTP_SERVER_WORKERS` | Number of worker threads, defaults to zero, which falls back to the number of cores | `0` | -| `OIDC_PROVIDER_CLIENT_ID` | OIDC client ID used for retrieving access tokens | | -| `OIDC_PROVIDER_CLIENT_SECRET` | Secret matching the OIDC client ID | | -| `OIDC_PROVIDER_ISSUER_URL` | OIDC issuer to request access tokens from | | -| `OIDC_PROVIDER_REFRESH_BEFORE` | Duration an access token must still be valid before requesting a new one | `30s` | -| `OIDC_PROVIDER_TLS_INSECURE` | Insecure TLS when contacting the OIDC issuer | `false` | -| `OPENAI_API_KEY` | OpenAI access key | | -| `OPENAI_API_BASE` | To set the base URL path for API requests | `https://api.openapi.com/v1` | -| `OPENAI_MODEL` | OpenAI model | `gpt-4o` | -| `TRUSTD_DB_HOST` | Database address | `localhost` | -| `TRUSTD_DB_MAX_CONN` | Database max connections | `75` | -| `TRUSTD_DB_MIN_CONN` | Database min connections | `25` | -| `TRUSTD_DB_NAME` | Database name | `trustify` | -| `TRUSTD_DB_PASSWORD` | Database password | `trustify` | -| `TRUSTD_DB_PORT` | Database port | `5432` | -| `TRUSTD_DB_USER` | Database username | `postgres` | -| `TRUSTD_ISSUER_URL` | Issuer URL for `--devmode` | `http://localhost:8090/realms/trustify` | -| `TRUSTD_S3_ACCESS_KEY` | S3 access key | | -| `TRUSTD_S3_BUCKET` | S3 bucket name | | -| `TRUSTD_S3_REGION` | S3 region name | | -| `TRUSTD_S3_SECRET_KEY` | S3 secret key | | -| `TRUSTD_STORAGE_FS_PATH` | Path for storage file system strategy | `./.trustify/storage` | -| 
`TRUSTD_STORAGE_STRATEGY` | Specifies the storage strategy to use | `File system` | -| `TRUSTD_WITH_GRAPHQL` | Allows enabling the GraphQL endpoint | `false` | -| `UI_CLIENT_ID` | Client ID used by the UI | `frontend` | -| `UI_ISSUER_URL` | Issuer URL used by the UI | `http://localhost:8090/realms/trustify` | -| `UI_SCOPE` | Scopes to request | `openid` | +| Environment Variable | Description | Default Value | +|------------------------------------------|-------------------------------------------------------------------------------------|-----------------------------------------| +| `AUTHENTICATION_DISABLED` | Disable authentication | `false` | +| `AUTHENTICATOR_OIDC_CLIENT_IDS` | Set allowed client IDs (comma separated) | | +| `AUTHENTICATOR_OIDC_ISSUER_URL` | Issuer URL of the clients | | +| `AUTHENTICATOR_OIDC_REQUIRED_AUDIENCE` | Enforce an "audience" to be present in the access token | | +| `AUTHENTICATOR_OIDC_TLS_CA_CERTIFICATES` | Enable additional TLS certificates for communication with the SSO server | | +| `AUTHENTICATOR_OIDC_TLS_INSECURE` | Allow insecure TLS connections with the SSO server | | +| `AUTH_CONFIGURATION` | Location of the AuthNZ configuration file | | +| `AUTH_DISABLED` | Disable authentication and authorization | `false` | +| `CLIENT_TLS_CA_CERTIFICATES` | Additional certificates which will be added as trust anchors | | +| `CLIENT_TLS_INSECURE` | Make the TLS client insecure, disabling all validation | `false` | +| `HTTP_SERVER_BIND_ADDR` | Address to listen on | `::1` | +| `HTTP_SERVER_DISABLE_LOG` | Disable the request log | `false` | +| `HTTP_SERVER_JSON_LIMIT` | JSON request limit | `2 MiB` | +| `HTTP_SERVER_REQUEST_LIMIT` | Overall request limit | `256 KiB` | +| `HTTP_SERVER_TLS_CERTIFICATE_FILE` | Path to the TLS certificate in PEM format | | +| `HTTP_SERVER_TLS_ENABLED` | Enable TLS | `false` | +| `HTTP_SERVER_TLS_KEY_FILE` | Path to the TLS key file in PEM format | | +| `HTTP_SERVER_WORKERS` | Number of worker threads, defaults to zero, which falls back to the number of cores | `0` | +| `OIDC_PROVIDER_CLIENT_ID` | OIDC client ID used for retrieving access tokens | | +| `OIDC_PROVIDER_CLIENT_SECRET` | Secret matching the OIDC client ID | | +| `OIDC_PROVIDER_ISSUER_URL` | OIDC issuer to request access tokens from | | +| `OIDC_PROVIDER_REFRESH_BEFORE` | Duration an access token must still be valid before requesting a new one | `30s` | +| `OIDC_PROVIDER_TLS_INSECURE` | Insecure TLS when contacting the OIDC issuer | `false` | +| `OPENAI_API_KEY` | OpenAI access key | | +| `OPENAI_API_BASE` | To set the base URL path for API requests | `https://api.openapi.com/v1` | +| `OPENAI_MODEL` | OpenAI model | `gpt-4o` | +| `TRUSTD_DB_HOST` | Database address | `localhost` | +| `TRUSTD_DB_MAX_CONN` | Database max connections | `75` | +| `TRUSTD_DB_MIN_CONN` | Database min connections | `25` | +| `TRUSTD_DB_NAME` | Database name | `trustify` | +| `TRUSTD_DB_PASSWORD` | Database password | `trustify` | +| `TRUSTD_DB_PORT` | Database port | `5432` | +| `TRUSTD_DB_USER` | Database username | `postgres` | +| `TRUSTD_ISSUER_URL` | Issuer URL for `--devmode` | `http://localhost:8090/realms/trustify` | +| `TRUSTD_MAX_CACHE_SIZE` | Maximum size of the graph cache. 
| `200 MiB` | +| `TRUSTD_S3_ACCESS_KEY` | S3 access key | | +| `TRUSTD_S3_BUCKET` | S3 bucket name | | +| `TRUSTD_S3_REGION` | S3 region name | | +| `TRUSTD_S3_SECRET_KEY` | S3 secret key | | +| `TRUSTD_STORAGE_FS_PATH` | Path for storage file system strategy | `./.trustify/storage` | +| `TRUSTD_STORAGE_STRATEGY` | Specifies the storage strategy to use | `File system` | +| `TRUSTD_WITH_GRAPHQL` | Allows enabling the GraphQL endpoint | `false` | +| `UI_CLIENT_ID` | Client ID used by the UI | `frontend` | +| `UI_ISSUER_URL` | Issuer URL used by the UI | `http://localhost:8090/realms/trustify` | +| `UI_SCOPE` | Scopes to request | `openid` | ## Development -| Environment Variable | Description | Default Value | -|----------------------|--------------------------------|---------------| -| `EXTERNAL_TEST_DB` | Run tests against external test database if set | | -| `EXTERNAL_TEST_DB_BOOTSTRAP` | Run tests against external test database if set | | -| `MEM_LIMIT_MB` | Set memory limit for tests that use TrustifyContext, shows the memory usage when the test reaches the limit | `500 MiB` | +| Environment Variable | Description | Default Value | +|------------------------------|-------------------------------------------------------------------------------------------------------------|---------------| +| `EXTERNAL_TEST_DB` | Run tests against external test database if set | | +| `EXTERNAL_TEST_DB_BOOTSTRAP` | Run tests against external test database if set | | +| `MEM_LIMIT_MB` | Set memory limit for tests that use TrustifyContext, shows the memory usage when the test reaches the limit | `500 MiB` | diff --git a/entity/Cargo.toml b/entity/Cargo.toml index 79f81d8ab..7b0d825ce 100644 --- a/entity/Cargo.toml +++ b/entity/Cargo.toml @@ -11,6 +11,7 @@ trustify-cvss = { workspace = true } async-graphql = { workspace = true, features = ["uuid", "time"] } cpe = { workspace = true } +deepsize = { workspace = true } schemars = { workspace = true } sea-orm = { workspace = true, features = [ "sqlx-postgres", diff --git a/entity/src/relationship.rs b/entity/src/relationship.rs index 5fc068785..47799b6d8 100644 --- a/entity/src/relationship.rs +++ b/entity/src/relationship.rs @@ -1,3 +1,4 @@ +use deepsize::DeepSizeOf; use sea_orm::{DeriveActiveEnum, EnumIter}; use std::fmt; @@ -17,6 +18,7 @@ use std::fmt; )] #[sea_orm(rs_type = "i32", db_type = "Integer")] #[serde(rename_all = "snake_case")] +#[derive(DeepSizeOf)] // When adding a new variant, also add this to the "relationship" table. 
pub enum Relationship { #[sea_orm(num_value = 0)] diff --git a/modules/analysis/Cargo.toml b/modules/analysis/Cargo.toml index 706b9bc02..ce07cd975 100644 --- a/modules/analysis/Cargo.toml +++ b/modules/analysis/Cargo.toml @@ -13,9 +13,13 @@ trustify-entity = { workspace = true } actix-http = { workspace = true } actix-web = { workspace = true } anyhow = { workspace = true } +bytesize = { workspace = true } +clap = { workspace = true } cpe = { workspace = true } +deepsize = { workspace = true, features = ["cpe", "petgraph"] } fixedbitset = { workspace = true } log = { workspace = true } +moka = { workspace = true, features = ["sync"] } parking_lot = { workspace = true } petgraph = { workspace = true } sea-orm = { workspace = true } diff --git a/modules/analysis/src/config.rs b/modules/analysis/src/config.rs new file mode 100644 index 000000000..38311dd30 --- /dev/null +++ b/modules/analysis/src/config.rs @@ -0,0 +1,22 @@ +use bytesize::ByteSize; +use trustify_common::model::BinaryByteSize; + +#[derive(clap::Args, Debug, Clone)] +pub struct AnalysisConfig { + #[arg( + id = "max-cache-size", + long, + env = "TRUSTD_MAX_CACHE_SIZE", + default_value = "200 MiB", + help = "Maximum size of the graph cache." + )] + pub max_cache_size: BinaryByteSize, +} + +impl Default for AnalysisConfig { + fn default() -> Self { + Self { + max_cache_size: BinaryByteSize(ByteSize::mib(200)), + } + } +} diff --git a/modules/analysis/src/endpoints/mod.rs b/modules/analysis/src/endpoints/mod.rs index 4226c059b..60258dc62 100644 --- a/modules/analysis/src/endpoints/mod.rs +++ b/modules/analysis/src/endpoints/mod.rs @@ -146,9 +146,9 @@ pub async fn render_sbom_graph( return Ok(HttpResponse::UnsupportedMediaType().finish()); }; - service.load_graph(db.as_ref(), &sbom).await; + let graph = service.load_graph(db.as_ref(), &sbom).await?; - if let Some((data, content_type)) = service.render(&sbom, ext) { + if let Some((data, content_type)) = service.render(graph.as_ref(), ext) { Ok(HttpResponse::Ok().content_type(content_type).body(data)) } else { Ok(HttpResponse::NotFound().finish()) diff --git a/modules/analysis/src/lib.rs b/modules/analysis/src/lib.rs index 45345e0ad..009373def 100644 --- a/modules/analysis/src/lib.rs +++ b/modules/analysis/src/lib.rs @@ -1,3 +1,4 @@ +pub mod config; pub mod endpoints; pub mod error; pub mod service; diff --git a/modules/analysis/src/model.rs b/modules/analysis/src/model.rs index 1485417ef..b2e8a649c 100644 --- a/modules/analysis/src/model.rs +++ b/modules/analysis/src/model.rs @@ -4,8 +4,14 @@ pub use roots::*; use petgraph::Graph; use serde::Serialize; -use std::ops::{Deref, DerefMut}; -use std::{collections::HashMap, fmt}; +use std::{ + fmt, + ops::{Deref, DerefMut}, + sync::Arc, +}; + +use deepsize::DeepSizeOf; +use moka::sync::Cache; use trustify_common::{cpe::Cpe, purl::Purl}; use trustify_entity::relationship::Relationship; use utoipa::ToSchema; @@ -24,7 +30,7 @@ impl fmt::Display for AnalysisStatus { } } -#[derive(Debug, Clone, PartialEq, Eq, ToSchema, serde::Serialize)] +#[derive(Debug, Clone, PartialEq, Eq, ToSchema, serde::Serialize, DeepSizeOf)] pub struct PackageNode { pub sbom_id: String, pub node_id: String, @@ -109,16 +115,33 @@ impl DerefMut for Node { } } -#[derive(Debug)] +pub type PackageGraph = Graph; + pub struct GraphMap { - map: HashMap>, + map: Cache>, +} + +#[allow(clippy::ptr_arg)] // &String is required by Cache::builder().weigher() method +fn size_of_graph_entry(key: &String, value: &Arc) -> u32 { + ( + key.deep_size_of() + + value.as_ref().deep_size_of() + 
// Also add in some entry overhead of the cache entry + + 20 + // todo: find a better estimate for the the moka ValueEntry + ) + .try_into() + .unwrap_or(u32::MAX) } impl GraphMap { // Create a new instance of GraphMap - pub fn new() -> Self { + pub fn new(cap: u64) -> Self { GraphMap { - map: HashMap::new(), + map: Cache::builder() + .weigher(size_of_graph_entry) + .max_capacity(cap) + .build(), } } @@ -128,42 +151,33 @@ impl GraphMap { } // Get the number of graphs in the map - pub fn len(&self) -> usize { - self.map.len() + pub fn len(&self) -> u64 { + self.map.entry_count() } // Check if the map is empty pub fn is_empty(&self) -> bool { - self.map.is_empty() + self.len() == 0 + } + + pub fn size_used(&self) -> u64 { + self.map.weighted_size() } // Add a new graph with the given key (write access) - pub fn insert( - &mut self, - key: String, - graph: Graph, - ) { + pub fn insert(&self, key: String, graph: Arc) { self.map.insert(key, graph); + self.map.run_pending_tasks(); } // Retrieve a reference to a graph by its key (read access) - pub fn get(&self, key: &str) -> Option<&Graph> { + pub fn get(&self, key: &str) -> Option> { self.map.get(key) } - // Retrieve all sbom ids(read access) - pub fn sbom_ids(&self) -> Vec { - self.map.keys().cloned().collect() - } - // Clear all graphs from the map - pub fn clear(&mut self) { - self.map.clear(); - } -} - -impl Default for GraphMap { - fn default() -> Self { - Self::new() + pub fn clear(&self) { + self.map.invalidate_all(); + self.map.run_pending_tasks(); } } diff --git a/modules/analysis/src/service/load.rs b/modules/analysis/src/service/load.rs index 87fda53b9..64119b5a0 100644 --- a/modules/analysis/src/service/load.rs +++ b/modules/analysis/src/service/load.rs @@ -1,8 +1,10 @@ +use crate::model::PackageGraph; use crate::{ model::PackageNode, service::{AnalysisService, ComponentReference, GraphQuery}, Error, }; +use anyhow::anyhow; use petgraph::{prelude::NodeIndex, Graph}; use sea_orm::{ ColumnTrait, ConnectionTrait, DatabaseBackend, DbErr, EntityOrSelect, EntityTrait, @@ -11,6 +13,7 @@ use sea_orm::{ use sea_query::{JoinType, Order, SelectStatement}; use serde_json::Value; use std::collections::HashSet; +use std::sync::Arc; use std::{collections::hash_map::Entry, collections::HashMap}; use tracing::{instrument, Level}; use trustify_common::{cpe::Cpe, db::query::Filtering, purl::Purl}; @@ -149,7 +152,7 @@ impl AnalysisService { &self, connection: &C, query: GraphQuery<'_>, - ) -> Result, Error> { + ) -> Result)>, Error> { let search_sbom_subquery = match query { GraphQuery::Component(ComponentReference::Id(name)) => sbom_node::Entity::find() .filter(sbom_node::Column::NodeId.eq(name)) @@ -196,7 +199,7 @@ impl AnalysisService { &self, connection: &C, subquery: SelectStatement, - ) -> Result, Error> { + ) -> Result)>, Error> { let distinct_sbom_ids: Vec = sbom::Entity::find() .filter(sbom::Column::SbomId.in_subquery(subquery)) .select() @@ -208,30 +211,33 @@ impl AnalysisService { .map(|record| record.sbom_id.to_string()) // Assuming sbom_id is of type String .collect(); - self.load_graphs(connection, &distinct_sbom_ids).await?; - - Ok(distinct_sbom_ids) + self.load_graphs(connection, &distinct_sbom_ids).await } /// Load the SBOM matching the provided ID #[instrument(skip(self, connection))] - pub async fn load_graph(&self, connection: &C, distinct_sbom_id: &str) { - if self.graph.read().contains_key(distinct_sbom_id) { + pub async fn load_graph( + &self, + connection: &C, + distinct_sbom_id: &str, + ) -> Result, Error> { + if let Some(g) = 
self.graph_cache.get(distinct_sbom_id) { // early return if we already loaded it - return; + return Ok(g); } let distinct_sbom_id = match Uuid::parse_str(distinct_sbom_id) { Ok(uuid) => uuid, Err(err) => { - log::warn!("Unable to parse SBOM ID {distinct_sbom_id}: {err}"); - return; + return Err(Error::Database(anyhow!( + "Unable to parse SBOM ID {distinct_sbom_id}: {err}" + ))); } }; // lazy load graphs - let mut g: Graph = Graph::new(); + let mut g: PackageGraph = Graph::new(); let mut nodes = HashMap::new(); let mut detected_nodes = HashSet::new(); @@ -240,8 +246,7 @@ impl AnalysisService { let packages = match get_nodes(connection, distinct_sbom_id).await { Ok(nodes) => nodes, Err(err) => { - log::error!("Error fetching graph nodes: {}", err); - return; + return Err(err.into()); } }; @@ -276,8 +281,7 @@ impl AnalysisService { let edges = match get_relationships(connection, distinct_sbom_id).await { Ok(edges) => edges, Err(err) => { - log::error!("Error fetching graph relationships: {}", err); - return; + return Err(err.into()); } }; @@ -323,7 +327,10 @@ impl AnalysisService { // Set the result. A parallel call might have done the same. We wasted some time, but the // state is still correct. - self.graph.write().insert(distinct_sbom_id.to_string(), g); + let g = Arc::new(g); + self.graph_cache + .insert(distinct_sbom_id.to_string(), g.clone()); + Ok(g) } /// Load all SBOMs by the provided IDs @@ -331,11 +338,14 @@ impl AnalysisService { &self, connection: &C, distinct_sbom_ids: &Vec, - ) -> Result<(), DbErr> { + ) -> Result)>, Error> { + let mut results = Vec::new(); for distinct_sbom_id in distinct_sbom_ids { - self.load_graph(connection, distinct_sbom_id).await; + results.push(( + distinct_sbom_id.clone(), + self.load_graph(connection, distinct_sbom_id).await?, + )); } - - Ok(()) + Ok(results) } } diff --git a/modules/analysis/src/service/mod.rs b/modules/analysis/src/service/mod.rs index 0674b99dc..1f69cf3fc 100644 --- a/modules/analysis/src/service/mod.rs +++ b/modules/analysis/src/service/mod.rs @@ -6,16 +6,16 @@ pub use query::*; pub use walk::*; pub mod render; - #[cfg(test)] mod test; +use crate::config::AnalysisConfig; +use crate::model::PackageGraph; use crate::{ model::{AnalysisStatus, BaseSummary, GraphMap, Node, PackageNode}, Error, }; use fixedbitset::FixedBitSet; -use parking_lot::RwLock; use petgraph::{ graph::{Graph, NodeIndex}, prelude::EdgeRef, @@ -39,7 +39,7 @@ use uuid::Uuid; #[derive(Clone)] pub struct AnalysisService { - graph: Arc>, + graph_cache: Arc, } /// Collect related nodes in the provided direction. @@ -124,7 +124,7 @@ fn collect( } impl AnalysisService { - /// Create a new analysis service instance. + /// Create a new analysis service instance with the configured cache size. /// /// ## Caching /// @@ -135,15 +135,25 @@ impl AnalysisService { /// /// Also, we do not implement default because of this. As a new instance has the implication /// of having its own cache. So creating a new instance should be a deliberate choice. 
- #[allow(clippy::new_without_default)] - pub fn new() -> Self { + pub fn new(config: AnalysisConfig) -> Self { Self { - graph: Default::default(), + graph_cache: Arc::new(GraphMap::new(config.max_cache_size.as_u64())), } } + pub fn cache_size_used(&self) -> u64 { + self.graph_cache.size_used() + } + + pub fn cache_len(&self) -> u64 { + self.graph_cache.len() + } + #[instrument(skip_all, err)] - pub async fn load_all_graphs(&self, connection: &C) -> Result<(), Error> { + pub async fn load_all_graphs( + &self, + connection: &C, + ) -> Result)>, Error> { // retrieve all sboms in trustify let distinct_sbom_ids = sbom::Entity::find() @@ -156,14 +166,11 @@ impl AnalysisService { .map(|record| record.sbom_id.to_string()) // Assuming sbom_id is of type String .collect(); - self.load_graphs(connection, &distinct_sbom_ids).await?; - - Ok(()) + self.load_graphs(connection, &distinct_sbom_ids).await } pub fn clear_all_graphs(&self) -> Result<(), Error> { - let mut manager = self.graph.write(); - manager.clear(); + self.graph_cache.clear(); Ok(()) } @@ -178,10 +185,9 @@ impl AnalysisService { .all(connection) .await?; - let manager = self.graph.read(); Ok(AnalysisStatus { sbom_count: distinct_sbom_ids.len() as u32, - graph_count: manager.len() as u32, + graph_count: self.graph_cache.len() as u32, }) } @@ -192,7 +198,7 @@ impl AnalysisService { fn collect_graph<'a, T, I, C>( &self, query: impl Into> + Debug, - distinct_sbom_ids: Vec, + graphs: &[(String, Arc)], init: I, collector: C, ) -> T @@ -202,13 +208,9 @@ impl AnalysisService { { let mut value = init(); - self.query_graphs( - query, - distinct_sbom_ids, - |graph, index, node, discovered| { - collector(&mut value, graph, index, node, discovered); - }, - ); + self.query_graphs(query, graphs, |graph, index, node, discovered| { + collector(&mut value, graph, index, node, discovered); + }); value } @@ -218,31 +220,22 @@ impl AnalysisService { fn query_graphs<'a, F>( &self, query: impl Into> + Debug, - distinct_sbom_ids: Vec, + graphs: &[(String, Arc)], mut f: F, ) where F: FnMut(&Graph, NodeIndex, &PackageNode, &mut FixedBitSet), { let query = query.into(); - - // RwLock for reading hashmap - let graph_read_guard = self.graph.read(); - for distinct_sbom_id in &distinct_sbom_ids { - self.query_graph(&graph_read_guard, query, distinct_sbom_id, &mut f); + for (distinct_sbom_id, graph) in graphs { + self.query_graph(distinct_sbom_id, graph.as_ref(), query, &mut f); } } #[instrument(skip(self, graph, f))] - fn query_graph(&self, graph: &GraphMap, query: GraphQuery<'_>, sbom_id: &str, f: &mut F) + fn query_graph(&self, sbom_id: &str, graph: &PackageGraph, query: GraphQuery<'_>, f: &mut F) where F: FnMut(&Graph, NodeIndex, &PackageNode, &mut FixedBitSet), { - let Some(graph) = graph.get(sbom_id) else { - // FIXME: we need a better strategy handling such errors - log::warn!("Unable to find SBOM: {sbom_id}"); - return; - }; - if let Some((start, end)) = detect_cycle(graph) { // FIXME: we need a better strategy handling such errors let start = graph.node_weight(start); @@ -275,13 +268,13 @@ impl AnalysisService { &self, query: impl Into> + Debug, options: QueryOptions, - distinct_sbom_ids: Vec, + graphs: &[(String, Arc)], ) -> Vec { let relationships = options.relationships; self.collect_graph( query, - distinct_sbom_ids, + graphs, Vec::new, |components, graph, node_index, node, _| { log::debug!( @@ -328,8 +321,8 @@ impl AnalysisService { let query = query.into(); let options = options.into(); - self.load_graphs(connection, &distinct_sbom_ids).await?; - let 
components = self.run_graph_query(query, options, distinct_sbom_ids); + let graphs = self.load_graphs(connection, &distinct_sbom_ids).await?; + let components = self.run_graph_query(query, options, &graphs); Ok(paginated.paginate_array(&components)) } @@ -346,8 +339,8 @@ impl AnalysisService { let query = query.into(); let options = options.into(); - let distinct_sbom_ids = self.load_graphs_query(connection, query).await?; - let components = self.run_graph_query(query, options, distinct_sbom_ids); + let graphs = self.load_graphs_query(connection, query).await?; + let components = self.run_graph_query(query, options, &graphs); Ok(paginated.paginate_array(&components)) } diff --git a/modules/analysis/src/service/render/mod.rs b/modules/analysis/src/service/render/mod.rs index fb085397b..44ac82ff8 100644 --- a/modules/analysis/src/service/render/mod.rs +++ b/modules/analysis/src/service/render/mod.rs @@ -12,9 +12,9 @@ pub enum Renderer { } impl AnalysisService { - pub fn render(&self, sbom: &str, renderer: Renderer) -> Option<(String, String)> { + pub fn render(&self, graph: &PackageGraph, renderer: Renderer) -> Option<(String, String)> { match renderer { - Renderer::Graphviz => self.walk(sbom, graphviz::Renderer::new()), + Renderer::Graphviz => self.walk(graph, graphviz::Renderer::new()), } } } diff --git a/modules/analysis/src/service/test.rs b/modules/analysis/src/service/test.rs index bac08dd3e..a5cd7a0ea 100644 --- a/modules/analysis/src/service/test.rs +++ b/modules/analysis/src/service/test.rs @@ -6,6 +6,7 @@ use crate::{ use std::{str::FromStr, time::SystemTime}; use test_context::test_context; use test_log::test; +use trustify_common::model::BinaryByteSize; use trustify_common::{ cpe::Cpe, db::query::Query, model::Paginated, purl::Purl, sbom::spdx::fix_license, }; @@ -17,7 +18,7 @@ async fn test_simple_analysis_service(ctx: &TrustifyContext) -> Result<(), anyho ctx.ingest_documents(["spdx/simple.json", "spdx/simple.json"]) .await?; //double ingestion intended - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let analysis_graph = service .retrieve( @@ -77,7 +78,7 @@ async fn test_simple_analysis_cyclonedx_service( ctx.ingest_documents(["cyclonedx/simple.json", "cyclonedx/simple.json"]) .await?; //double ingestion intended - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let analysis_graph = service .retrieve( @@ -135,7 +136,7 @@ async fn test_simple_analysis_cyclonedx_service( async fn test_simple_by_name_analysis_service(ctx: &TrustifyContext) -> Result<(), anyhow::Error> { ctx.ingest_documents(["spdx/simple.json"]).await?; - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let analysis_graph = service .retrieve( @@ -184,7 +185,7 @@ async fn simple_by_name_analysis_service_filter_rel( ) -> Result<(), anyhow::Error> { ctx.ingest_documents(["spdx/simple.json"]).await?; - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let analysis_graph = service .retrieve( @@ -225,7 +226,7 @@ async fn simple_by_name_analysis_service_filter_rel( async fn test_simple_by_purl_analysis_service(ctx: &TrustifyContext) -> Result<(), anyhow::Error> { ctx.ingest_documents(["spdx/simple.json"]).await?; - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let component_purl: Purl = Purl::from_str("pkg:rpm/redhat/B@0.0.0").map_err(Error::Purl)?; 
@@ -277,7 +278,7 @@ async fn test_quarkus_analysis_service(ctx: &TrustifyContext) -> Result<(), anyh ]) .await?; - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let analysis_graph = service .retrieve( @@ -330,14 +331,15 @@ async fn test_quarkus_analysis_service(ctx: &TrustifyContext) -> Result<(), anyh async fn test_status_service(ctx: &TrustifyContext) -> Result<(), anyhow::Error> { ctx.ingest_documents(["spdx/simple.json"]).await?; - let service = AnalysisService::new(); - let _load_all_graphs = service.load_all_graphs(&ctx.db).await; - let analysis_status = service.status(&ctx.db).await?; + let service = AnalysisService::new(AnalysisConfig::default()); + let all_graphs = service.load_all_graphs(&ctx.db).await?; + assert_eq!(all_graphs.len(), 1); + let analysis_status = service.status(&ctx.db).await?; assert_eq!(analysis_status.sbom_count, 1); assert_eq!(analysis_status.graph_count, 1); - let _clear_all_graphs = service.clear_all_graphs(); + service.clear_all_graphs()?; ctx.ingest_documents([ "spdx/quarkus-bom-3.2.11.Final-redhat-00001.json", @@ -353,12 +355,53 @@ async fn test_status_service(ctx: &TrustifyContext) -> Result<(), anyhow::Error> Ok(()) } +#[test_context(TrustifyContext)] +#[test(tokio::test)] +async fn test_cache_size_used(ctx: &TrustifyContext) -> Result<(), anyhow::Error> { + ctx.ingest_documents(["spdx/simple.json"]).await?; + + let service = AnalysisService::new(AnalysisConfig::default()); + assert_eq!(service.cache_size_used(), 0u64); + + let all_graphs = service.load_all_graphs(&ctx.db).await?; + assert_eq!(all_graphs.len(), 1); + + let kb = 1024; + let small_sbom_size = service.cache_size_used(); + assert!(small_sbom_size > 6 * kb); + assert!(small_sbom_size < 7 * kb); + + ctx.ingest_documents(["spdx/quarkus-bom-3.2.11.Final-redhat-00001.json"]) + .await?; + let all_graphs = service.load_all_graphs(&ctx.db).await?; + assert_eq!(all_graphs.len(), 2); + + let big_sbom_size = service.cache_size_used() - small_sbom_size; + assert!(big_sbom_size > 950 * kb); + assert!(big_sbom_size < 960 * kb); + + // Now lets try it with small cache that can at least fit the small bom + let service = AnalysisService::new(AnalysisConfig { + max_cache_size: BinaryByteSize::from(small_sbom_size * 2), + }); + + let all_graphs = service.load_all_graphs(&ctx.db).await?; + // we should be able to load all the graphs even if they can't fit in the cache. 
+ assert_eq!(all_graphs.len(), 2); + + // but the cache should only contain the first sbom + assert_eq!(small_sbom_size, service.cache_size_used()); + assert_eq!(1u64, service.cache_len()); + + Ok(()) +} + #[test_context(TrustifyContext)] #[test(tokio::test)] async fn test_simple_deps_service(ctx: &TrustifyContext) -> Result<(), anyhow::Error> { ctx.ingest_documents(["spdx/simple.json"]).await?; - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let analysis_graph = service .retrieve( @@ -395,7 +438,7 @@ async fn test_simple_deps_service(ctx: &TrustifyContext) -> Result<(), anyhow::E async fn test_simple_deps_cyclonedx_service(ctx: &TrustifyContext) -> Result<(), anyhow::Error> { ctx.ingest_documents(["cyclonedx/simple.json"]).await?; - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let analysis_graph = service .retrieve( @@ -428,7 +471,7 @@ async fn test_simple_deps_cyclonedx_service(ctx: &TrustifyContext) -> Result<(), async fn test_simple_by_name_deps_service(ctx: &TrustifyContext) -> Result<(), anyhow::Error> { ctx.ingest_documents(["spdx/simple.json"]).await?; - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let analysis_graph = service .retrieve( @@ -459,7 +502,7 @@ async fn test_simple_by_name_deps_service(ctx: &TrustifyContext) -> Result<(), a async fn test_simple_by_purl_deps_service(ctx: &TrustifyContext) -> Result<(), anyhow::Error> { ctx.ingest_documents(["spdx/simple.json"]).await?; - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let component_purl: Purl = Purl::from_str("pkg:rpm/redhat/AA@0.0.0?arch=src").map_err(Error::Purl)?; @@ -492,7 +535,7 @@ async fn test_quarkus_deps_service(ctx: &TrustifyContext) -> Result<(), anyhow:: ]) .await?; - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let analysis_graph = service .retrieve( @@ -514,7 +557,7 @@ async fn test_circular_deps_cyclonedx_service(ctx: &TrustifyContext) -> Result<( ctx.ingest_documents(["cyclonedx/cyclonedx-circular.json"]) .await?; - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let analysis_graph = service .retrieve( @@ -536,7 +579,7 @@ async fn test_circular_deps_cyclonedx_service(ctx: &TrustifyContext) -> Result<( async fn test_circular_deps_spdx_service(ctx: &TrustifyContext) -> Result<(), anyhow::Error> { ctx.ingest_documents(["spdx/loop.json"]).await?; - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let analysis_graph = service .retrieve( @@ -559,7 +602,7 @@ async fn test_retrieve_all_sbom_roots_by_name(ctx: &TrustifyContext) -> Result<( ctx.ingest_documents(["spdx/quarkus-bom-3.2.11.Final-redhat-00001.json"]) .await?; - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let component_name = "quarkus-vertx-http".to_string(); let analysis_graph = service @@ -630,7 +673,7 @@ async fn load_performance(ctx: &TrustifyContext) -> Result<(), anyhow::Error> { log::info!("Start populating graph"); let start = SystemTime::now(); - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); service.load_all_graphs(&ctx.db).await?; log::info!( diff --git a/modules/analysis/src/service/walk.rs b/modules/analysis/src/service/walk.rs index 
8d09dd630..ebdf45f6e 100644 --- a/modules/analysis/src/service/walk.rs +++ b/modules/analysis/src/service/walk.rs @@ -26,13 +26,10 @@ impl AnalysisService { /// /// Otherwise, the visitor will be called, and it will return the output of the /// [`Visitor::complete`] function. - pub fn walk(&self, sbom: &str, mut v: V) -> Option + pub fn walk(&self, graph: &PackageGraph, mut v: V) -> Option where V: Visitor, { - let graph = self.graph.read(); - let graph = graph.get(sbom)?; - for node in graph.node_weights() { v.node(node); } diff --git a/modules/analysis/src/test.rs b/modules/analysis/src/test.rs index 9ade9a118..89896e473 100644 --- a/modules/analysis/src/test.rs +++ b/modules/analysis/src/test.rs @@ -1,3 +1,4 @@ +use crate::config::AnalysisConfig; use crate::{ endpoints::configure, model::{BaseSummary, Node as GraphNode}, @@ -10,7 +11,7 @@ use trustify_test_context::{ }; pub async fn caller(ctx: &TrustifyContext) -> anyhow::Result { - let analysis = AnalysisService::new(); + let analysis = AnalysisService::new(AnalysisConfig::default()); call::caller(|svc| configure(svc, ctx.db.clone(), analysis)).await } diff --git a/modules/fundamental/src/test/common.rs b/modules/fundamental/src/test/common.rs index 5ecee76ae..96ce587cc 100644 --- a/modules/fundamental/src/test/common.rs +++ b/modules/fundamental/src/test/common.rs @@ -1,3 +1,4 @@ +use trustify_module_analysis::config::AnalysisConfig; use trustify_module_analysis::service::AnalysisService; use trustify_test_context::{ call::{self, CallService}, @@ -12,6 +13,6 @@ async fn caller_with( ctx: &TrustifyContext, config: Config, ) -> anyhow::Result { - let analysis = AnalysisService::new(); + let analysis = AnalysisService::new(AnalysisConfig::default()); call::caller(|svc| configure(svc, config, ctx.db.clone(), ctx.storage.clone(), analysis)).await } diff --git a/modules/fundamental/tests/sbom/spdx/aliases.rs b/modules/fundamental/tests/sbom/spdx/aliases.rs index 49f91c48a..75f0ec86e 100644 --- a/modules/fundamental/tests/sbom/spdx/aliases.rs +++ b/modules/fundamental/tests/sbom/spdx/aliases.rs @@ -3,6 +3,7 @@ use itertools::Itertools; use test_context::test_context; use test_log::test; use trustify_common::id::Id; +use trustify_module_analysis::config::AnalysisConfig; use trustify_module_analysis::service::{AnalysisService, ComponentReference}; use trustify_test_context::TrustifyContext; @@ -18,7 +19,7 @@ async fn cpe_purl(ctx: &TrustifyContext) -> Result<(), anyhow::Error> { bail!("must be an id") }; - let service = AnalysisService::new(); + let service = AnalysisService::new(AnalysisConfig::default()); let result = service .retrieve( diff --git a/modules/ingestor/tests/common.rs b/modules/ingestor/tests/common.rs index d3f2798b2..9cc8c68e9 100644 --- a/modules/ingestor/tests/common.rs +++ b/modules/ingestor/tests/common.rs @@ -1,3 +1,4 @@ +use trustify_module_analysis::config::AnalysisConfig; use trustify_module_analysis::service::AnalysisService; use trustify_module_ingestor::endpoints::{configure, Config}; use trustify_test_context::{ @@ -9,7 +10,7 @@ pub async fn caller_with( ctx: &TrustifyContext, config: Config, ) -> anyhow::Result { - let analysis = AnalysisService::new(); + let analysis = AnalysisService::new(AnalysisConfig::default()); call::caller(|svc| { configure( svc, diff --git a/server/src/openapi.rs b/server/src/openapi.rs index c293c3d81..456516ad9 100644 --- a/server/src/openapi.rs +++ b/server/src/openapi.rs @@ -1,6 +1,7 @@ use crate::profile::api::{configure, default_openapi_info, Config, ModuleConfig}; use 
actix_web::App; use trustify_common::{config::Database, db}; +use trustify_module_analysis::config::AnalysisConfig; use trustify_module_analysis::service::AnalysisService; use trustify_module_storage::service::{dispatch::DispatchBackend, fs::FileSystemBackend}; use utoipa::{ @@ -12,7 +13,7 @@ use utoipa_actix_web::AppExt; pub async fn create_openapi() -> anyhow::Result { let (db, postgresql) = db::embedded::create().await?; let (storage, _temp) = FileSystemBackend::for_test().await?; - let analysis = AnalysisService::new(); + let analysis = AnalysisService::new(AnalysisConfig::default()); let (_, mut openapi) = App::new() .into_utoipa_app() diff --git a/server/src/profile/api.rs b/server/src/profile/api.rs index 35a46acdd..aed2e98f5 100644 --- a/server/src/profile/api.rs +++ b/server/src/profile/api.rs @@ -34,6 +34,7 @@ use trustify_infrastructure::{ otel::{Metrics as OtelMetrics, Tracing}, Infrastructure, InfrastructureConfig, InitContext, Metrics, }; +use trustify_module_analysis::config::AnalysisConfig; use trustify_module_analysis::service::AnalysisService; use trustify_module_graphql::RootQuery; use trustify_module_importer::server::importer; @@ -94,6 +95,10 @@ pub struct Run { // flattened commands must go last // + /// Analysis configuration + #[command(flatten)] + pub analysis: AnalysisConfig, + /// Database configuration #[command(flatten)] pub database: Database, @@ -169,6 +174,7 @@ struct InitData { ui: UI, with_graphql: bool, config: ModuleConfig, + analysis: AnalysisService, } /// Groups all module configurations. @@ -289,6 +295,7 @@ impl InitData { }; Ok(InitData { + analysis: AnalysisService::new(run.analysis), authenticator, authorizer, db, @@ -309,7 +316,6 @@ impl InitData { let ui = Arc::new(UiResources::new(&self.ui)?); let db = self.db.clone(); let storage = self.storage.clone(); - let analysis = AnalysisService::new(); let http = { HttpServerBuilder::try_from(self.http)? @@ -327,7 +333,7 @@ impl InitData { db: self.db.clone(), storage: self.storage.clone(), auth: self.authenticator.clone(), - analysis: analysis.clone(), + analysis: self.analysis.clone(), with_graphql: self.with_graphql, }, @@ -502,7 +508,7 @@ mod test { let db = ctx.db; let (storage, _) = FileSystemBackend::for_test().await?; let ui = Arc::new(UiResources::new(&UI::default())?); - let analysis = AnalysisService::new(); + let analysis = AnalysisService::new(AnalysisConfig::default()); let app = actix_web::test::init_service( App::new() .into_utoipa_app() diff --git a/xtask/src/precommit.rs b/xtask/src/precommit.rs index 43bd5c4eb..3475bb99a 100644 --- a/xtask/src/precommit.rs +++ b/xtask/src/precommit.rs @@ -50,7 +50,7 @@ impl Precommit { println!("Running: cargo check"); if !Command::new("cargo") - .args(["check"]) + .args(["check", "--all-targets", "--all-features"]) .status() .map_err(|_| anyhow!("cargo check failed"))? .success()
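
---

The core change in this patch is that `GraphMap` stops being a plain `HashMap` behind a `RwLock` and becomes a `moka` cache whose capacity is a byte budget: each cached graph is weighed by its deep size (via `deepsize`) plus a small per-entry overhead, and `max_capacity` comes from the new `TRUSTD_MAX_CACHE_SIZE` / `AnalysisConfig` setting. The sketch below is not part of the patch; it is a minimal, self-contained illustration of that weigher pattern, assuming moka 0.12 with the "sync" feature and the deepsize crate, and using a stand-in `FakeGraph` type instead of the real petgraph-backed `PackageGraph`.

```rust
// Sketch only: weigher-based moka cache, mirroring the new GraphMap.
use std::sync::Arc;

use deepsize::DeepSizeOf;
use moka::sync::Cache;

#[derive(DeepSizeOf)]
struct FakeGraph {
    nodes: Vec<String>,
}

fn main() {
    // Weigh each entry by its deep (heap-inclusive) size so that `max_capacity`
    // acts as a byte budget rather than an entry count.
    let cache: Cache<String, Arc<FakeGraph>> = Cache::builder()
        .weigher(|key: &String, value: &Arc<FakeGraph>| {
            (key.deep_size_of() + value.as_ref().deep_size_of())
                .try_into()
                .unwrap_or(u32::MAX)
        })
        .max_capacity(32 * 1024) // ~32 KiB budget for this toy example
        .build();

    let small = FakeGraph { nodes: vec!["pkg".to_string(); 16] };
    let large = FakeGraph { nodes: vec!["x".repeat(128); 1024] }; // well over the budget

    cache.insert("small-sbom".to_string(), Arc::new(small));
    cache.insert("large-sbom".to_string(), Arc::new(large));

    // Flush pending maintenance so eviction and size accounting are visible now,
    // mirroring the `run_pending_tasks()` calls added to GraphMap.
    cache.run_pending_tasks();

    // The oversized entry should be evicted while the small one remains; this is
    // the behavior the new `test_cache_size_used` test relies on.
    println!(
        "entries = {}, weighted size = {} bytes",
        cache.entry_count(),
        cache.weighted_size()
    );
}
```

Returning `Arc<PackageGraph>` from `load_graph` fits this design: callers keep a handle to a graph even after the cache evicts it, so queries never observe a graph disappearing mid-request, and the patch can drop the old `RwLock` around the map entirely.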
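The new `TRUSTD_MAX_CACHE_SIZE` setting reaches the service through a flattened clap `Args` struct (`AnalysisConfig`), so the usual clap precedence applies: command-line flag, then environment variable, then the `200 MiB` default. The sketch below is illustrative only and assumes clap 4 with the "derive" and "env" features; the real code uses the `BinaryByteSize` newtype (which parses values like "200 MiB"), whereas this sketch uses a plain byte count to stay self-contained.

```rust
// Sketch only: how the flattened AnalysisConfig picks up TRUSTD_MAX_CACHE_SIZE.
use clap::Parser;

#[derive(clap::Args, Debug, Clone)]
struct AnalysisConfig {
    /// Maximum size of the graph cache (plain bytes in this sketch).
    #[arg(
        long = "max-cache-size",
        env = "TRUSTD_MAX_CACHE_SIZE",
        default_value_t = 200 * 1024 * 1024
    )]
    max_cache_size: u64,
}

#[derive(Parser, Debug)]
struct Run {
    #[command(flatten)]
    analysis: AnalysisConfig,
}

fn main() {
    // Precedence: --max-cache-size flag > TRUSTD_MAX_CACHE_SIZE env var > default.
    let run = Run::parse();
    println!("graph cache budget: {} bytes", run.analysis.max_cache_size);
}
```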