From 3b7b64f4b1978f2499729347c1bdc999b21d36e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Femen=C3=ADa?= <131800808+pablf@users.noreply.github.com> Date: Sun, 24 Sep 2023 18:05:54 +0200 Subject: [PATCH 1/5] Bump argmin to 0.8.1 and serde optional (#309) * bumped argmin version * bumped argmin version * updated linfa-logistic * import * add param * import * fixed trait * fixed errors * updated argmin-math * made serde optional * dependency fixed * fixed deserialize * fmt * fixed deserialize * Delete metals.log * Delete metals.mv.db --- .github/workflows/checking.yml | 2 +- algorithms/linfa-ftrl/Cargo.toml | 13 +- algorithms/linfa-ftrl/src/hyperparams.rs | 19 ++- algorithms/linfa-ftrl/src/lib.rs | 10 +- algorithms/linfa-linear/Cargo.toml | 4 +- algorithms/linfa-logistic/Cargo.toml | 15 +- algorithms/linfa-logistic/src/argmin_param.rs | 120 +++++++++++++- algorithms/linfa-logistic/src/float.rs | 3 +- algorithms/linfa-logistic/src/hyperparams.rs | 21 ++- algorithms/linfa-logistic/src/lib.rs | 147 ++++++++++++------ 10 files changed, 283 insertions(+), 71 deletions(-) diff --git a/.github/workflows/checking.yml b/.github/workflows/checking.yml index cc9b41f4e..733ce7459 100644 --- a/.github/workflows/checking.yml +++ b/.github/workflows/checking.yml @@ -39,4 +39,4 @@ jobs: run: cargo check --workspace --all-targets - name: Run cargo check (with serde) - run: cargo check --workspace --all-targets --features "linfa-clustering/serde linfa-ica/serde linfa-kernel/serde linfa-reduction/serde linfa-svm/serde linfa-elasticnet/serde linfa-pls/serde linfa-trees/serde linfa-nn/serde linfa-linear/serde linfa-preprocessing/serde linfa-bayes/serde" + run: cargo check --workspace --all-targets --features "linfa-clustering/serde linfa-ica/serde linfa-kernel/serde linfa-reduction/serde linfa-svm/serde linfa-elasticnet/serde linfa-pls/serde linfa-trees/serde linfa-nn/serde linfa-linear/serde linfa-preprocessing/serde linfa-bayes/serde linfa-logistic/serde linfa-ftrl/serde" diff --git a/algorithms/linfa-ftrl/Cargo.toml b/algorithms/linfa-ftrl/Cargo.toml index b0d2e598f..d6b106fa2 100644 --- a/algorithms/linfa-ftrl/Cargo.toml +++ b/algorithms/linfa-ftrl/Cargo.toml @@ -13,14 +13,23 @@ readme = "README.md" keywords = ["machine-learning", "linfa", "ai", "ml", "ftrl"] categories = ["algorithms", "mathematics", "science"] +[features] +serde = ["serde_crate", "linfa/serde", "ndarray/serde", "argmin/serde1"] + +[dependencies.serde_crate] +package = "serde" +optional = true +version = "1.0" +features = ["derive"] + [dependencies] ndarray = { version = "0.15.4", features = ["serde"]} ndarray-rand = "0.14.0" -argmin = { version = "0.4.7", features = ["ndarray", "ndarray-rand"]} +argmin = { version = "0.8.1", default-features = false } +argmin-math = { version = "0.3", features = ["ndarray_v0_15-nolinalg"] } thiserror = "1.0" rand = "0.8.5" rand_xoshiro = "0.6.0" -serde = {version = "1.0.137", features = ["derive"]} linfa = { version = "0.6.1", path = "../.."} diff --git a/algorithms/linfa-ftrl/src/hyperparams.rs b/algorithms/linfa-ftrl/src/hyperparams.rs index 62eab2198..921daf592 100644 --- a/algorithms/linfa-ftrl/src/hyperparams.rs +++ b/algorithms/linfa-ftrl/src/hyperparams.rs @@ -1,15 +1,26 @@ use crate::error::FtrlError; use linfa::{Float, ParamGuard}; use rand::Rng; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[cfg(feature = "serde")] +use serde_crate::{Deserialize, Serialize}; + +#[derive(Debug, Clone, PartialEq)] +#[cfg_attr( + feature = "serde", + 
derive(Serialize, Deserialize), + serde(crate = "serde_crate") +)] pub struct FtrlParams(pub(crate) FtrlValidParams); /// A verified hyper-parameter set ready for the estimation of a Follow the regularized leader - proximal model /// /// See [`FtrlParams`](crate::FtrlParams) for more information. -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[derive(Debug, Clone, PartialEq)] +#[cfg_attr( + feature = "serde", + derive(Serialize, Deserialize), + serde(crate = "serde_crate") +)] pub struct FtrlValidParams { pub(crate) alpha: F, pub(crate) beta: F, diff --git a/algorithms/linfa-ftrl/src/lib.rs b/algorithms/linfa-ftrl/src/lib.rs index b7fccc403..050acfe40 100644 --- a/algorithms/linfa-ftrl/src/lib.rs +++ b/algorithms/linfa-ftrl/src/lib.rs @@ -13,9 +13,15 @@ use ndarray::Array1; use ndarray_rand::RandomExt; use rand::{distributions::Uniform, Rng}; use rand_xoshiro::{rand_core::SeedableRng, Xoshiro256Plus}; -use serde::{Deserialize, Serialize}; +#[cfg(feature = "serde")] +use serde_crate::{Deserialize, Serialize}; -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Debug, Clone)] +#[cfg_attr( + feature = "serde", + derive(Serialize, Deserialize), + serde(crate = "serde_crate") +)] pub struct Ftrl { /// FTRL (Follow The Regularized Leader - proximal) is a linear model for CTR prediction in online learning settings. /// It stores z and n values, which are later used to calculate weights at incremental model fit and during prediction. diff --git a/algorithms/linfa-linear/Cargo.toml b/algorithms/linfa-linear/Cargo.toml index 680b6485d..b60f94c95 100644 --- a/algorithms/linfa-linear/Cargo.toml +++ b/algorithms/linfa-linear/Cargo.toml @@ -32,8 +32,8 @@ ndarray = { version = "0.15", features = ["approx"] } linfa-linalg = { version = "0.1", default-features = false } ndarray-linalg = { version = "0.15", optional = true } num-traits = "0.2" -argmin = { version = "0.7", default-features = false } -argmin-math = { version = "0.2", features = ["ndarray_v0_15-nolinalg"] } +argmin = { version = "0.8.1", default-features = false } +argmin-math = { version = "0.3", features = ["ndarray_v0_15-nolinalg"] } thiserror = "1.0" linfa = { version = "0.6.1", path = "../.." } diff --git a/algorithms/linfa-logistic/Cargo.toml b/algorithms/linfa-logistic/Cargo.toml index 6a6433abf..3d4840ed5 100644 --- a/algorithms/linfa-logistic/Cargo.toml +++ b/algorithms/linfa-logistic/Cargo.toml @@ -13,15 +13,24 @@ readme = "README.md" keywords = ["machine-learning", "linfa", "ai", "ml", "linear"] categories = ["algorithms", "mathematics", "science"] +[features] +serde = ["serde_crate", "linfa/serde", "ndarray/serde", "argmin/serde1"] + +[dependencies.serde_crate] +package = "serde" +optional = true +version = "1.0" + [dependencies] ndarray = { version = "0.15", features = ["approx"] } ndarray-stats = "0.5.0" num-traits = "0.2" -argmin = { version = "0.4.6", features = ["ndarray", "ndarray-rand"] } -serde = "1.0" +argmin = { version = "0.8.1", default-features = false } +argmin-math = { version = "0.3", features = ["ndarray_v0_15-nolinalg"] } thiserror = "1.0" -linfa = { version = "0.6.1", path = "../..", features=["serde"] } + +linfa = { version = "0.6.1", path = "../.." } [dev-dependencies] approx = "0.4" diff --git a/algorithms/linfa-logistic/src/argmin_param.rs b/algorithms/linfa-logistic/src/argmin_param.rs index f76cee6f2..ca7a89015 100644 --- a/algorithms/linfa-logistic/src/argmin_param.rs +++ b/algorithms/linfa-logistic/src/argmin_param.rs @@ -7,9 +7,13 @@ //! 
Unfortunately, this requires that we re-implement some traits from Argmin. use crate::float::Float; -use argmin::prelude::*; -use ndarray::{Array, ArrayBase, Data, Dimension, Zip}; -use serde::{Deserialize, Serialize}; +use argmin_math::{ + ArgminAdd, ArgminDot, ArgminL1Norm, ArgminL2Norm, ArgminMinMax, ArgminMul, ArgminSignum, + ArgminSub, ArgminZeroLike, +}; +use ndarray::{Array, ArrayBase, Data, Dimension, Ix1, Ix2, Zip}; +#[cfg(feature = "serde")] +use serde_crate::{Deserialize, Serialize}; pub fn elem_dot, A2: Data, D: Dimension>( a: &ArrayBase, @@ -20,7 +24,12 @@ pub fn elem_dot, A2: Data, D: Dime .fold(F::zero(), |acc, &a, &b| acc + a * b) } -#[derive(Serialize, Clone, Deserialize, Debug, Default, PartialEq)] +#[derive(Debug, Clone, PartialEq, Default)] +#[cfg_attr( + feature = "serde", + derive(Serialize, Deserialize), + serde(crate = "serde_crate") +)] pub struct ArgminParam(pub Array); impl ArgminParam { @@ -30,12 +39,24 @@ impl ArgminParam { } } +impl ArgminSub> for ArgminParam { + fn sub(&self, other: &F) -> ArgminParam { + ArgminParam(&self.0 - *other) + } +} + impl ArgminSub, ArgminParam> for ArgminParam { fn sub(&self, other: &ArgminParam) -> ArgminParam { ArgminParam(&self.0 - &other.0) } } +impl ArgminAdd> for ArgminParam { + fn add(&self, other: &F) -> ArgminParam { + ArgminParam(&self.0 + *other) + } +} + impl ArgminAdd, ArgminParam> for ArgminParam { fn add(&self, other: &ArgminParam) -> ArgminParam { ArgminParam(&self.0 + &other.0) @@ -48,8 +69,14 @@ impl ArgminDot, F> for ArgminParam ArgminNorm for ArgminParam { - fn norm(&self) -> F { +impl ArgminL1Norm for ArgminParam { + fn l1_norm(&self) -> F { + num_traits::Float::sqrt(elem_dot(&self.0, &self.0)) + } +} + +impl ArgminL2Norm for ArgminParam { + fn l2_norm(&self) -> F { num_traits::Float::sqrt(elem_dot(&self.0, &self.0)) } } @@ -65,3 +92,84 @@ impl ArgminMul, ArgminParam> for ArgminParam(&self.0 * &other.0) } } + +impl ArgminSignum for ArgminParam { + fn signum(self) -> ArgminParam { + self + } +} + +impl ArgminZeroLike for ArgminParam { + fn zero_like(&self) -> ArgminParam { + let dims = self.as_array().raw_dim(); + ArgminParam(Array::zeros(dims)) + } +} + +impl ArgminMinMax for ArgminParam { + fn min(x: &Self, y: &Self) -> ArgminParam { + let x_array = x.as_array(); + let y_array = y.as_array(); + + assert_eq!(x_array.shape(), y_array.shape()); + ArgminParam( + x_array + .iter() + .zip(y_array) + .map(|(&a, &b)| if a < b { a } else { b }) + .collect(), + ) + } + + fn max(x: &Self, y: &Self) -> ArgminParam { + let x_array = x.as_array(); + let y_array = y.as_array(); + + assert_eq!(x_array.shape(), y_array.shape()); + ArgminParam( + x_array + .iter() + .zip(y_array) + .map(|(&a, &b)| if a > b { a } else { b }) + .collect(), + ) + } +} + +impl ArgminMinMax for ArgminParam { + fn min(x: &Self, y: &Self) -> ArgminParam { + let x_array = x.as_array(); + let y_array = y.as_array(); + + assert_eq!(x_array.shape(), y_array.shape()); + let m = x_array.shape()[0]; + let n = x_array.shape()[1]; + let mut out = x_array.clone(); + for i in 0..m { + for j in 0..n { + let a = x_array[(i, j)]; + let b = y_array[(i, j)]; + out[(i, j)] = if a < b { a } else { b }; + } + } + ArgminParam(out) + } + + fn max(x: &Self, y: &Self) -> ArgminParam { + let x_array = x.as_array(); + let y_array = y.as_array(); + + assert_eq!(x_array.shape(), y_array.shape()); + let m = x_array.shape()[0]; + let n = x_array.shape()[1]; + let mut out = x_array.clone(); + for i in 0..m { + for j in 0..n { + let a = x_array[(i, j)]; + let b = y_array[(i, j)]; + 
out[(i, j)] = if a > b { a } else { b }; + } + } + ArgminParam(out) + } +} diff --git a/algorithms/linfa-logistic/src/float.rs b/algorithms/linfa-logistic/src/float.rs index 1cac762b2..ae15ee4dc 100644 --- a/algorithms/linfa-logistic/src/float.rs +++ b/algorithms/linfa-logistic/src/float.rs @@ -1,5 +1,6 @@ use crate::argmin_param::ArgminParam; -use argmin::prelude::{ArgminFloat, ArgminMul}; +use argmin::core::ArgminFloat; +use argmin_math::ArgminMul; use ndarray::{Dimension, Ix1, Ix2, NdFloat}; use num_traits::FromPrimitive; diff --git a/algorithms/linfa-logistic/src/hyperparams.rs b/algorithms/linfa-logistic/src/hyperparams.rs index 1705aed3a..a41bb82a0 100644 --- a/algorithms/linfa-logistic/src/hyperparams.rs +++ b/algorithms/linfa-logistic/src/hyperparams.rs @@ -4,16 +4,27 @@ use ndarray::{Array, Dimension}; use crate::error::Error; use crate::float::Float; -use serde::{Deserialize, Serialize}; +#[cfg(feature = "serde")] +use serde_crate::{Deserialize, Serialize}; /// A generalized logistic regression type that specializes as either binomial logistic regression /// or multinomial logistic regression. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(bound(deserialize = "D: Deserialize<'de>"))] +#[derive(Debug, Clone, PartialEq)] +#[cfg_attr( + feature = "serde", + derive(Serialize, Deserialize), + serde(crate = "serde_crate"), + serde(bound(deserialize = "D: Deserialize<'de>")) +)] pub struct LogisticRegressionParams(LogisticRegressionValidParams); -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(bound(deserialize = "D: Deserialize<'de>"))] +#[derive(Debug, Clone, PartialEq)] +#[cfg_attr( + feature = "serde", + derive(Serialize, Deserialize), + serde(crate = "serde_crate"), + serde(bound(deserialize = "D: Deserialize<'de>")) +)] pub struct LogisticRegressionValidParams { pub(crate) alpha: F, pub(crate) fit_intercept: bool, diff --git a/algorithms/linfa-logistic/src/lib.rs b/algorithms/linfa-logistic/src/lib.rs index 33fab4ca0..eac7f89e8 100644 --- a/algorithms/linfa-logistic/src/lib.rs +++ b/algorithms/linfa-logistic/src/lib.rs @@ -19,9 +19,9 @@ pub mod error; use crate::error::{Error, Result}; -use argmin::prelude::*; +use argmin::core::{CostFunction, Executor, Gradient, IterState, OptimizationResult, Solver}; use argmin::solver::linesearch::MoreThuenteLineSearch; -use argmin::solver::quasinewton::lbfgs::LBFGS; +use argmin::solver::quasinewton::LBFGS; use linfa::dataset::AsSingleTargets; use linfa::prelude::DatasetBase; use linfa::traits::{Fit, PredictInplace}; @@ -30,14 +30,18 @@ use ndarray::{ Dimension, IntoDimension, Ix1, Ix2, RemoveAxis, Slice, Zip, }; use ndarray_stats::QuantileExt; -use serde::{Deserialize, Serialize}; use std::default::Default; +#[cfg(feature = "serde")] +use serde_crate::de::DeserializeOwned; +#[cfg(feature = "serde")] +use serde_crate::{Deserialize, Serialize}; + mod argmin_param; mod float; mod hyperparams; -use argmin_param::{elem_dot, ArgminParam}; +use argmin_param::*; use float::Float; use hyperparams::{LogisticRegressionParams, LogisticRegressionValidParams}; @@ -97,20 +101,27 @@ impl Default for LogisticRegressionParams { } } -type LBFGSType = LBFGS, F>, ArgminParam, F>; +type LBFGSType = LBFGS< + MoreThuenteLineSearch, ArgminParam, F>, + ArgminParam, + ArgminParam, + F, +>; type LBFGSType1 = LBFGSType; type LBFGSType2 = LBFGSType; +type IterStateType = IterState, ArgminParam, (), (), F>; + impl LogisticRegressionValidParams { /// Create the initial parameters, either from a user supplied array /// or an array of 
0s - fn setup_init_params(&self, dims: D::Pattern) -> Array { + fn setup_init_params(&self, dims: D::Pattern) -> ArgminParam { if let Some(params) = self.initial_params.as_ref() { - params.clone() + ArgminParam(params.clone()) } else { let mut dims = dims.into_dimension(); dims.as_array_view_mut()[0] += self.fit_intercept as usize; - Array::zeros(dims) + ArgminParam(Array::zeros(dims)) } } @@ -170,20 +181,29 @@ impl LogisticRegressionValidParams { /// tolerance. fn setup_solver(&self) -> LBFGSType { let linesearch = MoreThuenteLineSearch::new(); - LBFGS::new(linesearch, 10).with_tol_grad(self.gradient_tolerance) + LBFGS::new(linesearch, 10) + .with_tolerance_grad(self.gradient_tolerance) + .unwrap() } +} +impl< + F: Float, + #[cfg(feature = "serde")] D: Dimension + Serialize + DeserializeOwned, + #[cfg(not(feature = "serde"))] D: Dimension, + > LogisticRegressionValidParams +{ /// Run the LBFGS solver until it converges or runs out of iterations. - fn run_solver( + fn run_solver>( &self, problem: P, solver: P::Solver, - init_params: P::Param, - ) -> Result> { - Executor::new(problem, solver, init_params) - .max_iters(self.max_iterations) + init_params: ArgminParam, + ) -> Result>> { + Executor::new(problem, solver) + .configure(|state| state.param(init_params).max_iters(self.max_iterations)) .run() - .map_err(|err| err.into()) + .map_err(move |err| err.into()) } } @@ -215,10 +235,13 @@ impl<'a, C: 'a + Ord + Clone, F: Float, D: Data, T: AsSingleTargets, T: AsSingleTargets>( } /// A fitted logistic regression which can make predictions -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] -#[serde(bound(deserialize = "C: Deserialize<'de>"))] +#[derive(Debug, Clone, PartialEq)] +#[cfg_attr( + feature = "serde", + derive(Serialize, Deserialize), + serde(crate = "serde_crate"), + serde(bound(deserialize = "C: Deserialize<'de>")) +)] pub struct FittedLogisticRegression { threshold: F, intercept: F, @@ -628,7 +659,12 @@ impl> } /// A fitted multinomial logistic regression which can make predictions -#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr( + feature = "serde", + derive(Serialize, Deserialize), + serde(crate = "serde_crate") +)] pub struct MultiFittedLogisticRegression { intercept: Array1, params: Array2, @@ -703,13 +739,23 @@ impl> } } -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq)] +#[cfg_attr( + feature = "serde", + derive(Serialize, Deserialize), + serde(crate = "serde_crate") +)] struct ClassLabel { class: C, label: F, } -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq)] +#[cfg_attr( + feature = "serde", + derive(Serialize, Deserialize), + serde(crate = "serde_crate") +)] struct BinaryClassLabels { pos: ClassLabel, neg: ClassLabel, @@ -726,19 +772,21 @@ struct LogisticRegressionProblem<'a, F: Float, A: Data, D: Dimension> type LogisticRegressionProblem1<'a, F, A> = LogisticRegressionProblem<'a, F, A, Ix1>; type LogisticRegressionProblem2<'a, F, A> = LogisticRegressionProblem<'a, F, A, Ix2>; -impl<'a, F: Float, A: Data> ArgminOp for LogisticRegressionProblem1<'a, F, A> { +impl<'a, F: Float, A: Data> CostFunction for LogisticRegressionProblem1<'a, F, A> { type Param = ArgminParam; type Output = F; - type Hessian = (); - type Jacobian = Array1; - type Float = F; /// Apply the cost function to a parameter `p` - fn apply(&self, p: &Self::Param) -> std::result::Result { + fn cost(&self, p: &Self::Param) -> std::result::Result 
{ let w = p.as_array(); let cost = logistic_loss(self.x, &self.target, self.alpha, w); Ok(cost) } +} + +impl<'a, F: Float, A: Data> Gradient for LogisticRegressionProblem1<'a, F, A> { + type Param = ArgminParam; + type Gradient = ArgminParam; /// Compute the gradient at parameter `p`. fn gradient(&self, p: &Self::Param) -> std::result::Result { @@ -748,19 +796,21 @@ impl<'a, F: Float, A: Data> ArgminOp for LogisticRegressionProblem1<'a } } -impl<'a, F: Float, A: Data> ArgminOp for LogisticRegressionProblem2<'a, F, A> { +impl<'a, F: Float, A: Data> CostFunction for LogisticRegressionProblem2<'a, F, A> { type Param = ArgminParam; type Output = F; - type Hessian = (); - type Jacobian = Array1; - type Float = F; /// Apply the cost function to a parameter `p` - fn apply(&self, p: &Self::Param) -> std::result::Result { + fn cost(&self, p: &Self::Param) -> std::result::Result { let w = p.as_array(); let cost = multi_logistic_loss(self.x, &self.target, self.alpha, w); Ok(cost) } +} + +impl<'a, F: Float, A: Data> Gradient for LogisticRegressionProblem2<'a, F, A> { + type Param = ArgminParam; + type Gradient = ArgminParam; /// Compute the gradient at parameter `p`. fn gradient(&self, p: &Self::Param) -> std::result::Result { @@ -770,15 +820,19 @@ impl<'a, F: Float, A: Data> ArgminOp for LogisticRegressionProblem2<'a } } -trait SolvableProblem: ArgminOp + Sized { - type Solver: Solver; +trait SolvableProblem: Gradient + Sized { + type Solver: Solver>; } -impl<'a, F: Float, A: Data> SolvableProblem for LogisticRegressionProblem1<'a, F, A> { +impl<'a, F: Float, A: Data> SolvableProblem + for LogisticRegressionProblem1<'a, F, A> +{ type Solver = LBFGSType1; } -impl<'a, F: Float, A: Data> SolvableProblem for LogisticRegressionProblem2<'a, F, A> { +impl<'a, F: Float, A: Data> SolvableProblem + for LogisticRegressionProblem2<'a, F, A> +{ type Solver = LBFGSType2; } @@ -1092,13 +1146,16 @@ mod test { ); // Test serialization - let ser = rmp_serde::to_vec(&res).unwrap(); - let unser: FittedLogisticRegression = rmp_serde::from_slice(&ser).unwrap(); + #[cfg(feature = "serde")] + { + let ser = rmp_serde::to_vec(&res).unwrap(); + let unser: FittedLogisticRegression = rmp_serde::from_slice(&ser).unwrap(); - let x = array![[1.0]]; - let y_hat = unser.predict(&x); + let x = array![[1.0]]; + let y_hat = unser.predict(&x); - assert!(y_hat[0] == 0.0); + assert!(y_hat[0] == 0.0); + } } #[test] From ff5d836c2efdc9b698a90509e2a79a4aff2f9aad Mon Sep 17 00:00:00 2001 From: Yuhan Lin Date: Sun, 15 Oct 2023 21:52:37 +0000 Subject: [PATCH 2/5] Build fixes (#319) * Bump MSRV to 1.67 * Fix clippy errors * Remove exact thiserror versioning for linfa-hierarchical --- .github/workflows/checking.yml | 2 +- .github/workflows/codequality.yml | 2 +- .github/workflows/testing.yml | 4 ++-- algorithms/linfa-hierarchical/Cargo.toml | 2 +- src/metrics_classification.rs | 6 +++--- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/checking.yml b/.github/workflows/checking.yml index 733ce7459..9537f82ec 100644 --- a/.github/workflows/checking.yml +++ b/.github/workflows/checking.yml @@ -10,7 +10,7 @@ jobs: fail-fast: false matrix: toolchain: - - 1.65.0 + - 1.67.0 - stable - nightly os: diff --git a/.github/workflows/codequality.yml b/.github/workflows/codequality.yml index c32dafd07..f17f42e1f 100644 --- a/.github/workflows/codequality.yml +++ b/.github/workflows/codequality.yml @@ -10,7 +10,7 @@ jobs: strategy: matrix: toolchain: - - 1.65.0 + - 1.67.0 - stable steps: diff --git a/.github/workflows/testing.yml 
b/.github/workflows/testing.yml index a0ada73f4..9f3a8c091 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -10,7 +10,7 @@ jobs: fail-fast: false matrix: toolchain: - - 1.65.0 + - 1.67.0 - stable os: - ubuntu-latest @@ -35,7 +35,7 @@ jobs: fail-fast: false matrix: toolchain: - - 1.65.0 + - 1.67.0 - stable os: - ubuntu-latest diff --git a/algorithms/linfa-hierarchical/Cargo.toml b/algorithms/linfa-hierarchical/Cargo.toml index 9f5f74fa6..0397886df 100644 --- a/algorithms/linfa-hierarchical/Cargo.toml +++ b/algorithms/linfa-hierarchical/Cargo.toml @@ -16,7 +16,7 @@ categories = ["algorithms", "mathematics", "science"] [dependencies] ndarray = { version = "0.15" } kodama = "0.2" -thiserror = "=1.0.25" +thiserror = "1.0.25" linfa = { version = "0.6.1", path = "../.." } linfa-kernel = { version = "0.6.1", path = "../linfa-kernel" } diff --git a/src/metrics_classification.rs b/src/metrics_classification.rs index 16ea62fa9..adca89af4 100644 --- a/src/metrics_classification.rs +++ b/src/metrics_classification.rs @@ -618,19 +618,19 @@ mod tests { assert_split_eq( &x, - |cm| ConfusionMatrix::precision(cm), + ConfusionMatrix::precision, &array![1.0, 3. / 4.], &labels, ); assert_split_eq( &x, - |cm| ConfusionMatrix::recall(cm), + ConfusionMatrix::recall, &array![2.0 / 3.0, 1.0], &labels, ); assert_split_eq( &x, - |cm| ConfusionMatrix::f1_score(cm), + ConfusionMatrix::f1_score, &array![4.0 / 5.0, 6.0 / 7.0], &labels, ); From f9f7dd4c5d7942da4cd52d105a1fed1163fa3379 Mon Sep 17 00:00:00 2001 From: Andrea Frigido Date: Sun, 15 Oct 2023 23:45:46 +0100 Subject: [PATCH 3/5] Update license field following SPDX 2.1 license expression standard (#310) --- Cargo.toml | 2 +- algorithms/linfa-bayes/Cargo.toml | 2 +- algorithms/linfa-clustering/Cargo.toml | 2 +- algorithms/linfa-elasticnet/Cargo.toml | 2 +- algorithms/linfa-ftrl/Cargo.toml | 2 +- algorithms/linfa-hierarchical/Cargo.toml | 2 +- algorithms/linfa-ica/Cargo.toml | 2 +- algorithms/linfa-kernel/Cargo.toml | 2 +- algorithms/linfa-linear/Cargo.toml | 2 +- algorithms/linfa-logistic/Cargo.toml | 2 +- algorithms/linfa-nn/Cargo.toml | 2 +- algorithms/linfa-pls/Cargo.toml | 2 +- algorithms/linfa-preprocessing/Cargo.toml | 2 +- algorithms/linfa-reduction/Cargo.toml | 2 +- algorithms/linfa-svm/Cargo.toml | 2 +- algorithms/linfa-trees/Cargo.toml | 2 +- algorithms/linfa-tsne/Cargo.toml | 2 +- datasets/Cargo.toml | 2 +- 18 files changed, 18 insertions(+), 18 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c5f1980ce..55786d1f8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ authors = [ ] description = "A Machine Learning framework for Rust" edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/algorithms/linfa-bayes/Cargo.toml b/algorithms/linfa-bayes/Cargo.toml index b2b4b5f9b..1f61165b7 100644 --- a/algorithms/linfa-bayes/Cargo.toml +++ b/algorithms/linfa-bayes/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.1" authors = ["VasanthakumarV "] description = "Collection of Naive Bayes Algorithms" edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" keywords = ["factorization", "machine-learning", "linfa", "unsupervised"] diff --git a/algorithms/linfa-clustering/Cargo.toml b/algorithms/linfa-clustering/Cargo.toml index 7861bb80b..616569567 100644 --- a/algorithms/linfa-clustering/Cargo.toml +++ b/algorithms/linfa-clustering/Cargo.toml 
@@ -8,7 +8,7 @@ authors = [ "Rémi Lafage " ] description = "A collection of clustering algorithms" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa/" readme = "README.md" diff --git a/algorithms/linfa-elasticnet/Cargo.toml b/algorithms/linfa-elasticnet/Cargo.toml index d89b09f28..feb4e95b5 100644 --- a/algorithms/linfa-elasticnet/Cargo.toml +++ b/algorithms/linfa-elasticnet/Cargo.toml @@ -8,7 +8,7 @@ authors = [ description = "A Machine Learning framework for Rust" edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/algorithms/linfa-ftrl/Cargo.toml b/algorithms/linfa-ftrl/Cargo.toml index d6b106fa2..7382136a8 100644 --- a/algorithms/linfa-ftrl/Cargo.toml +++ b/algorithms/linfa-ftrl/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Liudmyla Kyrashchuk "] description = "A Machine Learning framework for Rust" edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/algorithms/linfa-hierarchical/Cargo.toml b/algorithms/linfa-hierarchical/Cargo.toml index 0397886df..c92bdfd14 100644 --- a/algorithms/linfa-hierarchical/Cargo.toml +++ b/algorithms/linfa-hierarchical/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Lorenz Schmidt "] edition = "2018" description = "Agglomerative Hierarchical clustering" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/algorithms/linfa-ica/Cargo.toml b/algorithms/linfa-ica/Cargo.toml index 4b4212d47..2d11a5e37 100644 --- a/algorithms/linfa-ica/Cargo.toml +++ b/algorithms/linfa-ica/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.1" authors = ["VasanthakumarV "] description = "A collection of Independent Component Analysis (ICA) algorithms" edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/algorithms/linfa-kernel/Cargo.toml b/algorithms/linfa-kernel/Cargo.toml index 30626125f..214f8f9c4 100644 --- a/algorithms/linfa-kernel/Cargo.toml +++ b/algorithms/linfa-kernel/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.1" authors = ["Lorenz Schmidt "] description = "Kernel methods for non-linear algorithms" edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/algorithms/linfa-linear/Cargo.toml b/algorithms/linfa-linear/Cargo.toml index b60f94c95..cba623bf5 100644 --- a/algorithms/linfa-linear/Cargo.toml +++ b/algorithms/linfa-linear/Cargo.toml @@ -8,7 +8,7 @@ authors = [ description = "A Machine Learning framework for Rust" edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/algorithms/linfa-logistic/Cargo.toml b/algorithms/linfa-logistic/Cargo.toml index 3d4840ed5..d78bcf284 100644 --- a/algorithms/linfa-logistic/Cargo.toml +++ b/algorithms/linfa-logistic/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Körbitz / Google "] description = "A Machine Learning framework for Rust" edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/algorithms/linfa-nn/Cargo.toml b/algorithms/linfa-nn/Cargo.toml index a7bc71c4b..fc4ad3d86 100644 --- 
a/algorithms/linfa-nn/Cargo.toml +++ b/algorithms/linfa-nn/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.1" authors = ["YuhanLiin "] edition = "2018" description = "A collection of nearest neighbour algorithms" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa/" readme = "README.md" diff --git a/algorithms/linfa-pls/Cargo.toml b/algorithms/linfa-pls/Cargo.toml index 2ddf4d239..a7688ca63 100644 --- a/algorithms/linfa-pls/Cargo.toml +++ b/algorithms/linfa-pls/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.1" edition = "2018" authors = ["relf "] description = "Partial Least Squares family methods" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/algorithms/linfa-preprocessing/Cargo.toml b/algorithms/linfa-preprocessing/Cargo.toml index 4f4bdde2f..36ced2d33 100644 --- a/algorithms/linfa-preprocessing/Cargo.toml +++ b/algorithms/linfa-preprocessing/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sauro98 "] description = "A Machine Learning framework for Rust" edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/algorithms/linfa-reduction/Cargo.toml b/algorithms/linfa-reduction/Cargo.toml index 62dde4258..ad5c8e5f6 100644 --- a/algorithms/linfa-reduction/Cargo.toml +++ b/algorithms/linfa-reduction/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.1" authors = ["Lorenz Schmidt "] description = "A collection of dimensionality reduction techniques" edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/algorithms/linfa-svm/Cargo.toml b/algorithms/linfa-svm/Cargo.toml index 922e57521..908aa2a31 100644 --- a/algorithms/linfa-svm/Cargo.toml +++ b/algorithms/linfa-svm/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.1" edition = "2018" authors = ["Lorenz Schmidt "] description = "Support Vector Machines" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/algorithms/linfa-trees/Cargo.toml b/algorithms/linfa-trees/Cargo.toml index 0954a0ad5..011b8e485 100644 --- a/algorithms/linfa-trees/Cargo.toml +++ b/algorithms/linfa-trees/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.1" edition = "2018" authors = ["Moss Ebeling "] description = "A collection of tree-based algorithms" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/algorithms/linfa-tsne/Cargo.toml b/algorithms/linfa-tsne/Cargo.toml index cfd9bcb55..2e30dcbe0 100644 --- a/algorithms/linfa-tsne/Cargo.toml +++ b/algorithms/linfa-tsne/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Lorenz Schmidt "] edition = "2018" description = "Barnes-Hut t-distributed stochastic neighbor embedding" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" readme = "README.md" diff --git a/datasets/Cargo.toml b/datasets/Cargo.toml index af1939330..aa78e9edc 100644 --- a/datasets/Cargo.toml +++ b/datasets/Cargo.toml @@ -4,7 +4,7 @@ version = "0.6.1" authors = ["Lorenz Schmidt "] description = "Collection of small datasets for Linfa" edition = "2018" -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" [dependencies] From 34d1c84238fd0aa3e763e687c3603447414afd1a Mon Sep 17 00:00:00 2001 
From: Yuhan Lin Date: Mon, 16 Oct 2023 04:34:10 +0000 Subject: [PATCH 4/5] Release version 0.7.0 (#320) * Update CHANGELOG * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * (cargo-release) version 0.7.0 * Add news entry * Fix docs * Add date to changelog --- CHANGELOG.md | 15 +++++++++++++++ Cargo.toml | 2 +- algorithms/linfa-bayes/Cargo.toml | 6 +++--- algorithms/linfa-clustering/Cargo.toml | 10 +++++----- algorithms/linfa-elasticnet/Cargo.toml | 6 +++--- algorithms/linfa-ftrl/Cargo.toml | 8 ++++---- algorithms/linfa-hierarchical/Cargo.toml | 8 ++++---- algorithms/linfa-ica/Cargo.toml | 6 +++--- algorithms/linfa-kernel/Cargo.toml | 6 +++--- algorithms/linfa-linear/Cargo.toml | 8 ++++---- algorithms/linfa-logistic/Cargo.toml | 6 +++--- algorithms/linfa-nn/Cargo.toml | 6 +++--- algorithms/linfa-pls/Cargo.toml | 8 ++++---- algorithms/linfa-preprocessing/Cargo.toml | 8 ++++---- .../linfa-preprocessing/src/linear_scaling.rs | 4 ++-- .../src/tf_idf_vectorization.rs | 10 +++++----- .../linfa-preprocessing/src/whitening.rs | 4 ++-- algorithms/linfa-reduction/Cargo.toml | 8 ++++---- algorithms/linfa-svm/Cargo.toml | 8 ++++---- algorithms/linfa-trees/Cargo.toml | 8 ++++---- algorithms/linfa-tsne/Cargo.toml | 8 ++++---- datasets/Cargo.toml | 4 ++-- docs/website/content/news/release_061.md | 4 ++-- docs/website/content/news/release_070.md | 18 ++++++++++++++++++ 24 files changed, 106 insertions(+), 73 deletions(-) create mode 100644 docs/website/content/news/release_070.md diff --git a/CHANGELOG.md b/CHANGELOG.md index c240bd29e..13a957773 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,18 @@ +Version 0.7.0 - 2023-10-15 +======================== +Changes +----------- + * add `array_from_gz_csv` and `array_from_csv` in `linfa-datasets` + * make Serde support in `linfa-linear`, `linfa-logistic`, and `linfa-ftrl` optional + * bump `argmin` to 0.8.1 + * add Serde support to `linfa-preprocessing` and `linfa-bayes` + * make licenses follow SPDX 2.1 license expression standard + +Removals +----------- + * Removed Approximate DBSCAN from `linfa-clustering` due to performance issues. It's now an alias to regular DBSCAN. + * Removed `partitions` dependency, which breaks in current versions of Rust. 
+ Version 0.6.1 - 2022-12-03 ======================== New Algorithms diff --git a/Cargo.toml b/Cargo.toml index 55786d1f8..7d5bcebde 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa" -version = "0.6.1" +version = "0.7.0" authors = [ "Luca Palmieri ", "Lorenz Schmidt ", diff --git a/algorithms/linfa-bayes/Cargo.toml b/algorithms/linfa-bayes/Cargo.toml index 1f61165b7..cd4fa60ca 100644 --- a/algorithms/linfa-bayes/Cargo.toml +++ b/algorithms/linfa-bayes/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-bayes" -version = "0.6.1" +version = "0.7.0" authors = ["VasanthakumarV "] description = "Collection of Naive Bayes Algorithms" edition = "2018" @@ -25,8 +25,8 @@ ndarray = { version = "0.15" , features = ["approx"]} ndarray-stats = "0.5" thiserror = "1.0" -linfa = { version = "0.6.1", path = "../.." } +linfa = { version = "0.7.0", path = "../.." } [dev-dependencies] approx = "0.4" -linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["winequality"] } +linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["winequality"] } diff --git a/algorithms/linfa-clustering/Cargo.toml b/algorithms/linfa-clustering/Cargo.toml index 616569567..bd55815c6 100644 --- a/algorithms/linfa-clustering/Cargo.toml +++ b/algorithms/linfa-clustering/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-clustering" -version = "0.6.1" +version = "0.7.0" edition = "2018" authors = [ "Luca Palmieri ", @@ -38,18 +38,18 @@ rand_xoshiro = "0.6" space = "0.12" thiserror = "1.0" #partitions = "0.2.4" This one will break in a future version of Rust and has no replacement -linfa = { version = "0.6.1", path = "../.." } -linfa-nn = { version = "0.6.1", path = "../linfa-nn" } +linfa = { version = "0.7.0", path = "../.." } +linfa-nn = { version = "0.7.0", path = "../linfa-nn" } noisy_float = "0.2.0" [dev-dependencies] ndarray-npy = { version = "0.8", default-features = false } -linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["generate"] } +linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["generate"] } criterion = "0.4.0" serde_json = "1" approx = "0.4" lax = "0.15.0" -linfa = { version = "0.6.0", path = "../..", features = ["benchmarks"] } +linfa = { version = "0.7.0", path = "../..", features = ["benchmarks"] } [[bench]] name = "k_means" diff --git a/algorithms/linfa-elasticnet/Cargo.toml b/algorithms/linfa-elasticnet/Cargo.toml index feb4e95b5..948fa1d75 100644 --- a/algorithms/linfa-elasticnet/Cargo.toml +++ b/algorithms/linfa-elasticnet/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-elasticnet" -version = "0.6.1" +version = "0.7.0" authors = [ "Paul Körbitz / Google ", "Lorenz Schmidt " @@ -37,9 +37,9 @@ num-traits = "0.2" approx = "0.4" thiserror = "1.0" -linfa = { version = "0.6.1", path = "../.." } +linfa = { version = "0.7.0", path = "../.." 
} [dev-dependencies] -linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["diabetes", "linnerud"] } +linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["diabetes", "linnerud"] } ndarray-rand = "0.14" rand_xoshiro = "0.6" diff --git a/algorithms/linfa-ftrl/Cargo.toml b/algorithms/linfa-ftrl/Cargo.toml index 7382136a8..3bc979e8d 100644 --- a/algorithms/linfa-ftrl/Cargo.toml +++ b/algorithms/linfa-ftrl/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-ftrl" -version = "0.6.1" +version = "0.7.0" authors = ["Liudmyla Kyrashchuk "] description = "A Machine Learning framework for Rust" @@ -31,13 +31,13 @@ thiserror = "1.0" rand = "0.8.5" rand_xoshiro = "0.6.0" -linfa = { version = "0.6.1", path = "../.."} +linfa = { version = "0.7.0", path = "../.."} [dev-dependencies] criterion = "0.4.0" approx = "0.4" -linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["winequality"] } -linfa = { version = "0.6.1", path = "../..", features = ["benchmarks"] } +linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["winequality"] } +linfa = { version = "0.7.0", path = "../..", features = ["benchmarks"] } [[bench]] name = "ftrl" diff --git a/algorithms/linfa-hierarchical/Cargo.toml b/algorithms/linfa-hierarchical/Cargo.toml index c92bdfd14..fdca29c73 100644 --- a/algorithms/linfa-hierarchical/Cargo.toml +++ b/algorithms/linfa-hierarchical/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-hierarchical" -version = "0.6.1" +version = "0.7.0" authors = ["Lorenz Schmidt "] edition = "2018" @@ -18,10 +18,10 @@ ndarray = { version = "0.15" } kodama = "0.2" thiserror = "1.0.25" -linfa = { version = "0.6.1", path = "../.." } -linfa-kernel = { version = "0.6.1", path = "../linfa-kernel" } +linfa = { version = "0.7.0", path = "../.." } +linfa-kernel = { version = "0.7.0", path = "../linfa-kernel" } [dev-dependencies] rand = "0.8" ndarray-rand = "0.14" -linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["iris"] } +linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["iris"] } diff --git a/algorithms/linfa-ica/Cargo.toml b/algorithms/linfa-ica/Cargo.toml index 2d11a5e37..64d0ccc81 100644 --- a/algorithms/linfa-ica/Cargo.toml +++ b/algorithms/linfa-ica/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-ica" -version = "0.6.1" +version = "0.7.0" authors = ["VasanthakumarV "] description = "A collection of Independent Component Analysis (ICA) algorithms" edition = "2018" @@ -34,13 +34,13 @@ num-traits = "0.2" rand_xoshiro = "0.6" thiserror = "1.0" -linfa = { version = "0.6.1", path = "../.." } +linfa = { version = "0.7.0", path = "../.." } [dev-dependencies] ndarray-npy = { version = "0.8", default-features = false } paste = "1.0" criterion = "0.4.0" -linfa = { version = "0.6.0", path = "../..", features = ["benchmarks"] } +linfa = { version = "0.7.0", path = "../..", features = ["benchmarks"] } [[bench]] name = "fast_ica" diff --git a/algorithms/linfa-kernel/Cargo.toml b/algorithms/linfa-kernel/Cargo.toml index 214f8f9c4..4646cea27 100644 --- a/algorithms/linfa-kernel/Cargo.toml +++ b/algorithms/linfa-kernel/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-kernel" -version = "0.6.1" +version = "0.7.0" authors = ["Lorenz Schmidt "] description = "Kernel methods for non-linear algorithms" edition = "2018" @@ -28,5 +28,5 @@ ndarray = "0.15" num-traits = "0.2" sprs = { version="0.11", default-features = false } -linfa = { version = "0.6.1", path = "../.." 
} -linfa-nn = { version = "0.6.1", path = "../linfa-nn" } +linfa = { version = "0.7.0", path = "../.." } +linfa-nn = { version = "0.7.0", path = "../linfa-nn" } diff --git a/algorithms/linfa-linear/Cargo.toml b/algorithms/linfa-linear/Cargo.toml index cba623bf5..11fd98a8f 100644 --- a/algorithms/linfa-linear/Cargo.toml +++ b/algorithms/linfa-linear/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-linear" -version = "0.6.1" +version = "0.7.0" authors = [ "Paul Körbitz / Google ", "VasanthakumarV " @@ -36,14 +36,14 @@ argmin = { version = "0.8.1", default-features = false } argmin-math = { version = "0.3", features = ["ndarray_v0_15-nolinalg"] } thiserror = "1.0" -linfa = { version = "0.6.1", path = "../.." } +linfa = { version = "0.7.0", path = "../.." } [dev-dependencies] -linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["diabetes"] } +linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["diabetes"] } approx = "0.4" criterion = "0.4.0" statrs = "0.16.0" -linfa = { version = "0.6.0", path = "../..", features = ["benchmarks"] } +linfa = { version = "0.7.0", path = "../..", features = ["benchmarks"] } [[bench]] name = "ols_bench" diff --git a/algorithms/linfa-logistic/Cargo.toml b/algorithms/linfa-logistic/Cargo.toml index d78bcf284..54838ebb8 100644 --- a/algorithms/linfa-logistic/Cargo.toml +++ b/algorithms/linfa-logistic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-logistic" -version = "0.6.1" +version = "0.7.0" authors = ["Paul Körbitz / Google "] description = "A Machine Learning framework for Rust" @@ -30,9 +30,9 @@ argmin-math = { version = "0.3", features = ["ndarray_v0_15-nolinalg"] } thiserror = "1.0" -linfa = { version = "0.6.1", path = "../.." } +linfa = { version = "0.7.0", path = "../.." } [dev-dependencies] approx = "0.4" -linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["winequality"] } +linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["winequality"] } rmp-serde = "1" diff --git a/algorithms/linfa-nn/Cargo.toml b/algorithms/linfa-nn/Cargo.toml index fc4ad3d86..f1400c042 100644 --- a/algorithms/linfa-nn/Cargo.toml +++ b/algorithms/linfa-nn/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-nn" -version = "0.6.1" +version = "0.7.0" authors = ["YuhanLiin "] edition = "2018" description = "A collection of nearest neighbour algorithms" @@ -33,14 +33,14 @@ thiserror = "1.0" kdtree = "0.6.0" -linfa = { version = "0.6.1", path = "../.." } +linfa = { version = "0.7.0", path = "../.." } [dev-dependencies] approx = "0.4" criterion = "0.4.0" rand_xoshiro = "0.6" ndarray-rand = "0.14" -linfa = { version = "0.6.0", path = "../..", features = ["benchmarks"] } +linfa = { version = "0.7.0", path = "../..", features = ["benchmarks"] } [[bench]] name = "nn" diff --git a/algorithms/linfa-pls/Cargo.toml b/algorithms/linfa-pls/Cargo.toml index a7688ca63..4cf09001c 100644 --- a/algorithms/linfa-pls/Cargo.toml +++ b/algorithms/linfa-pls/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-pls" -version = "0.6.1" +version = "0.7.0" edition = "2018" authors = ["relf "] description = "Partial Least Squares family methods" @@ -33,11 +33,11 @@ ndarray-rand = "0.14" num-traits = "0.2" paste = "1.0" thiserror = "1.0" -linfa = { version = "0.6.1", path = "../.." } +linfa = { version = "0.7.0", path = "../.." 
} [dev-dependencies] -linfa = { version = "0.6.1", path = "../..", features = ["benchmarks"] } -linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["linnerud"] } +linfa = { version = "0.7.0", path = "../..", features = ["benchmarks"] } +linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["linnerud"] } approx = "0.4" rand_xoshiro = "0.6" criterion = "0.4.0" diff --git a/algorithms/linfa-preprocessing/Cargo.toml b/algorithms/linfa-preprocessing/Cargo.toml index 36ced2d33..acc31d5af 100644 --- a/algorithms/linfa-preprocessing/Cargo.toml +++ b/algorithms/linfa-preprocessing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-preprocessing" -version = "0.6.1" +version = "0.7.0" authors = ["Sauro98 "] description = "A Machine Learning framework for Rust" @@ -18,7 +18,7 @@ blas = ["ndarray-linalg", "linfa/ndarray-linalg"] serde = ["serde_crate", "ndarray/serde", "serde_regex"] [dependencies] -linfa = { version = "0.6.1", path = "../.." } +linfa = { version = "0.7.0", path = "../.." } ndarray = { version = "0.15", features = ["approx"] } ndarray-linalg = { version = "0.15", optional = true } linfa-linalg = { version = "0.1", default-features = false } @@ -41,8 +41,8 @@ default-features = false features = ["std", "derive"] [dev-dependencies] -linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["diabetes", "winequality"] } -linfa-bayes = { version = "0.6.1", path = "../linfa-bayes" } +linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["diabetes", "winequality"] } +linfa-bayes = { version = "0.7.0", path = "../linfa-bayes" } iai = "0.1" curl = "0.4.35" flate2 = "1.0.20" diff --git a/algorithms/linfa-preprocessing/src/linear_scaling.rs b/algorithms/linfa-preprocessing/src/linear_scaling.rs index 7a930cd6d..03f094748 100644 --- a/algorithms/linfa-preprocessing/src/linear_scaling.rs +++ b/algorithms/linfa-preprocessing/src/linear_scaling.rs @@ -19,7 +19,7 @@ use serde_crate::{Deserialize, Serialize}; serde(crate = "serde_crate") )] #[derive(Clone, Debug, PartialEq, Eq)] -/// Possible scaling methods for [LinearScaler](LinearScaler) +/// Possible scaling methods for [LinearScaler] /// /// * Standard (with mean, with std): subtracts the mean to each feature and scales it by the inverse of its standard deviation /// * MinMax (min, max): scales each feature to fit in the range `min..=max`, default values are @@ -271,7 +271,7 @@ impl LinearScaler { &self.scales } - /// Returns the method used for fitting. Useful for printing, since [ScalingMethod](ScalingMethod) implements `Display` + /// Returns the method used for fitting. Useful for printing, since [ScalingMethod] implements `Display` pub fn method(&self) -> &ScalingMethod { &self.method } diff --git a/algorithms/linfa-preprocessing/src/tf_idf_vectorization.rs b/algorithms/linfa-preprocessing/src/tf_idf_vectorization.rs index b13e92659..3e3bf464f 100644 --- a/algorithms/linfa-preprocessing/src/tf_idf_vectorization.rs +++ b/algorithms/linfa-preprocessing/src/tf_idf_vectorization.rs @@ -40,12 +40,12 @@ impl TfIdfMethod { } } -/// Simlar to [`CountVectorizer`](CountVectorizer) but instead of +/// Simlar to [`CountVectorizer`] but instead of /// just counting the term frequency of each vocabulary entry in each given document, /// it computes the term frequecy times the inverse document frequency, thus giving more importance /// to entries that appear many times but only on some documents. 
The weight function can be adjusted /// by setting the appropriate [method](TfIdfMethod). This struct provides the same string -/// processing customizations described in [`CountVectorizer`](CountVectorizer). +/// processing customizations described in [`CountVectorizer`]. #[cfg_attr( feature = "serde", derive(Serialize, Deserialize), @@ -123,7 +123,7 @@ impl TfIdfVectorizer { } /// Learns a vocabulary from the texts in `x`, according to the specified attributes and maps each - /// vocabulary entry to an integer value, producing a [FittedTfIdfVectorizer](FittedTfIdfVectorizer). + /// vocabulary entry to an integer value, producing a [FittedTfIdfVectorizer]. /// /// Returns an error if: /// * one of the `n_gram` boundaries is set to zero or the minimum value is greater than the maximum value @@ -140,8 +140,8 @@ impl TfIdfVectorizer { }) } - /// Produces a [FittedTfIdfVectorizer](FittedTfIdfVectorizer) with the input vocabulary. - /// All struct attributes are ignored in the fitting but will be used by the [FittedTfIdfVectorizer](FittedTfIdfVectorizer) + /// Produces a [FittedTfIdfVectorizer] with the input vocabulary. + /// All struct attributes are ignored in the fitting but will be used by the [FittedTfIdfVectorizer] /// to transform any text to be examined. As such this will return an error in the same cases as the `fit` method. pub fn fit_vocabulary(&self, words: &[T]) -> Result { let fitted_vectorizer = self.count_vectorizer.fit_vocabulary(words)?; diff --git a/algorithms/linfa-preprocessing/src/whitening.rs b/algorithms/linfa-preprocessing/src/whitening.rs index 721796011..0af437701 100644 --- a/algorithms/linfa-preprocessing/src/whitening.rs +++ b/algorithms/linfa-preprocessing/src/whitening.rs @@ -39,7 +39,7 @@ pub enum WhiteningMethod { } /// Struct that can be fitted to the input data to obtain the related whitening matrix. -/// Fitting returns a [FittedWhitener](FittedWhitener) struct that can be used to +/// Fitting returns a [FittedWhitener] struct that can be used to /// apply the whitening transformation to the input data. #[cfg_attr( feature = "serde", @@ -157,7 +157,7 @@ impl, T: AsTargets> Fit, T, Prepro /// Struct that can be used to whiten data. Data will be scaled according to the whitening matrix learned /// during fitting. -/// Obtained by fitting a [Whitener](Whitener). +/// Obtained by fitting a [Whitener]. /// /// Transforming the data used during fitting will yield a scaled data matrix with /// unit diagonal covariance matrix. diff --git a/algorithms/linfa-reduction/Cargo.toml b/algorithms/linfa-reduction/Cargo.toml index ad5c8e5f6..de78f1f83 100644 --- a/algorithms/linfa-reduction/Cargo.toml +++ b/algorithms/linfa-reduction/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-reduction" -version = "0.6.1" +version = "0.7.0" authors = ["Lorenz Schmidt "] description = "A collection of dimensionality reduction techniques" edition = "2018" @@ -33,10 +33,10 @@ num-traits = "0.2" thiserror = "1.0" rand = { version = "0.8", features = ["small_rng"] } -linfa = { version = "0.6.1", path = "../.." } -linfa-kernel = { version = "0.6.1", path = "../linfa-kernel" } +linfa = { version = "0.7.0", path = "../.." 
} +linfa-kernel = { version = "0.7.0", path = "../linfa-kernel" } [dev-dependencies] ndarray-npy = { version = "0.8", default-features = false } -linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["iris", "generate"] } +linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["iris", "generate"] } approx = { version = "0.4" } diff --git a/algorithms/linfa-svm/Cargo.toml b/algorithms/linfa-svm/Cargo.toml index 908aa2a31..e2ca1f4f0 100644 --- a/algorithms/linfa-svm/Cargo.toml +++ b/algorithms/linfa-svm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-svm" -version = "0.6.1" +version = "0.7.0" edition = "2018" authors = ["Lorenz Schmidt "] description = "Support Vector Machines" @@ -29,10 +29,10 @@ ndarray-rand = "0.14" num-traits = "0.2" thiserror = "1.0" -linfa = { version = "0.6.1", path = "../.." } -linfa-kernel = { version = "0.6.1", path = "../linfa-kernel" } +linfa = { version = "0.7.0", path = "../.." } +linfa-kernel = { version = "0.7.0", path = "../linfa-kernel" } [dev-dependencies] -linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["winequality", "diabetes"] } +linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["winequality", "diabetes"] } rand_xoshiro = "0.6" approx = "0.4" diff --git a/algorithms/linfa-trees/Cargo.toml b/algorithms/linfa-trees/Cargo.toml index 011b8e485..a627b6fec 100644 --- a/algorithms/linfa-trees/Cargo.toml +++ b/algorithms/linfa-trees/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-trees" -version = "0.6.1" +version = "0.7.0" edition = "2018" authors = ["Moss Ebeling "] description = "A collection of tree-based algorithms" @@ -27,14 +27,14 @@ features = ["std", "derive"] ndarray = { version = "0.15" , features = ["rayon", "approx"]} ndarray-rand = "0.14" -linfa = { version = "0.6.1", path = "../.." } +linfa = { version = "0.7.0", path = "../.." } [dev-dependencies] rand = { version = "0.8", features = ["small_rng"] } criterion = "0.4.0" approx = "0.4" -linfa-datasets = { version = "0.6.1", path = "../../datasets/", features = ["iris"] } -linfa = { version = "0.6.1", path = "../..", features = ["benchmarks"] } +linfa-datasets = { version = "0.7.0", path = "../../datasets/", features = ["iris"] } +linfa = { version = "0.7.0", path = "../..", features = ["benchmarks"] } [[bench]] name = "decision_tree" diff --git a/algorithms/linfa-tsne/Cargo.toml b/algorithms/linfa-tsne/Cargo.toml index 2e30dcbe0..a86dae4af 100644 --- a/algorithms/linfa-tsne/Cargo.toml +++ b/algorithms/linfa-tsne/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-tsne" -version = "0.6.1" +version = "0.7.0" authors = ["Lorenz Schmidt "] edition = "2018" @@ -20,14 +20,14 @@ ndarray-rand = "0.14" bhtsne = "0.4.0" pdqselect = "=0.1.0" -linfa = { version = "0.6.1", path = "../.." } +linfa = { version = "0.7.0", path = "../.." 
} [dev-dependencies] rand = "0.8" approx = "0.4" -linfa-datasets = { version = "0.6.1", path = "../../datasets", features = ["iris"] } -linfa-reduction = { version = "0.6.1", path = "../linfa-reduction" } +linfa-datasets = { version = "0.7.0", path = "../../datasets", features = ["iris"] } +linfa-reduction = { version = "0.7.0", path = "../linfa-reduction" } [target.'cfg(not(target_family = "windows"))'.dev-dependencies] mnist = { version = "0.5", features = ["download"] } diff --git a/datasets/Cargo.toml b/datasets/Cargo.toml index aa78e9edc..f1364a573 100644 --- a/datasets/Cargo.toml +++ b/datasets/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linfa-datasets" -version = "0.6.1" +version = "0.7.0" authors = ["Lorenz Schmidt "] description = "Collection of small datasets for Linfa" edition = "2018" @@ -8,7 +8,7 @@ license = "MIT OR Apache-2.0" repository = "https://github.com/rust-ml/linfa" [dependencies] -linfa = { version = "0.6.1", path = ".." } +linfa = { version = "0.7.0", path = ".." } ndarray = { version = "0.15" } ndarray-csv = "=0.5.1" csv = "1.1" diff --git a/docs/website/content/news/release_061.md b/docs/website/content/news/release_061.md index 2ebb8c7f1..988dc89e4 100644 --- a/docs/website/content/news/release_061.md +++ b/docs/website/content/news/release_061.md @@ -1,6 +1,6 @@ +++ -title = "Release 0.6.0" -date = "2022-06-15" +title = "Release 0.6.1" +date = "2022-12-03" +++ Linfa's 0.6.1 release mainly consists of fixes to existing algorithms and the overall crate. The Isotonic Regression algorithm has also been added to `linfa-linear`. diff --git a/docs/website/content/news/release_070.md b/docs/website/content/news/release_070.md new file mode 100644 index 000000000..584f2f565 --- /dev/null +++ b/docs/website/content/news/release_070.md @@ -0,0 +1,18 @@ ++++ +title = "Release 0.7.0" +date = "2023-10-15" ++++ + +Linfa's 0.7.0 release mainly consists of improvements to Serde support. It also removes Approximate DBSCAN from `linfa-clustering` due to subpar performance and outdated dependencies. + +## Improvements and fixes + + * Add `array_from_gz_csv` and `array_from_csv` in `linfa-datasets`. + * Make Serde support in `linfa-linear`, `linfa-logistic`, and `linfa-ftrl` optional. + * Add Serde support to `linfa-preprocessing` and `linfa-bayes`. + * Bump `argmin` to 0.8.1. + * Make licenses follow SPDX 2.1 license expression standard. + +## Removals + +Approximate DBSCAN is an alternative implementation of the DBSCAN algorithm that trades precision for speed. However, the implementation in `linfa-clustering` is actually slower than the regular DBSCAN implementation. It also depends on the `partitions` crate, which is incompatible with current versions of Rust. Thus, we have decided to remove Approximate DBSCAN from Linfa. The Approximate DBSCAN types and APIs are now aliases to regular DBSCAN. 
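To make the opt-in Serde support above concrete, the sketch below shows a hypothetical downstream crate serializing a fitted `linfa-logistic` model and restoring it, mirroring the round-trip test added in the first patch. The toy data, the `main` function, and the use of `rmp-serde` are illustrative assumptions only; the consumer's manifest would need `linfa-logistic = { version = "0.7.0", features = ["serde"] }` for the derives to be available.

    // Hypothetical downstream usage of the now feature-gated Serde support.
    // Assumes linfa-logistic is built with its `serde` feature and that
    // rmp-serde (MessagePack) is available, as in the patch's own test.
    use linfa::prelude::*;
    use linfa::Dataset;
    use linfa_logistic::{FittedLogisticRegression, LogisticRegression};
    use ndarray::array;

    fn main() {
        // Toy training data: one feature, two classes labelled 0 and 1.
        let records = array![[0.0], [1.0], [2.0], [3.0]];
        let targets = array![0usize, 0, 1, 1];
        let dataset = Dataset::new(records, targets);

        // Fit a binomial logistic regression with default hyper-parameters.
        let model = LogisticRegression::default().fit(&dataset).unwrap();

        // Round-trip the fitted model through MessagePack and predict again.
        let bytes = rmp_serde::to_vec(&model).unwrap();
        let restored: FittedLogisticRegression<f64, usize> =
            rmp_serde::from_slice(&bytes).unwrap();
        println!("class for x = 2.5: {}", restored.predict(&array![[2.5]])[0]);
    }

The same opt-in flow applies to `linfa-ftrl` and `linfa-linear`, whose Serde support now sits behind an identically named `serde` feature, as reflected in the updated CI check above.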
From 39b3eeeb7ae912dafdfb433cccb424cd3475491b Mon Sep 17 00:00:00 2001 From: YuhanLiin Date: Mon, 16 Oct 2023 01:40:30 -0400 Subject: [PATCH 5/5] Fix docs --- algorithms/linfa-nn/src/balltree.rs | 4 ++-- algorithms/linfa-nn/src/kdtree.rs | 4 ++-- algorithms/linfa-nn/src/lib.rs | 4 ++-- algorithms/linfa-nn/src/linear.rs | 4 ++-- algorithms/linfa-trees/src/decision_trees/algorithm.rs | 2 +- src/metrics_regression.rs | 4 ++-- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/algorithms/linfa-nn/src/balltree.rs b/algorithms/linfa-nn/src/balltree.rs index e29045609..8954b6991 100644 --- a/algorithms/linfa-nn/src/balltree.rs +++ b/algorithms/linfa-nn/src/balltree.rs @@ -158,7 +158,7 @@ impl<'a, F: Float> BallTreeInner<'a, F> { } } -/// Spatial indexing structure created by [`BallTree`](BallTree) +/// Spatial indexing structure created by [`BallTree`] #[derive(Debug, Clone, PartialEq)] pub struct BallTreeIndex<'a, F: Float, D: Distance> { tree: BallTreeInner<'a, F>, @@ -282,7 +282,7 @@ impl<'a, F: Float, D: Distance> NearestNeighbourIndex for BallTreeIndex<'a /// Implementation of ball tree, a space partitioning data structure that partitions its points /// into nested hyperspheres called "balls". It performs spatial queries in `O(k * logN)` time, /// where `k` is the number of points returned by the query. Calling `from_batch` returns a -/// [`BallTreeIndex`](BallTreeIndex). +/// [`BallTreeIndex`]. /// /// More details can be found [here](https://en.wikipedia.org/wiki/Ball_tree). This implementation /// is based off of the [ball_tree](https://docs.rs/ball-tree/0.2.0/ball_tree/) crate. diff --git a/algorithms/linfa-nn/src/kdtree.rs b/algorithms/linfa-nn/src/kdtree.rs index d6ec643bb..b04135488 100644 --- a/algorithms/linfa-nn/src/kdtree.rs +++ b/algorithms/linfa-nn/src/kdtree.rs @@ -8,7 +8,7 @@ use crate::{ NnError, Point, }; -/// Spatial indexing structure created by [`KdTree`](KdTree) +/// Spatial indexing structure created by [`KdTree`] #[derive(Debug)] pub struct KdTreeIndex<'a, F: Float, D: Distance>( kdtree::KdTree, usize), &'a [F]>, @@ -90,7 +90,7 @@ impl<'a, F: Float, D: Distance> NearestNeighbourIndex for KdTreeIndex<'a, /// Implementation of K-D tree, a fast space-partitioning data structure. For each parent node, /// the indexed points are split with a hyperplane into two child nodes. Due to its tree-like /// structure, the K-D tree performs spatial queries in `O(k * logN)` time, where `k` is the number -/// of points returned by the query. Calling `from_batch` returns a [`KdTree`](KdTree). +/// of points returned by the query. Calling `from_batch` returns a [`KdTree`]. /// /// More details can be found [here](https://en.wikipedia.org/wiki/K-d_tree). /// diff --git a/algorithms/linfa-nn/src/lib.rs b/algorithms/linfa-nn/src/lib.rs index 599b60018..87a2d0c1a 100644 --- a/algorithms/linfa-nn/src/lib.rs +++ b/algorithms/linfa-nn/src/lib.rs @@ -58,7 +58,7 @@ pub enum NnError { /// Nearest neighbour algorithm builds a spatial index structure out of a batch of points. The /// distance between points is calculated using a provided distance function. The index implements -/// the [`NearestNeighbourIndex`](NearestNeighbourIndex) trait and allows for efficient +/// the [`NearestNeighbourIndex`] trait and allows for efficient /// computing of nearest neighbour and range queries. 
pub trait NearestNeighbour: std::fmt::Debug + Send + Sync + Unpin { /// Builds a spatial index using a MxN two-dimensional array representing M points with N @@ -115,7 +115,7 @@ pub trait NearestNeighbourIndex: Send + Sync + Unpin { ) -> Result, usize)>, NnError>; } -/// Enum that dispatches to one of the crate's [`NearestNeighbour`](NearestNeighbour) +/// Enum that dispatches to one of the crate's [`NearestNeighbour`] /// implementations based on value. This enum should be used instead of using types like /// `LinearSearch` and `KdTree` directly. /// diff --git a/algorithms/linfa-nn/src/linear.rs b/algorithms/linfa-nn/src/linear.rs index d227e1833..a9948a1a0 100644 --- a/algorithms/linfa-nn/src/linear.rs +++ b/algorithms/linfa-nn/src/linear.rs @@ -11,7 +11,7 @@ use crate::{ NearestNeighbourIndex, NnError, Point, }; -/// Spatial indexing structure created by [`LinearSearch`](LinearSearch) +/// Spatial indexing structure created by [`LinearSearch`] #[derive(Debug, Clone, PartialEq)] pub struct LinearSearchIndex<'a, F: Float, D: Distance>(ArrayView2<'a, F>, D); @@ -76,7 +76,7 @@ impl<'a, F: Float, D: Distance> NearestNeighbourIndex for LinearSearchInde /// Implementation of linear search, which is the simplest nearest neighbour algorithm. All queries /// are implemented by scanning through every point, so all of them are `O(N)`. Calling -/// `from_batch` returns a [`LinearSearchIndex`](LinearSearchIndex). +/// `from_batch` returns a [`LinearSearchIndex`]. #[derive(Default, Clone, Debug, PartialEq, Eq)] #[cfg_attr( feature = "serde", diff --git a/algorithms/linfa-trees/src/decision_trees/algorithm.rs b/algorithms/linfa-trees/src/decision_trees/algorithm.rs index a281db39d..c460a9299 100644 --- a/algorithms/linfa-trees/src/decision_trees/algorithm.rs +++ b/algorithms/linfa-trees/src/decision_trees/algorithm.rs @@ -615,7 +615,7 @@ impl DecisionTree { self.iter_nodes().filter(|node| node.is_leaf()).count() } - /// Generates a [`Tikz`](Tikz) structure to print the + /// Generates a [`Tikz`] structure to print the /// fitted tree in Tex using tikz and forest, with the following default parameters: /// /// * `legend=false` diff --git a/src/metrics_regression.rs b/src/metrics_regression.rs index b1b5bfcb4..b380c93f1 100644 --- a/src/metrics_regression.rs +++ b/src/metrics_regression.rs @@ -14,7 +14,7 @@ use std::ops::{Div, Sub}; /// Regression metrices trait for single targets. /// /// It is possible to compute the listed mectrics between two 1D arrays. -/// To compare bi-dimensional arrays use [`MultiTargetRegression`](MultiTargetRegression). +/// To compare bi-dimensional arrays use [`MultiTargetRegression`]. pub trait SingleTargetRegression>: AsSingleTargets { @@ -135,7 +135,7 @@ impl, T2: AsSingleTargets, D: D /// Regression metrices trait for multiple targets. /// /// It is possible to compute the listed mectrics between two 2D arrays. -/// To compare single-dimensional arrays use [`SingleTargetRegression`](SingleTargetRegression). +/// To compare single-dimensional arrays use [`SingleTargetRegression`]. pub trait MultiTargetRegression>: AsMultiTargets {
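For reference, a minimal sketch of the gating pattern the first patch applies to every (de)serializable type: the derives stay, but behind a `serde` feature, with the optional dependency renamed to `serde_crate` in the manifest. `ExampleParams` below is a placeholder struct, not part of linfa.

    // Minimal sketch of the cfg_attr gating used throughout the series.
    // `serde_crate` is the renamed optional `serde` dependency, as in the
    // linfa-logistic and linfa-ftrl manifests above; `ExampleParams` is a
    // stand-in type for illustration only.
    #[cfg(feature = "serde")]
    use serde_crate::{Deserialize, Serialize};

    #[derive(Debug, Clone, PartialEq)]
    #[cfg_attr(
        feature = "serde",
        derive(Serialize, Deserialize),
        serde(crate = "serde_crate")
    )]
    pub struct ExampleParams {
        pub alpha: f64,
        pub max_iterations: u64,
    }

Renaming the dependency to `serde_crate` keeps the feature name `serde` free for the umbrella feature that also enables `linfa/serde`, `ndarray/serde`, and `argmin/serde1`, as declared in the linfa-logistic and linfa-ftrl Cargo.toml changes of the first patch.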