From df62b63d4965a44f1b05077a535239db11b27615 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 2 Jan 2021 14:03:08 +0100 Subject: [PATCH 01/77] Differentiate between adapted and unadapted FDMs --- src/methods.jl | 107 +++++++++++++++++++++++++++++-------------------- 1 file changed, 64 insertions(+), 43 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 4b8e03b6..5ac90dca 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -1,60 +1,76 @@ export FiniteDifferenceMethod, fdm, backward_fdm, forward_fdm, central_fdm, extrapolate_fdm """ - estimate_magitude(f, x::T) where T<:AbstractFloat + FiniteDifferences.DEFAULT_CONDITION -Estimate the magnitude of `f` in a neighbourhood of `x`, assuming that the outputs of `f` -have a "typical" order of magnitude. The result should be interpreted as a very rough -estimate. This function deals with the case that `f(x) = 0`. +The default value for the [condition number](https://en.wikipedia.org/wiki/Condition_number) +of finite difference method. The condition number specifies the amplification of the ∞-norm +when passed to the function's derivative. """ -function estimate_magitude(f, x::T) where T<:AbstractFloat - M = float(maximum(abs, f(x))) - M > 0 && (return M) - # Ouch, `f(x) = 0`. But it may not be zero around `x`. We conclude that `x` is likely a - # pathological input for `f`. Perturb `x`. Assume that the pertubed value for `x` is - # highly unlikely also a pathological value for `f`. - Δ = convert(T, 0.1) * max(abs(x), one(x)) - return float(maximum(abs, f(x + Δ))) -end +const DEFAULT_CONDITION = 10 """ - estimate_roundoff_error(f, x::T) where T<:AbstractFloat + FiniteDifferences.DEFAULT_FACTOR -Estimate the round-off error of `f(x)`. This function deals with the case that `f(x) = 0`. +The default factor number. The factor number specifies the multiple to amplify the estimated +round-off errors by. """ -function estimate_roundoff_error(f, x::T) where T<:AbstractFloat - # Estimate the round-off error. 
It can happen that the function is zero around `x`, in - # which case we cannot take `eps(f(x))`. Therefore, we assume a lower bound that is - # equal to `eps(T) / 1000`, which gives `f` four orders of magnitude wiggle room. - return max(eps(estimate_magitude(f, x)), eps(T) / 1000) -end +const DEFAULT_FACTOR = 1 + +abstract type FiniteDifferenceMethod end """ - FiniteDifferences.DEFAULT_CONDITION + UnadaptedFiniteDifferenceMethod{P,Q} <: FiniteDifferenceMethod -The default [condition number](https://en.wikipedia.org/wiki/Condition_number) used when -computing bounds. It provides amplification of the ∞-norm when passed to the function's -derivatives. +A finite difference method that estimates a `Q`th order derivative from `P` function +evaluations. This method does not dynamically adapt its step size. + +# Fields +- `grid::NTuple{P,Float64}`: Multiples of the step size that the function will be evaluated + at. +- `coefs::NTuple{P,Float64}`: Coefficients corresponding to the grid functions that the + function evaluations will be weighted by. +- `bound_estimator::FiniteDifferenceMethod`: A finite difference method that is tuned to + perform adaptation for this finite difference method. +- `condition::Float64`: Condition number. See See [`DEFAULT_CONDITION`](@ref). +- `factor::Float64`: Factor number. See See [`DEFAULT_FACTOR`](@ref). """ -const DEFAULT_CONDITION = 100 +struct UnadaptedFiniteDifferenceMethod{P,Q} <: FiniteDifferenceMethod + grid::NTuple{P,Float64} + coefs::NTuple{P,Float64} + condition::Float64 + factor::Float64 +end """ - FiniteDifferenceMethod{G<:AbstractVector, C<:AbstractVector, E<:Function} + AdaptedFiniteDifferenceMethod{ + P, + Q, + E<:FiniteDifferenceMethod + } <: FiniteDifferenceMethod -A finite difference method. +A finite difference method that estimates a `Q`th order derivative from `P` function +evaluations. This method does dynamically adapt its step size. 
# Fields -- `grid::G`: Multiples of the step size that the function will be evaluated at. -- `q::Int`: Order of derivative to estimate. -- `coefs::C`: Coefficients corresponding to the grid functions that the function evaluations - will be weighted by. -- `bound_estimator::Function`: A function that takes in the function and the evaluation - point and returns a bound on the magnitude of the `length(grid)`th derivative. +- `grid::NTuple{P,Float64}`: Multiples of the step size that the function will be evaluated + at. +- `coefs::NTuple{P,Float64}`: Coefficients corresponding to the grid functions that the + function evaluations will be weighted by. +- `condition::Float64`: Condition number. See See [`DEFAULT_CONDITION`](@ref). +- `factor::Float64`: Factor number. See See [`DEFAULT_FACTOR`](@ref). +- `bound_estimator::E`: A finite difference method that is tuned to perform adaptation for + this finite difference method. """ -struct FiniteDifferenceMethod{G<:AbstractVector, C<:AbstractVector, E<:Function} - grid::G - q::Int - coefs::C +struct AdaptedFiniteDifferenceMethod{ + P, + Q, + E<:FiniteDifferenceMethod +} <: FiniteDifferenceMethod + grid::NTuple{P,Float64} + coefs::NTuple{P,Float64} + condition::Float64 + factor::Float64 bound_estimator::E end @@ -62,7 +78,8 @@ end FiniteDifferenceMethod( grid::AbstractVector, q::Int; - condition::Real=DEFAULT_CONDITION + condition::Real=DEFAULT_CONDITION, + factor::Real=DEFAULT_FACTOR ) Construct a finite difference method. @@ -70,7 +87,10 @@ Construct a finite difference method. # Arguments - `grid::Abstract`: The grid. See [`FiniteDifferenceMethod`](@ref). - `q::Int`: Order of the derivative to estimate. + +# Keyword Arguments - `condition::Real`: Condition number. See [`DEFAULT_CONDITION`](@ref). +- `factor::Real`: Factor number. See [`DEFAULT_FACTOR`](@ref). # Returns - `FiniteDifferenceMethod`: Specified finite difference method. @@ -78,15 +98,16 @@ Construct a finite difference method. 
function FiniteDifferenceMethod( grid::AbstractVector, q::Int; - condition::Real=DEFAULT_CONDITION + condition::Real=DEFAULT_CONDITION, + factor::Real=DEFAULT_FACTOR ) p = length(grid) _check_p_q(p, q) - return FiniteDifferenceMethod( + return UnadaptedFiniteDifferenceMethod{p,q}( grid, - q, _coefs(grid, q), - _make_default_bound_estimator(condition=condition) + Float64(condition), + Float64(factor) ) end From f826fa881297e9b773b0a1cf5661c76018be1cb2 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 2 Jan 2021 14:18:38 +0100 Subject: [PATCH 02/77] Use tuples to store coefs to reduce allocs --- src/methods.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 5ac90dca..5e3ee071 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -232,10 +232,10 @@ function _check_p_q(p::Integer, q::Integer) return end -const _COEFFS_CACHE = Dict{Tuple{AbstractVector{<:Real}, Integer}, Vector{Float64}}() +const _COEFFS_CACHE = Dict{Tuple{Tuple{Vararg{Int}}, Integer}, Tuple{Vararg{Float64}}}() # Compute coefficients for the method and cache the result. 
-function _coefs(grid::AbstractVector{<:Real}, q::Integer) +function _coefs(grid::Tuple{Vararg{Int}}, q::Integer) where N return get!(_COEFFS_CACHE, (grid, q)) do p = length(grid) # For high precision on the `\`, we use `Rational`, and to prevent overflows we use @@ -244,7 +244,7 @@ function _coefs(grid::AbstractVector{<:Real}, q::Integer) C = [Rational{Int128}(g^i) for i in 0:(p - 1), g in grid] x = zeros(Rational{Int128}, p) x[q + 1] = factorial(q) - return Float64.(C \ x) + return Tuple(Float64.(C \ x)) end end From 16ef6909ae77ee7a30c249029a7b42c3da98e5cb Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 2 Jan 2021 14:22:27 +0100 Subject: [PATCH 03/77] Make grids tuples --- src/methods.jl | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 5e3ee071..150455fe 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -385,19 +385,21 @@ function _make_adaptive_bound_estimator( end end -_forward_grid(p::Int) = collect(0:(p - 1)) +_forward_grid(p::Int) = Tuple(0:(p - 1)) -_backward_grid(p::Int) = collect((1 - p):0) +_backward_grid(p::Int) = Tuple((1 - p):0) function _central_grid(p::Int) if isodd(p) - return collect(div(1 - p, 2):div(p - 1, 2)) + return Tuple(div(1 - p, 2):div(p - 1, 2)) else - return vcat(div(-p, 2):-1, 1:div(p, 2)) + return ((div(-p, 2):-1)..., (1:div(p, 2))...) end end -_exponentiate_grid(grid::Vector, base::Int=3) = sign.(grid) .* base .^ abs.(grid) ./ base +function _exponentiate_grid(grid::Tuple{Vararg{Int}}, base::Int=3) + return sign.(grid) .* div.(base .^ abs.(grid), base) +end function _is_symmetric(vec::Vector; centre_zero::Bool=false, negate_half::Bool=false) half_sign = negate_half ? 
-1 : 1 From c3dd629238ce3cc97c051223f66fead717daeb3e Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 2 Jan 2021 14:34:52 +0100 Subject: [PATCH 04/77] Simplify construction of adapted methods --- src/methods.jl | 48 ++++++++++++++++++++++-------------------------- 1 file changed, 22 insertions(+), 26 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 150455fe..e87a8ac3 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -324,18 +324,31 @@ for direction in [:forward, :central, :backward] q::Int; adapt::Int=1, condition::Real=DEFAULT_CONDITION, + factor::Real=DEFAULT_FACTOR, geom::Bool=false ) _check_p_q(p, q) grid = $grid_fun(p) geom && (grid = _exponentiate_grid(grid)) coefs = _coefs(grid, q) - return FiniteDifferenceMethod( - grid, - q, - coefs, - _make_adaptive_bound_estimator($fdm_fun, p, q, adapt, condition, geom=geom), - ) + if adapt >= 1 + bound_estimator = $fdm_fun( + p + 2 + p; + adapt=adapt - 1, + condition=condition, + factor=factor + ) + return AdaptedFiniteDifferenceMethod{p, q, typeof(bound_estimator)}( + grid, + coefs, + condition, + factor, + bound_estimator + ) + else + return UnadaptedFiniteDifferenceMethod{p, q}(grid, coefs, condition, factor) + end end @doc """ @@ -344,11 +357,11 @@ for direction in [:forward, :central, :backward] q::Int; adapt::Int=1, condition::Real=DEFAULT_CONDITION, + factor::Real=DEFAULT_FACTOR, geom::Bool=false ) -Contruct a finite difference method at a $($(Meta.quot(direction))) grid of `p` linearly -spaced points. +Contruct a finite difference method at a $($(Meta.quot(direction))) grid of `p` points. # Arguments - `p::Int`: Number of grid points. @@ -359,6 +372,7 @@ spaced points. `p`th order derivative, which is important for the step size computation. Recurse this procedure `adapt` times. - `condition::Real`: Condition number. See [`DEFAULT_CONDITION`](@ref). +- `factor::Real`: Factor number. See [`DEFAULT_FACTOR`](@ref). 
- `geom::Bool`: Use geometrically spaced points instead of linearly spaced points. # Returns @@ -367,24 +381,6 @@ spaced points. end end -function _make_adaptive_bound_estimator( - constructor::Function, - p::Int, - q::Int, - adapt::Int, - condition::Int; - kw_args... -) - if adapt >= 1 - estimate_derivative = constructor( - p + 1, p, adapt=adapt - 1, condition=condition; kw_args... - ) - return (f, x) -> estimate_magitude(x′ -> estimate_derivative(f, x′), x) - else - return _make_default_bound_estimator(condition=condition) - end -end - _forward_grid(p::Int) = Tuple(0:(p - 1)) _backward_grid(p::Int) = Tuple((1 - p):0) From 3f1b78b9cae93b83b04f264594beeffb4e119aa3 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 2 Jan 2021 14:35:49 +0100 Subject: [PATCH 05/77] Add conversions --- src/methods.jl | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index e87a8ac3..b0dbb6f3 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -343,11 +343,16 @@ for direction in [:forward, :central, :backward] grid, coefs, condition, - factor, - bound_estimator + Float64(factor), + Float64(bound_estimator) ) else - return UnadaptedFiniteDifferenceMethod{p, q}(grid, coefs, condition, factor) + return UnadaptedFiniteDifferenceMethod{p, q}( + grid, + coefs, + Float64(condition), + Float64(factor) + ) end end From d4aa79483a9deb550e08e9a1d002f08d540258f1 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 2 Jan 2021 14:39:09 +0100 Subject: [PATCH 06/77] Propagate geom keyword and remove redundant method --- src/methods.jl | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index b0dbb6f3..3acfbeed 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -248,12 +248,6 @@ function _coefs(grid::Tuple{Vararg{Int}}, q::Integer) where N end end -# Estimate the bound on the derivative by amplifying the ∞-norm. 
-function _make_default_bound_estimator(; condition::Real=DEFAULT_CONDITION) - default_bound_estimator(f, x) = condition * estimate_magitude(f, x) - return default_bound_estimator -end - function Base.show(io::IO, m::MIME"text/plain", x::FiniteDifferenceMethod) @printf io "FiniteDifferenceMethod:\n" @printf io " order of method: %d\n" length(x.grid) @@ -337,7 +331,8 @@ for direction in [:forward, :central, :backward] p; adapt=adapt - 1, condition=condition, - factor=factor + factor=factor, + geom=geom ) return AdaptedFiniteDifferenceMethod{p, q, typeof(bound_estimator)}( grid, From b3017f1f530d6164788a90587d4d2b7e5454b235 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 2 Jan 2021 15:12:55 +0100 Subject: [PATCH 07/77] Make adaptation part of type and precompute more --- src/methods.jl | 84 +++++++++++++++++++++++++++++++++----------------- 1 file changed, 56 insertions(+), 28 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 3acfbeed..933b11aa 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -17,10 +17,10 @@ round-off errors by. """ const DEFAULT_FACTOR = 1 -abstract type FiniteDifferenceMethod end +abstract type FiniteDifferenceMethod{P,Q} end """ - UnadaptedFiniteDifferenceMethod{P,Q} <: FiniteDifferenceMethod + UnadaptedFiniteDifferenceMethod{P,Q} <: FiniteDifferenceMethod{P,Q} A finite difference method that estimates a `Q`th order derivative from `P` function evaluations. This method does not dynamically adapt its step size. @@ -34,12 +34,18 @@ evaluations. This method does not dynamically adapt its step size. perform adaptation for this finite difference method. - `condition::Float64`: Condition number. See See [`DEFAULT_CONDITION`](@ref). - `factor::Float64`: Factor number. See See [`DEFAULT_FACTOR`](@ref). +- `∇f_magnitude_mult::Float64`: Internally computed quantity that is used to estimate + the magnitude of `∇f(x)`. +- `f_magnitude_mult::Float64`: Internally computed quantity that is used to estimate + the magnitude of `f(x)`. 
""" -struct UnadaptedFiniteDifferenceMethod{P,Q} <: FiniteDifferenceMethod +struct UnadaptedFiniteDifferenceMethod{P,Q} <: FiniteDifferenceMethod{P,Q} grid::NTuple{P,Float64} coefs::NTuple{P,Float64} condition::Float64 factor::Float64 + ∇f_magnitude_mult::Float64 + f_magnitude_mult::Float64 end """ @@ -47,7 +53,7 @@ end P, Q, E<:FiniteDifferenceMethod - } <: FiniteDifferenceMethod + } <: FiniteDifferenceMethod{P,Q} A finite difference method that estimates a `Q`th order derivative from `P` function evaluations. This method does dynamically adapt its step size. @@ -59,18 +65,24 @@ evaluations. This method does dynamically adapt its step size. function evaluations will be weighted by. - `condition::Float64`: Condition number. See See [`DEFAULT_CONDITION`](@ref). - `factor::Float64`: Factor number. See See [`DEFAULT_FACTOR`](@ref). -- `bound_estimator::E`: A finite difference method that is tuned to perform adaptation for - this finite difference method. +- `∇f_magnitude_mult::Float64`: Internally computed quantity that is used to estimate + the magnitude of `∇f(x)`. +- `f_magnitude_mult::Float64`: Internally computed quantity that is used to estimate + the magnitude of `f(x)`. +- `bound_estimator::FiniteDifferenceMethod`: A finite difference method that is tuned to + perform adaptation for this finite difference method. """ struct AdaptedFiniteDifferenceMethod{ P, Q, E<:FiniteDifferenceMethod -} <: FiniteDifferenceMethod +} <: FiniteDifferenceMethod{P,Q} grid::NTuple{P,Float64} coefs::NTuple{P,Float64} condition::Float64 factor::Float64 + ∇f_magnitude_mult::Float64 + f_magnitude_mult::Float64 bound_estimator::E end @@ -96,18 +108,20 @@ Construct a finite difference method. - `FiniteDifferenceMethod`: Specified finite difference method. 
""" function FiniteDifferenceMethod( - grid::AbstractVector, + grid::NTuple{P, Int}, q::Int; condition::Real=DEFAULT_CONDITION, factor::Real=DEFAULT_FACTOR -) - p = length(grid) - _check_p_q(p, q) - return UnadaptedFiniteDifferenceMethod{p,q}( +) where P + _check_p_q(P, q) + coefs, ∇f_magnitude_mult, f_magnitude_mult = _coefs(grid, q) + return UnadaptedFiniteDifferenceMethod{P,q}( + grid, grid, - _coefs(grid, q), Float64(condition), - Float64(factor) + Float64(factor), + ∇f_magnitude_mult, + f_magnitude_mult ) end @@ -232,26 +246,36 @@ function _check_p_q(p::Integer, q::Integer) return end -const _COEFFS_CACHE = Dict{Tuple{Tuple{Vararg{Int}}, Integer}, Tuple{Vararg{Float64}}}() +const _COEFFS_CACHE = Dict{ + Tuple{Tuple{Vararg{Int}}, Integer}, + Tuple{Tuple{Vararg{Float64}}, Float64, Float64} +}() # Compute coefficients for the method and cache the result. -function _coefs(grid::Tuple{Vararg{Int}}, q::Integer) where N +function _coefs(grid::NTuple{P, Int}, q::Integer) where P return get!(_COEFFS_CACHE, (grid, q)) do - p = length(grid) # For high precision on the `\`, we use `Rational`, and to prevent overflows we use # `Int128`. At the end we go to `Float64` for fast floating point math, rather than # rational math. - C = [Rational{Int128}(g^i) for i in 0:(p - 1), g in grid] - x = zeros(Rational{Int128}, p) + C = [Rational{Int128}(g^i) for i in 0:(P - 1), g in grid] + x = zeros(Rational{Int128}, P) x[q + 1] = factorial(q) - return Tuple(Float64.(C \ x)) + coefs = Tuple(Float64.(C \ x)) + # Also precompute multipliers. 
+ ∇f_magnitude_mult = sum(coefs .* grid .^ P) / factorial(P) + f_magnitude_mult = sum(abs.(coefs)) + return coefs, ∇f_magnitude_mult, f_magnitude_mult end end -function Base.show(io::IO, m::MIME"text/plain", x::FiniteDifferenceMethod) +function Base.show( + io::IO, + m::MIME"text/plain", + x::FiniteDifferenceMethod{P, Q} +) where {P, Q} @printf io "FiniteDifferenceMethod:\n" - @printf io " order of method: %d\n" length(x.grid) - @printf io " order of derivative: %d\n" x.q + @printf io " order of method: %d\n" P + @printf io " order of derivative: %d\n" Q @printf io " grid: %s\n" x.grid @printf io " coefficients: %s\n" x.coefs end @@ -324,10 +348,10 @@ for direction in [:forward, :central, :backward] _check_p_q(p, q) grid = $grid_fun(p) geom && (grid = _exponentiate_grid(grid)) - coefs = _coefs(grid, q) + coefs, ∇f_magnitude_mult, f_magnitude_mult = _coefs(grid, q) if adapt >= 1 bound_estimator = $fdm_fun( - p + 2 + p + 2, p; adapt=adapt - 1, condition=condition, @@ -337,16 +361,20 @@ for direction in [:forward, :central, :backward] return AdaptedFiniteDifferenceMethod{p, q, typeof(bound_estimator)}( grid, coefs, - condition, + Float64(condition), Float64(factor), - Float64(bound_estimator) + ∇f_magnitude_mult, + f_magnitude_mult, + bound_estimator ) else return UnadaptedFiniteDifferenceMethod{p, q}( grid, coefs, Float64(condition), - Float64(factor) + Float64(factor), + ∇f_magnitude_mult, + f_magnitude_mult ) end end From ec78b97ad892d4d8e5feeffe49ddf6edabef1e85 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sun, 3 Jan 2021 15:25:03 +0100 Subject: [PATCH 08/77] First go at restructuring --- src/methods.jl | 312 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 216 insertions(+), 96 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 933b11aa..3db76f0e 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -30,22 +30,23 @@ evaluations. This method does not dynamically adapt its step size. at. 
- `coefs::NTuple{P,Float64}`: Coefficients corresponding to the grid functions that the function evaluations will be weighted by. +- `coefs_neighbourhood::NTuple{3,NTuple{P,Float64}}`: Sets of coefficients used for + estimating the magnitude of the derivative in a neighbourhood. - `bound_estimator::FiniteDifferenceMethod`: A finite difference method that is tuned to perform adaptation for this finite difference method. - `condition::Float64`: Condition number. See See [`DEFAULT_CONDITION`](@ref). - `factor::Float64`: Factor number. See See [`DEFAULT_FACTOR`](@ref). -- `∇f_magnitude_mult::Float64`: Internally computed quantity that is used to estimate - the magnitude of `∇f(x)`. -- `f_magnitude_mult::Float64`: Internally computed quantity that is used to estimate - the magnitude of `f(x)`. +- `∇f_magnitude_mult::Float64`: Internally computed quantity. +- `f_error_mult::Float64`: Internally computed quantity. """ struct UnadaptedFiniteDifferenceMethod{P,Q} <: FiniteDifferenceMethod{P,Q} - grid::NTuple{P,Float64} + grid::NTuple{P,Int} coefs::NTuple{P,Float64} + coefs_neighbourhood::NTuple{3,NTuple{P,Float64}} condition::Float64 factor::Float64 ∇f_magnitude_mult::Float64 - f_magnitude_mult::Float64 + f_error_mult::Float64 end """ @@ -63,12 +64,12 @@ evaluations. This method does dynamically adapt its step size. at. - `coefs::NTuple{P,Float64}`: Coefficients corresponding to the grid functions that the function evaluations will be weighted by. +- `coefs_neighbourhood::NTuple{3,NTuple{P,Float64}}`: Sets of coefficients used for + estimating the magnitude of the derivative in a neighbourhood. - `condition::Float64`: Condition number. See See [`DEFAULT_CONDITION`](@ref). - `factor::Float64`: Factor number. See See [`DEFAULT_FACTOR`](@ref). -- `∇f_magnitude_mult::Float64`: Internally computed quantity that is used to estimate - the magnitude of `∇f(x)`. -- `f_magnitude_mult::Float64`: Internally computed quantity that is used to estimate - the magnitude of `f(x)`. 
+- `∇f_magnitude_mult::Float64`: Internally computed quantity. +- `f_error_mult::Float64`: Internally computed quantity. - `bound_estimator::FiniteDifferenceMethod`: A finite difference method that is tuned to perform adaptation for this finite difference method. """ @@ -77,18 +78,19 @@ struct AdaptedFiniteDifferenceMethod{ Q, E<:FiniteDifferenceMethod } <: FiniteDifferenceMethod{P,Q} - grid::NTuple{P,Float64} + grid::NTuple{P,Int} coefs::NTuple{P,Float64} + coefs_neighbourhood::NTuple{3,NTuple{P,Float64}} condition::Float64 factor::Float64 ∇f_magnitude_mult::Float64 - f_magnitude_mult::Float64 + f_error_mult::Float64 bound_estimator::E end """ FiniteDifferenceMethod( - grid::AbstractVector, + grid::NTuple{P,Int}, q::Int; condition::Real=DEFAULT_CONDITION, factor::Real=DEFAULT_FACTOR @@ -97,10 +99,11 @@ end Construct a finite difference method. # Arguments -- `grid::Abstract`: The grid. See [`FiniteDifferenceMethod`](@ref). +- `grid::NTuple{P,Int}`: The grid. See [`AdaptedFiniteDifferenceMethod`](@ref) or + [`UnadaptedFiniteDifferenceMethod`](@ref). - `q::Int`: Order of the derivative to estimate. -# Keyword Arguments +# Keywords - `condition::Real`: Condition number. See [`DEFAULT_CONDITION`](@ref). - `factor::Real`: Factor number. See [`DEFAULT_FACTOR`](@ref). @@ -108,20 +111,21 @@ Construct a finite difference method. - `FiniteDifferenceMethod`: Specified finite difference method. 
""" function FiniteDifferenceMethod( - grid::NTuple{P, Int}, + grid::NTuple{P,Int}, q::Int; condition::Real=DEFAULT_CONDITION, factor::Real=DEFAULT_FACTOR ) where P _check_p_q(P, q) - coefs, ∇f_magnitude_mult, f_magnitude_mult = _coefs(grid, q) + coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult = _coefs(grid, q) return UnadaptedFiniteDifferenceMethod{P,q}( grid, - grid, + coefs, + coefs_neighbourhood, Float64(condition), Float64(factor), ∇f_magnitude_mult, - f_magnitude_mult + f_error_mult ) end @@ -129,8 +133,7 @@ end (m::FiniteDifferenceMethod)( f::Function, x::T; - factor::Real=1, - max_step::Real=0.1 * max(abs(x), one(x)) + max_range::Real=Inf ) where T<:AbstractFloat Estimate the derivative of `f` at `x` using the finite differencing method `m` and an @@ -141,9 +144,7 @@ automatically determined step size. - `x::T`: Input to estimate derivative at. # Keywords -- `factor::Real=1`: Factor to amplify the estimated round-off error by. This can be used - to force a more conservative step size. -- `max_step::Real=0.1 * max(abs(x), one(x))`: Maximum step size. +- `max_range::Real=Inf`: Upper bound on how far `f` can be evaluated away from `x`. # Returns - Estimate of the derivative. @@ -155,38 +156,60 @@ julia> fdm = central_fdm(5, 1) FiniteDifferenceMethod: order of method: 5 order of derivative: 1 - grid: [-2, -1, 0, 1, 2] - coefficients: [0.08333333333333333, -0.6666666666666666, 0.0, 0.6666666666666666, -0.08333333333333333] + grid: (-2, -1, 0, 1, 2) + coefficients: (0.08333333333333333, -0.6666666666666666, 0.0, 0.6666666666666666, -0.08333333333333333) julia> fdm(sin, 1) -0.5403023058681155 +0.5403023058681607 julia> fdm(sin, 1) - cos(1) # Check the error. --2.4313884239290928e-14 +2.098321516541546e-14 julia> FiniteDifferences.estimate_step(fdm, sin, 1.0) # Computes step size and estimates the error. 
-(0.0010632902144695163, 1.9577610541734626e-13) +(0.001065235154086019, 1.9541865128909085e-13) ``` """ @inline function (m::FiniteDifferenceMethod)(f::Function, x::Real; kw_args...) # Assume that converting to float is desired. return _call_method(m, f, float(x); kw_args...) end +@inline function _call_method( + m::FiniteDifferenceMethod{P,0}, + f::Function, + x::T; + max_range::Real=Inf +) where {P,T<:AbstractFloat} + # The automatic step size calculation fails if `Q == 0`, so handle that edge case. + return f(x) + end @inline function _call_method( m::FiniteDifferenceMethod, f::Function, x::T; - factor::Real=1, - max_step::Real=0.1 * max(abs(x), one(x)) -) where T<:AbstractFloat - # The automatic step size calculation fails if `m.q == 0`, so handle that edge case. - iszero(m.q) && return f(x) - h, _ = estimate_step(m, f, x, factor=factor, max_step=max_step) - return _eval_method(m, f, x, h) + max_range::Real=Inf +) where {T<:AbstractFloat} + step, _ = estimate_step(m, f, x, max_range=max_range) + return _eval_method(m, _evals(m, f, x, step), x, step, m.coefs) +end + +function _estimate_magnitudes( + m::FiniteDifferenceMethod, + f::Function, + x::T; + max_range::Real=Inf +) where {T<:AbstractFloat} + step, _ = estimate_step(m, f, x, max_range=max_range) + fs = _evals(m, f, x, step) + # Estimate magnitude of `∇f` in a neighbourhood of `x`. + ∇fs = _eval_method.((m,), (fs,), x, step, m.coefs_neighbourhood) + ∇f_magnitude = maximum(maximum.(abs.(∇fs))) + # Estimate magnitude of `f` in a neighbourhood of `x`. + f_magnitude = maximum(maximum.(abs.(fs))) + return ∇f_magnitude, f_magnitude end """ - (m::FiniteDifferenceMethod)(f::Function, x::T, h::Real) where T<:AbstractFloat + (m::FiniteDifferenceMethod)(f::Function, x::T, step::Real) where T<:AbstractFloat Estimate the derivative of `f` at `x` using the finite differencing method `m` and a given step size. @@ -194,7 +217,7 @@ step size. # Arguments - `f::Function`: Function to estimate derivative of. 
- `x::T`: Input to estimate derivative at. -- `h::Real`: Step size. +- `step::Real`: Step size. # Returns - Estimate of the derivative. @@ -206,30 +229,37 @@ julia> fdm = central_fdm(5, 1) FiniteDifferenceMethod: order of method: 5 order of derivative: 1 - grid: [-2, -1, 0, 1, 2] - coefficients: [0.08333333333333333, -0.6666666666666666, 0.0, 0.6666666666666666, -0.08333333333333333] + grid: (-2, -1, 0, 1, 2) + coefficients: (0.08333333333333333, -0.6666666666666666, 0.0, 0.6666666666666666, -0.08333333333333333) -julia> fdm(sin, 1, 1e-3) -0.5403023058679624 + julia> fdm(sin, 1, 1e-3) + 0.5403023058679624 julia> fdm(sin, 1, 1e-3) - cos(1) # Check the error. -1.7741363933510002e-13 ``` """ -@inline function (m::FiniteDifferenceMethod)(f::Function, x::Real, h::Real) +@inline function (m::FiniteDifferenceMethod)(f::Function, x::Real, step::Real) # Assume that converting to float is desired. - return _eval_method(m, f, float(x), h) + x = float(x) + return _eval_method(m, _evals(m, f, x, step), x, step, m.coefs) end -@inline function _eval_method( +@inline function _evals( m::FiniteDifferenceMethod, f::Function, x::T, - h::Real -) where T<:AbstractFloat - return sum( - i -> convert(T, m.coefs[i]) * f(T(x + h * m.grid[i])), - eachindex(m.grid) - ) / h^m.q + step::T +) where {T<:AbstractFloat} + return f.(x .+ step .* m.grid) +end +@inline function _eval_method( + m::FiniteDifferenceMethod{P,Q}, + fs::NTuple{P}, + x::T, + step::Real, + coefs::NTuple{P,Float64} +) where {P,Q,T<:AbstractFloat} + return sum(T.(coefs) .* fs) ./ T(step^Q) end # Check the method and derivative orders for consistency. @@ -246,25 +276,49 @@ function _check_p_q(p::Integer, q::Integer) return end +function _compute_coefs(grid, p, q) + # For high precision on the `\`, we use `Rational`, and to prevent overflows we use + # `Int128`. At the end we go to `Float64` for fast floating point math, rather than + # rational math. 
+ C = [Rational{Int128}(g^i) for i in 0:(p - 1), g in grid] + x = zeros(Rational{Int128}, p) + x[q + 1] = factorial(q) + return Tuple(Float64.(C \ x)) +end + const _COEFFS_CACHE = Dict{ - Tuple{Tuple{Vararg{Int}}, Integer}, - Tuple{Tuple{Vararg{Float64}}, Float64, Float64} + Tuple{Tuple{Vararg{Int}},Integer}, + Tuple{Tuple{Vararg{Float64}},Tuple{Vararg{Tuple{Vararg{Float64}}}},Float64,Float64} }() # Compute coefficients for the method and cache the result. function _coefs(grid::NTuple{P, Int}, q::Integer) where P return get!(_COEFFS_CACHE, (grid, q)) do - # For high precision on the `\`, we use `Rational`, and to prevent overflows we use - # `Int128`. At the end we go to `Float64` for fast floating point math, rather than - # rational math. - C = [Rational{Int128}(g^i) for i in 0:(P - 1), g in grid] - x = zeros(Rational{Int128}, P) - x[q + 1] = factorial(q) - coefs = Tuple(Float64.(C \ x)) - # Also precompute multipliers. - ∇f_magnitude_mult = sum(coefs .* grid .^ P) / factorial(P) - f_magnitude_mult = sum(abs.(coefs)) - return coefs, ∇f_magnitude_mult, f_magnitude_mult + coefs = _compute_coefs(grid, P, q) + # Compute coefficients for a neighbourhood estimate. + if all(grid .>= 0) + coefs_neighbourhood = ( + _compute_coefs(grid .- 2, P, q), + _compute_coefs(grid .- 1, P, q), + _compute_coefs(grid, P, q) + ) + elseif all(grid .<= 0) + coefs_neighbourhood = ( + _compute_coefs(grid, P, q), + _compute_coefs(grid .+ 1, P, q), + _compute_coefs(grid .+ 2, P, q) + ) + else + coefs_neighbourhood = ( + _compute_coefs(grid .- 1, P, q), + _compute_coefs(grid, P, q), + _compute_coefs(grid .+ 1, P, q) + ) + end + # Ccompute multipliers. 
+ ∇f_magnitude_mult = sum(abs.(coefs .* grid .^ P)) / factorial(P) + f_error_mult = sum(abs.(coefs)) + return coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult end end @@ -285,8 +339,7 @@ end m::FiniteDifferenceMethod, f::Function, x::T; - factor::Real=1, - max_step::Real=0.1 * max(abs(x), one(x)) + max_range::Real=Inf ) where T<:AbstractFloat Estimate the step size for a finite difference method `m`. Also estimates the error of the @@ -298,40 +351,103 @@ estimate of the derivative. - `x::T`: Point to estimate the derivative at. # Keywords -- `factor::Real=1`. Factor to amplify the estimated round-off error by. This can be used - to force a more conservative step size. -- `max_step::Real=0.1 * max(abs(x), one(x))`: Maximum step size. +- `max_range::Real=Inf`: Upper bound on how far `f` can be evaluated away from `x`. # Returns -- `Tuple{T, <:AbstractFloat}`: Estimated step size and an estimate of the error of the - finite difference estimate. +- `Tuple{<:AbstractFloat, <:AbstractFloat}`: Estimated step size and an estimate of the + error of the finite difference estimate. """ function estimate_step( - m::FiniteDifferenceMethod, + m::UnadaptedFiniteDifferenceMethod, f::Function, x::T; - factor::Real=1, - max_step::Real=0.1 * max(abs(x), one(x)) -) where T<:AbstractFloat - p = length(m.coefs) - q = m.q - - # Estimate the round-off error. - ε = estimate_roundoff_error(f, x) * factor - - # Estimate the bound on the derivatives. 
- M = m.bound_estimator(f, x) + max_range::Real=Inf +) where {T<:AbstractFloat} + step, acc = _estimate_step_acc(m, x, max_range) + return step, acc +end +function estimate_step( + m::AdaptedFiniteDifferenceMethod, + f::Function, + x::T; + max_range::Real=Inf +) where {T<:AbstractFloat} + ∇f_magnitude, f_magnitude = _estimate_magnitudes( + m.bound_estimator, + f, + x; + max_range=max_range + ) + if ∇f_magnitude == 0 || f_magnitude == 0 + step, acc = _estimate_step_acc(m, x, max_range) + else + step, acc = _estimate_step_acc(m, x, ∇f_magnitude, eps(f_magnitude), max_range) + end + return step, acc +end +function _compute_step_acc( + m::FiniteDifferenceMethod{P,Q}, + ∇f_magnitude::Real, + f_error::Real +) where {P,Q} # Set the step size by minimising an upper bound on the error of the estimate. - C₁ = ε * sum(abs, m.coefs) - C₂ = M * sum(n -> abs(m.coefs[n] * m.grid[n]^p), eachindex(m.coefs)) / factorial(p) - # Type inference fails on this, so we annotate it, which gives big performance benefits. - h::T = convert(T, min((q / (p - q) * (C₁ / C₂))^(1 / p), max_step)) - + C₁ = f_error * m.f_error_mult * m.factor + C₂ = ∇f_magnitude * m.∇f_magnitude_mult + step = (Q / (P - Q) * (C₁ / C₂))^(1 / P) # Estimate the accuracy of the method. 
- accuracy = h^(-q) * C₁ + h^(p - q) * C₂ + acc = C₁ * step^(-Q) + C₂ * step^(P - Q) + return step, acc +end - return h, accuracy +function _compute_default( + m::FiniteDifferenceMethod, + x::T +) where {T<:AbstractFloat} + return _compute_step_acc(m, m.condition, eps(T)) +end + +function _estimate_step_acc( + m::FiniteDifferenceMethod, + x::T, + max_range::Real +) where {T<:AbstractFloat} + step, acc = _compute_default(m, x) + return _limit_step(m, x, step, acc, max_range) +end +function _estimate_step_acc( + m::AdaptedFiniteDifferenceMethod{P,Q,E}, + x::T, + ∇f_magnitude::Real, + f_error::Real, + max_range::Real +) where {P,Q,E,T<:AbstractFloat} + step, acc = _compute_step_acc(m, ∇f_magnitude, f_error) + return _limit_step(m, x, step, acc, max_range) +end + +function _limit_step( + m::FiniteDifferenceMethod, + x::T, + step::Real, + acc::Real, + max_range::Real +) where {T<:AbstractFloat} + # First, limit the step size based on the maximum range. + step_max = max_range / maximum(abs.(m.grid)) + if step > step_max + step = step_max + acc = NaN + end + # Second, prevent very large step sizes, which can occur for high-order methods or + # slowly-varying functions. + step_default, _ = _compute_default(m, x) + step_max_default = 1000step_default + if step > step_max_default + step = step_max_default + acc = NaN + end + return step, acc end for direction in [:forward, :central, :backward] @@ -348,9 +464,11 @@ for direction in [:forward, :central, :backward] _check_p_q(p, q) grid = $grid_fun(p) geom && (grid = _exponentiate_grid(grid)) - coefs, ∇f_magnitude_mult, f_magnitude_mult = _coefs(grid, q) + coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult = _coefs(grid, q) if adapt >= 1 bound_estimator = $fdm_fun( + # We need to increase the order by two to be able to estimate the + # magnitude of the derivative of `f` in a neighbourhood of `x`. 
p + 2, p; adapt=adapt - 1, @@ -361,20 +479,22 @@ for direction in [:forward, :central, :backward] return AdaptedFiniteDifferenceMethod{p, q, typeof(bound_estimator)}( grid, coefs, + coefs_neighbourhood, Float64(condition), Float64(factor), ∇f_magnitude_mult, - f_magnitude_mult, + f_error_mult, bound_estimator ) else return UnadaptedFiniteDifferenceMethod{p, q}( grid, coefs, + coefs_neighbourhood, Float64(condition), Float64(factor), ∇f_magnitude_mult, - f_magnitude_mult + f_error_mult ) end end @@ -446,7 +566,7 @@ end m::FiniteDifferenceMethod, f::Function, x::T, - h::Real=0.1 * max(abs(x), one(x)); + step::Real=0.1 * max(abs(x), one(x)); power=nothing, breaktol=Inf, kw_args... @@ -462,7 +582,7 @@ automatically sets `power = 2` if `m` is symmetric and `power = 1`. Moreover, it - `m::FiniteDifferenceMethod`: Finite difference method to estimate the step size for. - `f::Function`: Function to evaluate the derivative of. - `x::T`: Point to estimate the derivative at. -- `h::Real=0.1 * max(abs(x), one(x))`: Initial step size. +- `step::Real=0.1 * max(abs(x), one(x))`: Initial step size. # Returns - `Tuple{<:AbstractFloat, <:AbstractFloat}`: Estimate of the derivative and error. @@ -471,7 +591,7 @@ function extrapolate_fdm( m::FiniteDifferenceMethod, f::Function, x::T, - h::Real=0.1 * max(abs(x), one(x)); + step::Real=0.1 * max(abs(x), one(x)); power::Int=1, breaktol::Real=Inf, kw_args... 
From 8e36074c7061a31ed5f32494325d1d409c606a9f Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sun, 3 Jan 2021 15:31:55 +0100 Subject: [PATCH 09/77] Allow real inputs to extrapolate_fdm --- src/methods.jl | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 3db76f0e..6219c244 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -545,7 +545,7 @@ function _exponentiate_grid(grid::Tuple{Vararg{Int}}, base::Int=3) return sign.(grid) .* div.(base .^ abs.(grid), base) end -function _is_symmetric(vec::Vector; centre_zero::Bool=false, negate_half::Bool=false) +function _is_symmetric(vec::Tuple; centre_zero::Bool=false, negate_half::Bool=false) half_sign = negate_half ? -1 : 1 if isodd(length(vec)) centre_zero && vec[end ÷ 2 + 1] != 0 && return false @@ -565,14 +565,14 @@ end extrapolate_fdm( m::FiniteDifferenceMethod, f::Function, - x::T, - step::Real=0.1 * max(abs(x), one(x)); - power=nothing, - breaktol=Inf, + x::Real, + initial_step::Real=10, + power::Int=1, + breaktol::Real=Inf, kw_args... ) where T<:AbstractFloat -Use Richardson extrapolation to refine a finite difference method. +Use Richardson extrapolation to extrapolate a finite difference method. Takes further in keyword arguments for `Richardson.extrapolate`. This method automatically sets `power = 2` if `m` is symmetric and `power = 1`. Moreover, it defaults @@ -581,8 +581,8 @@ automatically sets `power = 2` if `m` is symmetric and `power = 1`. Moreover, it # Arguments - `m::FiniteDifferenceMethod`: Finite difference method to estimate the step size for. - `f::Function`: Function to evaluate the derivative of. -- `x::T`: Point to estimate the derivative at. -- `step::Real=0.1 * max(abs(x), one(x))`: Initial step size. +- `x::Real`: Point to estimate the derivative at. +- `initial_step::Real=10`: Initial step size. # Returns - `Tuple{<:AbstractFloat, <:AbstractFloat}`: Estimate of the derivative and error. 
@@ -590,12 +590,18 @@ automatically sets `power = 2` if `m` is symmetric and `power = 1`. Moreover, it function extrapolate_fdm( m::FiniteDifferenceMethod, f::Function, - x::T, - step::Real=0.1 * max(abs(x), one(x)); + x::Real, + initial_step::Real=10, power::Int=1, breaktol::Real=Inf, kw_args... ) where T<:AbstractFloat (power == 1 && _is_symmetric(m)) && (power = 2) - return extrapolate(h -> m(f, x, h), h; power=power, breaktol=breaktol, kw_args...) + return extrapolate( + step -> m(f, x, step), + float(initial_step); + power=power, + breaktol=breaktol, + kw_args... + ) end From cb63d6b01048ce07f7a69578f2786988a682c0d1 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 15:17:56 +0100 Subject: [PATCH 10/77] Add StaticArrays --- Project.toml | 1 + src/FiniteDifferences.jl | 1 + 2 files changed, 2 insertions(+) diff --git a/Project.toml b/Project.toml index bc926fa2..0f29049a 100644 --- a/Project.toml +++ b/Project.toml @@ -8,6 +8,7 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Richardson = "708f8203-808e-40c0-ba2d-98a6953ed40d" +StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" [compat] ChainRulesCore = "0.9" diff --git a/src/FiniteDifferences.jl b/src/FiniteDifferences.jl index bc67c23d..b0de5f90 100644 --- a/src/FiniteDifferences.jl +++ b/src/FiniteDifferences.jl @@ -5,6 +5,7 @@ module FiniteDifferences using Printf using Random using Richardson + using StaticArrays export to_vec, grad, jacobian, jvp, j′vp From 721bcd5e32e017cd1426113e1c61c790d53c2e33 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 15:18:09 +0100 Subject: [PATCH 11/77] Rework and use SAs to get zero allocs --- src/methods.jl | 331 ++++++++++++++++++++++++++++++------------------- 1 file changed, 200 insertions(+), 131 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 6219c244..c9f432ff 100644 --- a/src/methods.jl +++ b/src/methods.jl 
@@ -26,11 +26,10 @@ A finite difference method that estimates a `Q`th order derivative from `P` func evaluations. This method does not dynamically adapt its step size. # Fields -- `grid::NTuple{P,Float64}`: Multiples of the step size that the function will be evaluated - at. -- `coefs::NTuple{P,Float64}`: Coefficients corresponding to the grid functions that the +- `grid::SVector{P,Int}`: Multiples of the step size that the function will be evaluated at. +- `coefs::SVector{P,Float64}`: Coefficients corresponding to the grid functions that the function evaluations will be weighted by. -- `coefs_neighbourhood::NTuple{3,NTuple{P,Float64}}`: Sets of coefficients used for +- `coefs_neighbourhood::NTuple{3,SVector{P,Float64}}`: Sets of coefficients used for estimating the magnitude of the derivative in a neighbourhood. - `bound_estimator::FiniteDifferenceMethod`: A finite difference method that is tuned to perform adaptation for this finite difference method. @@ -40,9 +39,9 @@ evaluations. This method does not dynamically adapt its step size. - `f_error_mult::Float64`: Internally computed quantity. """ struct UnadaptedFiniteDifferenceMethod{P,Q} <: FiniteDifferenceMethod{P,Q} - grid::NTuple{P,Int} - coefs::NTuple{P,Float64} - coefs_neighbourhood::NTuple{3,NTuple{P,Float64}} + grid::SVector{P,Int} + coefs::SVector{P,Float64} + coefs_neighbourhood::NTuple{3,SVector{P,Float64}} condition::Float64 factor::Float64 ∇f_magnitude_mult::Float64 @@ -60,11 +59,10 @@ A finite difference method that estimates a `Q`th order derivative from `P` func evaluations. This method does dynamically adapt its step size. # Fields -- `grid::NTuple{P,Float64}`: Multiples of the step size that the function will be evaluated - at. -- `coefs::NTuple{P,Float64}`: Coefficients corresponding to the grid functions that the +- `grid::SVector{P,Int}`: Multiples of the step size that the function will be evaluated at. 
+- `coefs::SVector{P,Float64}`: Coefficients corresponding to the grid functions that the function evaluations will be weighted by. -- `coefs_neighbourhood::NTuple{3,NTuple{P,Float64}}`: Sets of coefficients used for +- `coefs_neighbourhood::NTuple{3,SVector{P,Float64}}`: Sets of coefficients used for estimating the magnitude of the derivative in a neighbourhood. - `condition::Float64`: Condition number. See See [`DEFAULT_CONDITION`](@ref). - `factor::Float64`: Factor number. See See [`DEFAULT_FACTOR`](@ref). @@ -78,9 +76,9 @@ struct AdaptedFiniteDifferenceMethod{ Q, E<:FiniteDifferenceMethod } <: FiniteDifferenceMethod{P,Q} - grid::NTuple{P,Int} - coefs::NTuple{P,Float64} - coefs_neighbourhood::NTuple{3,NTuple{P,Float64}} + grid::SVector{P,Int} + coefs::SVector{P,Float64} + coefs_neighbourhood::NTuple{3,SVector{P,Float64}} condition::Float64 factor::Float64 ∇f_magnitude_mult::Float64 @@ -90,7 +88,7 @@ end """ FiniteDifferenceMethod( - grid::NTuple{P,Int}, + grid::SVector{P,Int}, q::Int; condition::Real=DEFAULT_CONDITION, factor::Real=DEFAULT_FACTOR @@ -99,7 +97,7 @@ end Construct a finite difference method. # Arguments -- `grid::NTuple{P,Int}`: The grid. See [`AdaptedFiniteDifferenceMethod`](@ref) or +- `grid::SVector{P,Int}`: The grid. See [`AdaptedFiniteDifferenceMethod`](@ref) or [`UnadaptedFiniteDifferenceMethod`](@ref). - `q::Int`: Order of the derivative to estimate. @@ -111,13 +109,13 @@ Construct a finite difference method. - `FiniteDifferenceMethod`: Specified finite difference method. 
""" function FiniteDifferenceMethod( - grid::NTuple{P,Int}, + grid::SVector{P,Int}, q::Int; condition::Real=DEFAULT_CONDITION, factor::Real=DEFAULT_FACTOR ) where P _check_p_q(P, q) - coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult = _coefs(grid, q) + coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult = _coefs_mults(grid, q) return UnadaptedFiniteDifferenceMethod{P,q}( grid, coefs, @@ -156,8 +154,8 @@ julia> fdm = central_fdm(5, 1) FiniteDifferenceMethod: order of method: 5 order of derivative: 1 - grid: (-2, -1, 0, 1, 2) - coefficients: (0.08333333333333333, -0.6666666666666666, 0.0, 0.6666666666666666, -0.08333333333333333) + grid: [-2, -1, 0, 1, 2] + coefficients: [0.08333333333333333, -0.6666666666666666, 0.0, 0.6666666666666666, -0.08333333333333333] julia> fdm(sin, 1) 0.5403023058681607 @@ -169,43 +167,100 @@ julia> FiniteDifferences.estimate_step(fdm, sin, 1.0) # Computes step size and (0.001065235154086019, 1.9541865128909085e-13) ``` """ -@inline function (m::FiniteDifferenceMethod)(f::Function, x::Real; kw_args...) +function (m::FiniteDifferenceMethod)(f::Function, x::Real; kw_args...) # Assume that converting to float is desired. return _call_method(m, f, float(x); kw_args...) end -@inline function _call_method( - m::FiniteDifferenceMethod{P,0}, + +# The automatic step size calculation fails if `Q == 0`, so handle that edge case. +for t in [:UnadaptedFiniteDifferenceMethod, :AdaptedFiniteDifferenceMethod] + @eval function _call_method( + m::$t{P,0}, + f::Function, + x::T; + max_range::Real=Inf + ) where {P,T<:AbstractFloat} + return f(x) + end +end + +macro _evals(P, m, f, x, step) + assemble_f = Expr(:call, + Expr(:curly, :SVector, :($P)), + [Expr(:call, f, :(xs[$i])) for i = 1:P]... + ) + return esc(quote + xs = $x .+ $step .* $(m).grid + $assemble_f + end) +end + +macro _eval_method(P, Q, fs, coefs, step) + dot = Expr(:call, :+, [:($fs[$i] * $coefs[$i]) for i = 1:P]...) 
+ return esc(:($dot ./ $step^$Q)) +end + +@generated function _call_method( + m::UnadaptedFiniteDifferenceMethod{P,Q}, f::Function, x::T; max_range::Real=Inf -) where {P,T<:AbstractFloat} - # The automatic step size calculation fails if `Q == 0`, so handle that edge case. - return f(x) - end -@inline function _call_method( - m::FiniteDifferenceMethod, +) where {P,Q,T<:AbstractFloat} + return quote + step = T(first(_estimate_step_acc(m, x, max_range))) + fs = @_evals($P, m, f, x, step) + return @_eval_method($P, $Q, fs, T.(m.coefs), step) + end +end +@generated function _call_method( + m::AdaptedFiniteDifferenceMethod{P,Q}, f::Function, x::T; max_range::Real=Inf -) where {T<:AbstractFloat} - step, _ = estimate_step(m, f, x, max_range=max_range) - return _eval_method(m, _evals(m, f, x, step), x, step, m.coefs) +) where {P,Q,T<:AbstractFloat} + return quote + step = T(first(@_estimate_step_adapted($P, $Q, m, f, x, max_range))) + fs = @_evals($P, m, f, x, step) + return @_eval_method($P, $Q, fs, T.(m.coefs), step) + end end -function _estimate_magnitudes( - m::FiniteDifferenceMethod, +@generated function _estimate_magnitudes( + m::UnadaptedFiniteDifferenceMethod{P,Q}, f::Function, x::T; max_range::Real=Inf -) where {T<:AbstractFloat} - step, _ = estimate_step(m, f, x, max_range=max_range) - fs = _evals(m, f, x, step) - # Estimate magnitude of `∇f` in a neighbourhood of `x`. - ∇fs = _eval_method.((m,), (fs,), x, step, m.coefs_neighbourhood) - ∇f_magnitude = maximum(maximum.(abs.(∇fs))) - # Estimate magnitude of `f` in a neighbourhood of `x`. 
- f_magnitude = maximum(maximum.(abs.(fs))) - return ∇f_magnitude, f_magnitude +) where {P,Q,T<:AbstractFloat} + return quote + step = T(first(_estimate_step_acc(m, x, max_range))) + @_estimate_magnitudes_body($P, $Q) + end +end +@generated function _estimate_magnitudes( + m::AdaptedFiniteDifferenceMethod{P,Q}, + f::Function, + x::T; + max_range::Real=Inf +) where {P,Q,T<:AbstractFloat} + return quote + step = T(first(@_estimate_step_adapted($P, $Q, m, f, x, max_range))) + @_estimate_magnitudes_body($P, $Q) + end +end +macro _estimate_magnitudes_body(P, Q) + return esc(quote + fs = @_evals($P, m, f, x, step) + # Estimate magnitude of `∇f` in a neighbourhood of `x`. + ∇fs = SVector{3}( + @_eval_method($P, $Q, fs, m.coefs_neighbourhood[1], step), + @_eval_method($P, $Q, fs, m.coefs_neighbourhood[2], step), + @_eval_method($P, $Q, fs, m.coefs_neighbourhood[3], step) + ) + ∇f_magnitude = maximum(maximum.(abs, ∇fs)) + # Estimate magnitude of `f` in a neighbourhood of `x`. + f_magnitude = maximum(maximum.(abs, fs)) + return ∇f_magnitude, f_magnitude + end) end """ @@ -229,37 +284,36 @@ julia> fdm = central_fdm(5, 1) FiniteDifferenceMethod: order of method: 5 order of derivative: 1 - grid: (-2, -1, 0, 1, 2) - coefficients: (0.08333333333333333, -0.6666666666666666, 0.0, 0.6666666666666666, -0.08333333333333333) + grid: [-2, -1, 0, 1, 2] + coefficients: [0.08333333333333333, -0.6666666666666666, 0.0, 0.6666666666666666, -0.08333333333333333] - julia> fdm(sin, 1, 1e-3) +julia> fdm(sin, 1, 1e-3) 0.5403023058679624 julia> fdm(sin, 1, 1e-3) - cos(1) # Check the error. -1.7741363933510002e-13 ``` """ -@inline function (m::FiniteDifferenceMethod)(f::Function, x::Real, step::Real) +@inline function (m::FiniteDifferenceMethod{P, Q})( + f::Function, + x::Real, + step::Real +) where {P,Q} # Assume that converting to float is desired. 
x = float(x) - return _eval_method(m, _evals(m, f, x, step), x, step, m.coefs) + return _call_method_with_step(m, f, x, step) end -@inline function _evals( - m::FiniteDifferenceMethod, - f::Function, - x::T, - step::T -) where {T<:AbstractFloat} - return f.(x .+ step .* m.grid) -end -@inline function _eval_method( +@generated function _call_method_with_step( m::FiniteDifferenceMethod{P,Q}, - fs::NTuple{P}, + f::Function, x::T, - step::Real, - coefs::NTuple{P,Float64} + step::Real ) where {P,Q,T<:AbstractFloat} - return sum(T.(coefs) .* fs) ./ T(step^Q) + return quote + step = T(step) + fs = @_evals($P, m, f, x, step) + return @_eval_method($P, $Q, fs, T.(m.coefs), step) + end end # Check the method and derivative orders for consistency. @@ -276,46 +330,47 @@ function _check_p_q(p::Integer, q::Integer) return end -function _compute_coefs(grid, p, q) +function _coefs(grid, p, q) # For high precision on the `\`, we use `Rational`, and to prevent overflows we use - # `Int128`. At the end we go to `Float64` for fast floating point math, rather than + # `BigInt`. At the end we go to `Float64` for fast floating point math, rather than # rational math. - C = [Rational{Int128}(g^i) for i in 0:(p - 1), g in grid] - x = zeros(Rational{Int128}, p) + C = [Rational{BigInt}(g^i) for i in 0:(p - 1), g in grid] + x = zeros(Rational{BigInt}, p) x[q + 1] = factorial(q) - return Tuple(Float64.(C \ x)) + return SVector{p}(Float64.(C \ x)) end -const _COEFFS_CACHE = Dict{ - Tuple{Tuple{Vararg{Int}},Integer}, - Tuple{Tuple{Vararg{Float64}},Tuple{Vararg{Tuple{Vararg{Float64}}}},Float64,Float64} +const _COEFFS_MULTS_CACHE = Dict{ + Tuple{SVector,Integer}, + Tuple{SVector,Tuple{Vararg{SVector}},Float64,Float64} }() # Compute coefficients for the method and cache the result. 
-function _coefs(grid::NTuple{P, Int}, q::Integer) where P - return get!(_COEFFS_CACHE, (grid, q)) do - coefs = _compute_coefs(grid, P, q) +function _coefs_mults(grid::SVector{P, Int}, q::Integer) where P + return get!(_COEFFS_MULTS_CACHE, (grid, q)) do + # Compute coefficients for derivative estimate. + coefs = _coefs(grid, P, q) # Compute coefficients for a neighbourhood estimate. if all(grid .>= 0) coefs_neighbourhood = ( - _compute_coefs(grid .- 2, P, q), - _compute_coefs(grid .- 1, P, q), - _compute_coefs(grid, P, q) + _coefs(grid .- 2, P, q), + _coefs(grid .- 1, P, q), + _coefs(grid, P, q) ) elseif all(grid .<= 0) coefs_neighbourhood = ( - _compute_coefs(grid, P, q), - _compute_coefs(grid .+ 1, P, q), - _compute_coefs(grid .+ 2, P, q) + _coefs(grid, P, q), + _coefs(grid .+ 1, P, q), + _coefs(grid .+ 2, P, q) ) else coefs_neighbourhood = ( - _compute_coefs(grid .- 1, P, q), - _compute_coefs(grid, P, q), - _compute_coefs(grid .+ 1, P, q) + _coefs(grid .- 1, P, q), + _coefs(grid, P, q), + _coefs(grid .+ 1, P, q) ) end - # Ccompute multipliers. + # Compute multipliers. 
∇f_magnitude_mult = sum(abs.(coefs .* grid .^ P)) / factorial(P) f_error_mult = sum(abs.(coefs)) return coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult @@ -362,28 +417,57 @@ function estimate_step( f::Function, x::T; max_range::Real=Inf -) where {T<:AbstractFloat} - step, acc = _estimate_step_acc(m, x, max_range) - return step, acc +) where T<:AbstractFloat + return _estimate_step_acc(m, x, max_range) end -function estimate_step( - m::AdaptedFiniteDifferenceMethod, +@generated function estimate_step( + m::AdaptedFiniteDifferenceMethod{P,Q}, f::Function, x::T; max_range::Real=Inf +) where {P,Q,T<:AbstractFloat} + return :(@_estimate_step_adapted($P, $Q, m, f, x, max_range)) +end +macro _estimate_step_adapted(P, Q, m, f, x, max_range) + return esc(quote + ∇f_magnitude, f_magnitude = _estimate_magnitudes( + $(m).bound_estimator, + $f, + $x, + max_range=$max_range + ) + if ∇f_magnitude == 0 || f_magnitude == 0 + step, acc = _estimate_step_acc($m, $x, $max_range) + else + step, acc = _estimate_step_acc( + $m, + $x, + ∇f_magnitude, + eps(f_magnitude), + $max_range + ) + end + step, acc + end) +end + +function _estimate_step_acc( + m::FiniteDifferenceMethod, + x::T, + max_range::Real ) where {T<:AbstractFloat} - ∇f_magnitude, f_magnitude = _estimate_magnitudes( - m.bound_estimator, - f, - x; - max_range=max_range - ) - if ∇f_magnitude == 0 || f_magnitude == 0 - step, acc = _estimate_step_acc(m, x, max_range) - else - step, acc = _estimate_step_acc(m, x, ∇f_magnitude, eps(f_magnitude), max_range) - end - return step, acc + step, acc = _compute_step_acc(m, x) + return _limit_step(m, x, step, acc, max_range) +end +function _estimate_step_acc( + m::AdaptedFiniteDifferenceMethod, + x::T, + ∇f_magnitude::Real, + f_error::Real, + max_range::Real +) where {T<:AbstractFloat} + step, acc = _compute_step_acc(m, ∇f_magnitude, f_error) + return _limit_step(m, x, step, acc, max_range) end function _compute_step_acc( @@ -399,33 +483,14 @@ function _compute_step_acc( acc = C₁ 
* step^(-Q) + C₂ * step^(P - Q) return step, acc end - -function _compute_default( +function _compute_step_acc( m::FiniteDifferenceMethod, x::T ) where {T<:AbstractFloat} + # Set the step size using an heuristic and [`DEFAULT_CONDITION`](@ref). return _compute_step_acc(m, m.condition, eps(T)) end -function _estimate_step_acc( - m::FiniteDifferenceMethod, - x::T, - max_range::Real -) where {T<:AbstractFloat} - step, acc = _compute_default(m, x) - return _limit_step(m, x, step, acc, max_range) -end -function _estimate_step_acc( - m::AdaptedFiniteDifferenceMethod{P,Q,E}, - x::T, - ∇f_magnitude::Real, - f_error::Real, - max_range::Real -) where {P,Q,E,T<:AbstractFloat} - step, acc = _compute_step_acc(m, ∇f_magnitude, f_error) - return _limit_step(m, x, step, acc, max_range) -end - function _limit_step( m::FiniteDifferenceMethod, x::T, @@ -441,7 +506,7 @@ function _limit_step( end # Second, prevent very large step sizes, which can occur for high-order methods or # slowly-varying functions. - step_default, _ = _compute_default(m, x) + step_default, _ = _compute_step_acc(m, x) step_max_default = 1000step_default if step > step_max_default step = step_max_default @@ -464,7 +529,7 @@ for direction in [:forward, :central, :backward] _check_p_q(p, q) grid = $grid_fun(p) geom && (grid = _exponentiate_grid(grid)) - coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult = _coefs(grid, q) + coefs, coefs_nbhd, ∇f_magnitude_mult, f_error_mult = _coefs_mults(grid, q) if adapt >= 1 bound_estimator = $fdm_fun( # We need to increase the order by two to be able to estimate the @@ -479,7 +544,7 @@ for direction in [:forward, :central, :backward] return AdaptedFiniteDifferenceMethod{p, q, typeof(bound_estimator)}( grid, coefs, - coefs_neighbourhood, + coefs_nbhd, Float64(condition), Float64(factor), ∇f_magnitude_mult, @@ -490,7 +555,7 @@ for direction in [:forward, :central, :backward] return UnadaptedFiniteDifferenceMethod{p, q}( grid, coefs, - coefs_neighbourhood, + coefs_nbhd, 
Float64(condition), Float64(factor), ∇f_magnitude_mult, @@ -529,23 +594,27 @@ Contruct a finite difference method at a $($(Meta.quot(direction))) grid of `p` end end -_forward_grid(p::Int) = Tuple(0:(p - 1)) +_forward_grid(p::Int) = SVector{p}(0:(p - 1)) -_backward_grid(p::Int) = Tuple((1 - p):0) +_backward_grid(p::Int) = SVector{p}((1 - p):0) function _central_grid(p::Int) if isodd(p) - return Tuple(div(1 - p, 2):div(p - 1, 2)) + return SVector{p}(div(1 - p, 2):div(p - 1, 2)) else - return ((div(-p, 2):-1)..., (1:div(p, 2))...) + return SVector{p}((div(-p, 2):-1)..., (1:div(p, 2))...) end end -function _exponentiate_grid(grid::Tuple{Vararg{Int}}, base::Int=3) +function _exponentiate_grid(grid::SVector{P,Int}, base::Int=3) where P return sign.(grid) .* div.(base .^ abs.(grid), base) end -function _is_symmetric(vec::Tuple; centre_zero::Bool=false, negate_half::Bool=false) +function _is_symmetric( + vec::SVector{P}; + centre_zero::Bool=false, + negate_half::Bool=false +) where P half_sign = negate_half ? -1 : 1 if isodd(length(vec)) centre_zero && vec[end ÷ 2 + 1] != 0 && return false @@ -591,7 +660,7 @@ function extrapolate_fdm( m::FiniteDifferenceMethod, f::Function, x::Real, - initial_step::Real=10, + initial_step::Real=10; power::Int=1, breaktol::Real=Inf, kw_args... 
From 9bbbda40867da5460a96f6fa82d896b63d50179d Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 15:18:34 +0100 Subject: [PATCH 12/77] Fix tests --- test/methods.jl | 77 ++++++++++++++----------------------------------- 1 file changed, 22 insertions(+), 55 deletions(-) diff --git a/test/methods.jl b/test/methods.jl index 83a1d55f..95183578 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -1,39 +1,4 @@ -import FiniteDifferences: estimate_magitude, estimate_roundoff_error - @testset "Methods" begin - @testset "estimate_magitude" begin - f64(x::Float64) = x - f64_int(x::Float64) = Int(10x) - @test estimate_magitude(f64, 0.0) === 0.1 - @test estimate_magitude(f64, 1.0) === 1.0 - @test estimate_magitude(f64_int, 0.0) === 1.0 - @test estimate_magitude(f64_int, 1.0) === 10.0 - - f32(x::Float32) = x - f32_int(x::Float32) = Int(10 * x) - @test estimate_magitude(f32, 0f0) === 0.1f0 - @test estimate_magitude(f32, 1f0) === 1f0 - # In this case, the `Int` is converted with `float`, so we expect a `Float64`. - @test estimate_magitude(f32_int, 0f0) === 1.0 - @test estimate_magitude(f32_int, 1f0) === 10.0 - end - - @testset "estimate_roundoff_error" begin - # `Float64`s: - @test estimate_roundoff_error(identity, 1.0) == eps(1.0) - # Pertubation from `estimate_magitude`: - @test estimate_roundoff_error(identity, 0.0) == eps(0.1) - - # `Float32`s: - @test estimate_roundoff_error(identity, 1f0) == eps(1f0) - # Pertubation from `estimate_magitude`: - @test estimate_roundoff_error(identity, 0.0f0) == eps(0.1f0) - - # Test lower bound of `eps(T) / 1000`. - @test estimate_roundoff_error(x -> 1e-100, 0.0) == eps(1.0) / 1000 - @test estimate_roundoff_error(x -> 1f-100, 0f0) == eps(1f0) / 1000 - end - # The different approaches to approximating the gradient to try. 
methods = [forward_fdm, backward_fdm, central_fdm] @@ -47,7 +12,7 @@ import FiniteDifferences: estimate_magitude, estimate_roundoff_error (f=sin, d1=cos(1), d2=-sin(1)), (f=exp, d1=exp(1), d2=exp(1)), (f=abs2, d1=2, d2=2), - (f=x -> sqrt(x + 1), d1=0.5 / sqrt(2), d2=-0.25 / 2^(3/2)), + (f=x -> sqrt(max(x + 1,0)), d1=0.5 / sqrt(2), d2=-0.25 / 2^(3/2)), ] # Test all combinations of the above settings, i.e. differentiate all functions using @@ -73,12 +38,13 @@ import FiniteDifferences: estimate_magitude, estimate_roundoff_error end @testset "Adaptation improves estimate" begin - @test forward_fdm(5, 1, adapt=0)(log, 0.001) ≈ 997.077814 + @test abs(forward_fdm(5, 1, adapt=0)(log, 0.001) - 1000) > 1 @test forward_fdm(5, 1, adapt=1)(log, 0.001) ≈ 1000 end @testset "Limiting step size" begin - @test !isfinite(central_fdm(5, 1)(abs, 0.001, max_step=0)) + @test !isfinite(central_fdm(5, 1)(abs, 0.001, max_range=0)) + @test central_fdm(10, 1)(log, 1e-3, max_range=9e-4) ≈ 1000 @test central_fdm(5, 1)(abs, 0.001) ≈ 1.0 end @@ -86,7 +52,8 @@ import FiniteDifferences: estimate_magitude, estimate_roundoff_error # Regression test against issues with precision during computation of _coeffs # see https://github.com/JuliaDiff/FiniteDifferences.jl/issues/64 - @test central_fdm(9, 5, adapt=4, condition=1)(exp, 1.0) ≈ exp(1) atol=1e-7 + @test central_fdm(9, 5, adapt=4)(exp, 1.0) ≈ exp(1) atol=2e-7 + @test central_fdm(15, 5, adapt=2)(exp, 1.0) ≈ exp(1) atol=5e-12 poly(x) = 4x^3 + 3x^2 + 2x + 1 @test central_fdm(9, 3, adapt=4)(poly, 1.0) ≈ 24 atol=1e-11 @@ -104,27 +71,27 @@ import FiniteDifferences: estimate_magitude, estimate_roundoff_error @testset "_is_symmetric" begin # Test odd grids: - @test FiniteDifferences._is_symmetric([2, 1, 0, 1, 2]) - @test !FiniteDifferences._is_symmetric([2, 1, 0, 3, 2]) - @test !FiniteDifferences._is_symmetric([4, 1, 0, 1, 2]) + @test FiniteDifferences._is_symmetric(SVector{5}(2, 1, 0, 1, 2)) + @test !FiniteDifferences._is_symmetric(SVector{5}(2, 1, 0, 3, 
2)) + @test !FiniteDifferences._is_symmetric(SVector{5}(4, 1, 0, 1, 2)) # Test even grids: - @test FiniteDifferences._is_symmetric([2, 1, 1, 2]) - @test !FiniteDifferences._is_symmetric([2, 1, 3, 2]) - @test !FiniteDifferences._is_symmetric([4, 1, 1, 2]) + @test FiniteDifferences._is_symmetric(SVector{4}(2, 1, 1, 2)) + @test !FiniteDifferences._is_symmetric(SVector{4}(2, 1, 3, 2)) + @test !FiniteDifferences._is_symmetric(SVector{4}(4, 1, 1, 2)) # Test zero at centre: - @test FiniteDifferences._is_symmetric([2, 1, 4, 1, 2]) - @test !FiniteDifferences._is_symmetric([2, 1, 4, 1, 2], centre_zero=true) - @test FiniteDifferences._is_symmetric([2, 1, 1, 2], centre_zero=true) + @test FiniteDifferences._is_symmetric(SVector{5}(2, 1, 4, 1, 2)) + @test !FiniteDifferences._is_symmetric(SVector{5}(2, 1, 4, 1, 2), centre_zero=true) + @test FiniteDifferences._is_symmetric(SVector{4}(2, 1, 1, 2), centre_zero=true) # Test negation of a half: - @test !FiniteDifferences._is_symmetric([2, 1, -1, -2]) - @test FiniteDifferences._is_symmetric([2, 1, -1, -2], negate_half=true) - @test FiniteDifferences._is_symmetric([2, 1, 0, -1, -2], negate_half=true) - @test FiniteDifferences._is_symmetric([2, 1, 4, -1, -2], negate_half=true) + @test !FiniteDifferences._is_symmetric(SVector{4}(2, 1, -1, -2)) + @test FiniteDifferences._is_symmetric(SVector{4}(2, 1, -1, -2), negate_half=true) + @test FiniteDifferences._is_symmetric(SVector{5}(2, 1, 0, -1, -2), negate_half=true) + @test FiniteDifferences._is_symmetric(SVector{5}(2, 1, 4, -1, -2), negate_half=true) @test !FiniteDifferences._is_symmetric( - [2, 1, 4, -1, -2], + SVector{5}(2, 1, 4, -1, -2), negate_half=true, centre_zero=true ) @@ -155,8 +122,8 @@ import FiniteDifferences: estimate_magitude, estimate_roundoff_error end @testset "Derivative of cosc at 0 (#124)" begin - @test central_fdm(5, 1)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=1e-9 - @test central_fdm(10, 1, adapt=3)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=5e-14 + @test central_fdm(5, 1)(cosc, 0) ≈ -(pi ^ 2) 
/ 3 atol=1e-13 + @test central_fdm(10, 1, adapt=3)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=1e-15 end @testset "Derivative of a constant (#125)" begin From 4fc61e2cdee5caae2e6232d57a167fa3ac5400cc Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 16:20:07 +0100 Subject: [PATCH 13/77] Unspaghetti code --- src/methods.jl | 231 ++++++++++++++++--------------------------------- 1 file changed, 76 insertions(+), 155 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index c9f432ff..4995148d 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -167,100 +167,19 @@ julia> FiniteDifferences.estimate_step(fdm, sin, 1.0) # Computes step size and (0.001065235154086019, 1.9541865128909085e-13) ``` """ -function (m::FiniteDifferenceMethod)(f::Function, x::Real; kw_args...) +function (m::FiniteDifferenceMethod)(f::TF, x::Real; max_range::Real=Inf) where TF<:Function # Assume that converting to float is desired. - return _call_method(m, f, float(x); kw_args...) -end - -# The automatic step size calculation fails if `Q == 0`, so handle that edge case. -for t in [:UnadaptedFiniteDifferenceMethod, :AdaptedFiniteDifferenceMethod] - @eval function _call_method( - m::$t{P,0}, - f::Function, - x::T; - max_range::Real=Inf - ) where {P,T<:AbstractFloat} - return f(x) - end -end - -macro _evals(P, m, f, x, step) - assemble_f = Expr(:call, - Expr(:curly, :SVector, :($P)), - [Expr(:call, f, :(xs[$i])) for i = 1:P]... - ) - return esc(quote - xs = $x .+ $step .* $(m).grid - $assemble_f - end) -end - -macro _eval_method(P, Q, fs, coefs, step) - dot = Expr(:call, :+, [:($fs[$i] * $coefs[$i]) for i = 1:P]...) 
- return esc(:($dot ./ $step^$Q)) -end - -@generated function _call_method( - m::UnadaptedFiniteDifferenceMethod{P,Q}, - f::Function, - x::T; - max_range::Real=Inf -) where {P,Q,T<:AbstractFloat} - return quote - step = T(first(_estimate_step_acc(m, x, max_range))) - fs = @_evals($P, m, f, x, step) - return @_eval_method($P, $Q, fs, T.(m.coefs), step) - end -end -@generated function _call_method( - m::AdaptedFiniteDifferenceMethod{P,Q}, - f::Function, - x::T; - max_range::Real=Inf -) where {P,Q,T<:AbstractFloat} - return quote - step = T(first(@_estimate_step_adapted($P, $Q, m, f, x, max_range))) - fs = @_evals($P, m, f, x, step) - return @_eval_method($P, $Q, fs, T.(m.coefs), step) - end + x = float(x) + step = first(estimate_step(m, f, x, max_range=max_range)) + return m(f, x, step) end - -@generated function _estimate_magnitudes( - m::UnadaptedFiniteDifferenceMethod{P,Q}, - f::Function, - x::T; +function (m::FiniteDifferenceMethod{P,0})( + f::TF, + x::Real; max_range::Real=Inf -) where {P,Q,T<:AbstractFloat} - return quote - step = T(first(_estimate_step_acc(m, x, max_range))) - @_estimate_magnitudes_body($P, $Q) - end -end -@generated function _estimate_magnitudes( - m::AdaptedFiniteDifferenceMethod{P,Q}, - f::Function, - x::T; - max_range::Real=Inf -) where {P,Q,T<:AbstractFloat} - return quote - step = T(first(@_estimate_step_adapted($P, $Q, m, f, x, max_range))) - @_estimate_magnitudes_body($P, $Q) - end -end -macro _estimate_magnitudes_body(P, Q) - return esc(quote - fs = @_evals($P, m, f, x, step) - # Estimate magnitude of `∇f` in a neighbourhood of `x`. - ∇fs = SVector{3}( - @_eval_method($P, $Q, fs, m.coefs_neighbourhood[1], step), - @_eval_method($P, $Q, fs, m.coefs_neighbourhood[2], step), - @_eval_method($P, $Q, fs, m.coefs_neighbourhood[3], step) - ) - ∇f_magnitude = maximum(maximum.(abs, ∇fs)) - # Estimate magnitude of `f` in a neighbourhood of `x`. 
- f_magnitude = maximum(maximum.(abs, fs)) - return ∇f_magnitude, f_magnitude - end) +) where {P,TF<:Function} + # The automatic step size calculation fails if `Q == 0`, so handle that edge case. + return f(x) end """ @@ -294,26 +213,36 @@ julia> fdm(sin, 1, 1e-3) - cos(1) # Check the error. -1.7741363933510002e-13 ``` """ -@inline function (m::FiniteDifferenceMethod{P, Q})( - f::Function, +function (m::FiniteDifferenceMethod{P, Q})( + f::TF, x::Real, step::Real -) where {P,Q} +) where {P,Q,TF<:Function} # Assume that converting to float is desired. x = float(x) - return _call_method_with_step(m, f, x, step) + fs = _evals(m, f, x, step) + return _eval_method(m, fs, x, step, m.coefs) end -@generated function _call_method_with_step( - m::FiniteDifferenceMethod{P,Q}, - f::Function, + +function _evals( + m::FiniteDifferenceMethod, + f::TF, x::T, step::Real -) where {P,Q,T<:AbstractFloat} - return quote - step = T(step) - fs = @_evals($P, m, f, x, step) - return @_eval_method($P, $Q, fs, T.(m.coefs), step) - end +) where {TF<:Function,T<:AbstractFloat} + xs = x .+ T(step) .* m.grid + return f.(xs) +end + +function _eval_method( + m::FiniteDifferenceMethod{P,Q}, + fs::SVector{P,TF}, + x::T, + step::Real, + coefs::SVector{P,Float64} +) where {P, Q, TF, T<:AbstractFloat} + coefs = T.(coefs) + return sum(fs .* coefs) ./ T(step)^Q end # Check the method and derivative orders for consistency. @@ -414,60 +343,59 @@ estimate of the derivative. 
""" function estimate_step( m::UnadaptedFiniteDifferenceMethod, - f::Function, + f::TF, x::T; max_range::Real=Inf -) where T<:AbstractFloat - return _estimate_step_acc(m, x, max_range) +) where {TF<:Function,T<:AbstractFloat} + step, acc = _compute_step_acc_default(m, x) + return _limit_step(m, x, step, acc, max_range) end -@generated function estimate_step( +function estimate_step( m::AdaptedFiniteDifferenceMethod{P,Q}, - f::Function, + f::TF, x::T; max_range::Real=Inf -) where {P,Q,T<:AbstractFloat} - return :(@_estimate_step_adapted($P, $Q, m, f, x, max_range)) +) where {P,Q,TF<:Function,T<:AbstractFloat} + ∇f_magnitude, f_magnitude = _estimate_magnitudes( + m.bound_estimator, + f, + x, + max_range=max_range + ) + if ∇f_magnitude == 0 || f_magnitude == 0 + step, acc = _compute_step_acc_default(m, x) + else + step, acc = _compute_step_acc(m, ∇f_magnitude, eps(f_magnitude)) + end + return _limit_step(m, x, step, acc, max_range) end -macro _estimate_step_adapted(P, Q, m, f, x, max_range) - return esc(quote - ∇f_magnitude, f_magnitude = _estimate_magnitudes( - $(m).bound_estimator, - $f, - $x, - max_range=$max_range - ) - if ∇f_magnitude == 0 || f_magnitude == 0 - step, acc = _estimate_step_acc($m, $x, $max_range) - else - step, acc = _estimate_step_acc( - $m, - $x, - ∇f_magnitude, - eps(f_magnitude), - $max_range - ) - end - step, acc - end) + +function _estimate_magnitudes( + m::FiniteDifferenceMethod{P,Q}, + f::TF, + x::T; + max_range::Real=Inf +) where {P,Q,TF<:Function,T<:AbstractFloat} + step = T(first(estimate_step(m, f, x, max_range=max_range))) + fs = _evals(m, f, x, step) + # Estimate magnitude of `∇f` in a neighbourhood of `x`. + ∇fs = SVector{3}( + _eval_method(m, fs, x, step, m.coefs_neighbourhood[1]), + _eval_method(m, fs, x, step, m.coefs_neighbourhood[2]), + _eval_method(m, fs, x, step, m.coefs_neighbourhood[3]) + ) + ∇f_magnitude = maximum(maximum.(abs, ∇fs)) + # Estimate magnitude of `f` in a neighbourhood of `x`. 
+ f_magnitude = maximum(maximum.(abs, fs)) + return ∇f_magnitude, f_magnitude end -function _estimate_step_acc( +function _compute_step_acc_default( m::FiniteDifferenceMethod, - x::T, - max_range::Real -) where {T<:AbstractFloat} - step, acc = _compute_step_acc(m, x) - return _limit_step(m, x, step, acc, max_range) -end -function _estimate_step_acc( - m::AdaptedFiniteDifferenceMethod, - x::T, - ∇f_magnitude::Real, - f_error::Real, - max_range::Real + x::T ) where {T<:AbstractFloat} - step, acc = _compute_step_acc(m, ∇f_magnitude, f_error) - return _limit_step(m, x, step, acc, max_range) + # Compute a default step size using a heuristic and [`DEFAULT_CONDITION`](@ref). + return _compute_step_acc(m, m.condition, eps(T)) end function _compute_step_acc( @@ -483,13 +411,6 @@ function _compute_step_acc( acc = C₁ * step^(-Q) + C₂ * step^(P - Q) return step, acc end -function _compute_step_acc( - m::FiniteDifferenceMethod, - x::T -) where {T<:AbstractFloat} - # Set the step size using an heuristic and [`DEFAULT_CONDITION`](@ref). - return _compute_step_acc(m, m.condition, eps(T)) -end function _limit_step( m::FiniteDifferenceMethod, @@ -506,7 +427,7 @@ function _limit_step( end # Second, prevent very large step sizes, which can occur for high-order methods or # slowly-varying functions. - step_default, _ = _compute_step_acc(m, x) + step_default, _ = _compute_step_acc_default(m, x) step_max_default = 1000step_default if step > step_max_default step = step_max_default From 3d2e44fd9257e663c2729f8b280ccdab9c37667e Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 17:18:59 +0100 Subject: [PATCH 14/77] Change kw_arg to arg to fix type inference --- src/methods.jl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 4995148d..5b2c45c7 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -213,7 +213,7 @@ julia> fdm(sin, 1, 1e-3) - cos(1) # Check the error. 
-1.7741363933510002e-13 ``` """ -function (m::FiniteDifferenceMethod{P, Q})( +function (m::FiniteDifferenceMethod{P,Q})( f::TF, x::Real, step::Real @@ -240,7 +240,7 @@ function _eval_method( x::T, step::Real, coefs::SVector{P,Float64} -) where {P, Q, TF, T<:AbstractFloat} +) where {P,Q,TF,T<:AbstractFloat} coefs = T.(coefs) return sum(fs .* coefs) ./ T(step)^Q end @@ -360,9 +360,9 @@ function estimate_step( m.bound_estimator, f, x, - max_range=max_range + max_range ) - if ∇f_magnitude == 0 || f_magnitude == 0 + if ∇f_magnitude == 0.0 || f_magnitude == 0.0 step, acc = _compute_step_acc_default(m, x) else step, acc = _compute_step_acc(m, ∇f_magnitude, eps(f_magnitude)) @@ -373,10 +373,10 @@ end function _estimate_magnitudes( m::FiniteDifferenceMethod{P,Q}, f::TF, - x::T; + x::T, max_range::Real=Inf ) where {P,Q,TF<:Function,T<:AbstractFloat} - step = T(first(estimate_step(m, f, x, max_range=max_range))) + step = first(estimate_step(m, f, x, max_range=max_range)) fs = _evals(m, f, x, step) # Estimate magnitude of `∇f` in a neighbourhood of `x`. 
∇fs = SVector{3}( From 759cceb6253136b32a197212682fd7c1b928b9ef Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 17:23:07 +0100 Subject: [PATCH 15/77] Add BenchmarkTools as test dep --- Project.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Project.toml b/Project.toml index 0f29049a..1c62faef 100644 --- a/Project.toml +++ b/Project.toml @@ -17,8 +17,8 @@ julia = "1" [extras] Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" [targets] -test = ["Random", "StaticArrays", "Test"] +test = ["Random", "Test", "BenchmarkTools"] From e4bbc212f4c718bf10c0733fcf01048e7aa10850 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 17:35:27 +0100 Subject: [PATCH 16/77] Add test for allocations --- test/methods.jl | 5 +++++ test/runtests.jl | 1 + 2 files changed, 6 insertions(+) diff --git a/test/methods.jl b/test/methods.jl index 95183578..32259576 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -32,6 +32,11 @@ end end + @testset "Test allocations" begin + m = central_fdm(5, 2, adapt=2) + @test (@benchmark $m(sin, 1)).allocs == 0 + end + # Integration test to ensure that Integer-output functions can be tested. 
@testset "Integer output" begin @test isapprox(central_fdm(5, 1)(x -> 5, 0), 0; rtol=1e-12, atol=1e-12) diff --git a/test/runtests.jl b/test/runtests.jl index b994063a..9caa6b75 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,3 +1,4 @@ +using BenchmarkTools using ChainRulesCore using FiniteDifferences using LinearAlgebra From e96e7e11639dc40da643856e596dabf4fddbb90a Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 17:39:22 +0100 Subject: [PATCH 17/77] Reorganise tests --- test/methods.jl | 69 ++++++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 35 deletions(-) diff --git a/test/methods.jl b/test/methods.jl index 32259576..f5b6a135 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -1,34 +1,36 @@ @testset "Methods" begin - # The different approaches to approximating the gradient to try. - methods = [forward_fdm, backward_fdm, central_fdm] - - # The different floating point types to try and the associated required relative - # tolerance. - types = [Float32, Float64] - - # The different functions to evaluate (.f), their first derivative at 1 (.d1), - # and second derivative at 1 (.d2). - foos = [ - (f=sin, d1=cos(1), d2=-sin(1)), - (f=exp, d1=exp(1), d2=exp(1)), - (f=abs2, d1=2, d2=2), - (f=x -> sqrt(max(x + 1,0)), d1=0.5 / sqrt(2), d2=-0.25 / 2^(3/2)), - ] - - # Test all combinations of the above settings, i.e. differentiate all functions using - # all methods and data types. - @testset "foo=$(foo.f), method=$m, type=$(T)" for foo in foos, m in methods, T in types - @testset "method-order=$order" for order in [1, 2, 3, 4, 5] - @test m(order, 0, adapt=2)(foo.f, T(1)) isa T - @test m(order, 0, adapt=2)(foo.f, T(1)) == T(foo.f(1)) - end + @testset "Correctness" begin + # The different approaches to approximating the gradient to try. + methods = [forward_fdm, backward_fdm, central_fdm] + + # The different floating point types to try and the associated required relative + # tolerance. 
+ types = [Float32, Float64] + + # The different functions to evaluate (`.f`), their first derivative at 1 (`.d1`), + # and second derivative at 1 (`.d2`). + foos = [ + (f=sin, d1=cos(1), d2=-sin(1)), + (f=exp, d1=exp(1), d2=exp(1)), + (f=abs2, d1=2, d2=2), + (f=x -> sqrt(max(x + 1,0)), d1=0.5 / sqrt(2), d2=-0.25 / 2^(3/2)), + ] + + # Test all combinations of the above settings, i.e. differentiate all functions using + # all methods and data types. + @testset "foo=$(foo.f), method=$m, type=$(T)" for foo in foos, m in methods, T in types + @testset "method-order=$order" for order in [1, 2, 3, 4, 5] + @test m(order, 0, adapt=2)(foo.f, T(1)) isa T + @test m(order, 0, adapt=2)(foo.f, T(1)) == T(foo.f(1)) + end - @test m(10, 1)(foo.f, T(1)) isa T - @test m(10, 1)(foo.f, T(1)) ≈ T(foo.d1) + @test m(10, 1)(foo.f, T(1)) isa T + @test m(10, 1)(foo.f, T(1)) ≈ T(foo.d1) - @test m(10, 2)(foo.f, T(1)) isa T - if T == Float64 - @test m(10, 2)(foo.f, T(1)) ≈ T(foo.d2) + @test m(10, 2)(foo.f, T(1)) isa T + if T == Float64 + @test m(10, 2)(foo.f, T(1)) ≈ T(foo.d2) + end end end @@ -53,13 +55,10 @@ @test central_fdm(5, 1)(abs, 0.001) ≈ 1.0 end - @testset "Accuracy at high orders, with high adapt" begin - # Regression test against issues with precision during computation of _coeffs - # see https://github.com/JuliaDiff/FiniteDifferences.jl/issues/64 - + @testset "Accuracy at high orders and high adaptation (issue #64)" begin + # Regression test against issues with precision during computation of coefficients. 
@test central_fdm(9, 5, adapt=4)(exp, 1.0) ≈ exp(1) atol=2e-7 @test central_fdm(15, 5, adapt=2)(exp, 1.0) ≈ exp(1) atol=5e-12 - poly(x) = 4x^3 + 3x^2 + 2x + 1 @test central_fdm(9, 3, adapt=4)(poly, 1.0) ≈ 24 atol=1e-11 end @@ -126,12 +125,12 @@ end end - @testset "Derivative of cosc at 0 (#124)" begin + @testset "Derivative of cosc at 0 (issue #124)" begin @test central_fdm(5, 1)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=1e-13 @test central_fdm(10, 1, adapt=3)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=1e-15 end - @testset "Derivative of a constant (#125)" begin + @testset "Derivative of a constant (issue #125)" begin @test central_fdm(2, 1)(x -> 0, 0) ≈ 0 atol=1e-10 end end From 942297a2e39edccc7ecd8a5fef3b43c9d247df1c Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 17:40:53 +0100 Subject: [PATCH 18/77] Loosen very tight tolerances --- test/methods.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/methods.jl b/test/methods.jl index f5b6a135..a3e8e575 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -58,7 +58,7 @@ @testset "Accuracy at high orders and high adaptation (issue #64)" begin # Regression test against issues with precision during computation of coefficients. 
@test central_fdm(9, 5, adapt=4)(exp, 1.0) ≈ exp(1) atol=2e-7 - @test central_fdm(15, 5, adapt=2)(exp, 1.0) ≈ exp(1) atol=5e-12 + @test central_fdm(15, 5, adapt=2)(exp, 1.0) ≈ exp(1) atol=1e-11 poly(x) = 4x^3 + 3x^2 + 2x + 1 @test central_fdm(9, 3, adapt=4)(poly, 1.0) ≈ 24 atol=1e-11 end @@ -127,7 +127,7 @@ @testset "Derivative of cosc at 0 (issue #124)" begin @test central_fdm(5, 1)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=1e-13 - @test central_fdm(10, 1, adapt=3)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=1e-15 + @test central_fdm(10, 1, adapt=3)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=1e-14 end @testset "Derivative of a constant (issue #125)" begin From e92fb114ba8af26efa3f3c3d679585d80530f683 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 17:55:18 +0100 Subject: [PATCH 19/77] Make max_range part of FDM struct --- src/methods.jl | 79 ++++++++++++++++++++++--------------------------- test/methods.jl | 4 +-- 2 files changed, 37 insertions(+), 46 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 5b2c45c7..314c38b7 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -35,6 +35,8 @@ evaluations. This method does not dynamically adapt its step size. perform adaptation for this finite difference method. - `condition::Float64`: Condition number. See See [`DEFAULT_CONDITION`](@ref). - `factor::Float64`: Factor number. See See [`DEFAULT_FACTOR`](@ref). +- `max_range::Float64`: Maximum distance that a function is evaluated from the input at + which the derivative is estimated. - `∇f_magnitude_mult::Float64`: Internally computed quantity. - `f_error_mult::Float64`: Internally computed quantity. """ @@ -44,6 +46,7 @@ struct UnadaptedFiniteDifferenceMethod{P,Q} <: FiniteDifferenceMethod{P,Q} coefs_neighbourhood::NTuple{3,SVector{P,Float64}} condition::Float64 factor::Float64 + max_range::Float64 ∇f_magnitude_mult::Float64 f_error_mult::Float64 end @@ -66,6 +69,8 @@ evaluations. This method does dynamically adapt its step size. 
estimating the magnitude of the derivative in a neighbourhood. - `condition::Float64`: Condition number. See See [`DEFAULT_CONDITION`](@ref). - `factor::Float64`: Factor number. See See [`DEFAULT_FACTOR`](@ref). +- `max_range::Float64`: Maximum distance that a function is evaluated from the input at + which the derivative is estimated. - `∇f_magnitude_mult::Float64`: Internally computed quantity. - `f_error_mult::Float64`: Internally computed quantity. - `bound_estimator::FiniteDifferenceMethod`: A finite difference method that is tuned to @@ -81,6 +86,7 @@ struct AdaptedFiniteDifferenceMethod{ coefs_neighbourhood::NTuple{3,SVector{P,Float64}} condition::Float64 factor::Float64 + max_range::Float64 ∇f_magnitude_mult::Float64 f_error_mult::Float64 bound_estimator::E @@ -91,7 +97,8 @@ end grid::SVector{P,Int}, q::Int; condition::Real=DEFAULT_CONDITION, - factor::Real=DEFAULT_FACTOR + factor::Real=DEFAULT_FACTOR, + max_range::Real=Inf ) Construct a finite difference method. @@ -104,6 +111,8 @@ Construct a finite difference method. # Keywords - `condition::Real`: Condition number. See [`DEFAULT_CONDITION`](@ref). - `factor::Real`: Factor number. See [`DEFAULT_FACTOR`](@ref). +- `max_range::Real=Inf`: Maximum distance that a function is evaluated from the input at + which the derivative is estimated. # Returns - `FiniteDifferenceMethod`: Specified finite difference method. 
@@ -112,7 +121,8 @@ function FiniteDifferenceMethod( grid::SVector{P,Int}, q::Int; condition::Real=DEFAULT_CONDITION, - factor::Real=DEFAULT_FACTOR + factor::Real=DEFAULT_FACTOR, + max_range::Real=Inf ) where P _check_p_q(P, q) coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult = _coefs_mults(grid, q) @@ -122,17 +132,14 @@ function FiniteDifferenceMethod( coefs_neighbourhood, Float64(condition), Float64(factor), + Float64(max_range), ∇f_magnitude_mult, f_error_mult ) end """ - (m::FiniteDifferenceMethod)( - f::Function, - x::T; - max_range::Real=Inf - ) where T<:AbstractFloat + (m::FiniteDifferenceMethod)(f::Function, x::T) where T<:AbstractFloat Estimate the derivative of `f` at `x` using the finite differencing method `m` and an automatically determined step size. @@ -141,9 +148,6 @@ automatically determined step size. - `f::Function`: Function to estimate derivative of. - `x::T`: Input to estimate derivative at. -# Keywords -- `max_range::Real=Inf`: Upper bound on how far `f` can be evaluated away from `x`. - # Returns - Estimate of the derivative. @@ -167,17 +171,13 @@ julia> FiniteDifferences.estimate_step(fdm, sin, 1.0) # Computes step size and (0.001065235154086019, 1.9541865128909085e-13) ``` """ -function (m::FiniteDifferenceMethod)(f::TF, x::Real; max_range::Real=Inf) where TF<:Function +function (m::FiniteDifferenceMethod)(f::TF, x::Real) where TF<:Function # Assume that converting to float is desired. x = float(x) - step = first(estimate_step(m, f, x, max_range=max_range)) + step = first(estimate_step(m, f, x)) return m(f, x, step) end -function (m::FiniteDifferenceMethod{P,0})( - f::TF, - x::Real; - max_range::Real=Inf -) where {P,TF<:Function} +function (m::FiniteDifferenceMethod{P,0})(f::TF, x::Real) where {P,TF<:Function} # The automatic step size calculation fails if `Q == 0`, so handle that edge case. 
return f(x) end @@ -322,8 +322,7 @@ end function estimate_step( m::FiniteDifferenceMethod, f::Function, - x::T; - max_range::Real=Inf + x::T ) where T<:AbstractFloat Estimate the step size for a finite difference method `m`. Also estimates the error of the @@ -334,9 +333,6 @@ estimate of the derivative. - `f::Function`: Function to evaluate the derivative of. - `x::T`: Point to estimate the derivative at. -# Keywords -- `max_range::Real=Inf`: Upper bound on how far `f` can be evaluated away from `x`. - # Returns - `Tuple{<:AbstractFloat, <:AbstractFloat}`: Estimated step size and an estimate of the error of the finite difference estimate. @@ -344,39 +340,31 @@ estimate of the derivative. function estimate_step( m::UnadaptedFiniteDifferenceMethod, f::TF, - x::T; - max_range::Real=Inf + x::T ) where {TF<:Function,T<:AbstractFloat} step, acc = _compute_step_acc_default(m, x) - return _limit_step(m, x, step, acc, max_range) + return _limit_step(m, x, step, acc) end function estimate_step( m::AdaptedFiniteDifferenceMethod{P,Q}, f::TF, - x::T; - max_range::Real=Inf + x::T ) where {P,Q,TF<:Function,T<:AbstractFloat} - ∇f_magnitude, f_magnitude = _estimate_magnitudes( - m.bound_estimator, - f, - x, - max_range - ) + ∇f_magnitude, f_magnitude = _estimate_magnitudes(m.bound_estimator, f, x) if ∇f_magnitude == 0.0 || f_magnitude == 0.0 step, acc = _compute_step_acc_default(m, x) else step, acc = _compute_step_acc(m, ∇f_magnitude, eps(f_magnitude)) end - return _limit_step(m, x, step, acc, max_range) + return _limit_step(m, x, step, acc) end function _estimate_magnitudes( m::FiniteDifferenceMethod{P,Q}, f::TF, - x::T, - max_range::Real=Inf + x::T ) where {P,Q,TF<:Function,T<:AbstractFloat} - step = first(estimate_step(m, f, x, max_range=max_range)) + step = first(estimate_step(m, f, x)) fs = _evals(m, f, x, step) # Estimate magnitude of `∇f` in a neighbourhood of `x`. 
∇fs = SVector{3}( @@ -390,10 +378,7 @@ function _estimate_magnitudes( return ∇f_magnitude, f_magnitude end -function _compute_step_acc_default( - m::FiniteDifferenceMethod, - x::T -) where {T<:AbstractFloat} +function _compute_step_acc_default(m::FiniteDifferenceMethod, x::T) where {T<:AbstractFloat} # Compute a default step size using a heuristic and [`DEFAULT_CONDITION`](@ref). return _compute_step_acc(m, m.condition, eps(T)) end @@ -416,11 +401,10 @@ function _limit_step( m::FiniteDifferenceMethod, x::T, step::Real, - acc::Real, - max_range::Real + acc::Real ) where {T<:AbstractFloat} # First, limit the step size based on the maximum range. - step_max = max_range / maximum(abs.(m.grid)) + step_max = m.max_range / maximum(abs.(m.grid)) if step > step_max step = step_max acc = NaN @@ -445,6 +429,7 @@ for direction in [:forward, :central, :backward] adapt::Int=1, condition::Real=DEFAULT_CONDITION, factor::Real=DEFAULT_FACTOR, + max_range::Real=Inf, geom::Bool=false ) _check_p_q(p, q) @@ -460,6 +445,7 @@ for direction in [:forward, :central, :backward] adapt=adapt - 1, condition=condition, factor=factor, + max_range=max_range, geom=geom ) return AdaptedFiniteDifferenceMethod{p, q, typeof(bound_estimator)}( @@ -468,6 +454,7 @@ for direction in [:forward, :central, :backward] coefs_nbhd, Float64(condition), Float64(factor), + Float64(max_range), ∇f_magnitude_mult, f_error_mult, bound_estimator @@ -479,6 +466,7 @@ for direction in [:forward, :central, :backward] coefs_nbhd, Float64(condition), Float64(factor), + Float64(max_range), ∇f_magnitude_mult, f_error_mult ) @@ -492,6 +480,7 @@ for direction in [:forward, :central, :backward] adapt::Int=1, condition::Real=DEFAULT_CONDITION, factor::Real=DEFAULT_FACTOR, + max_range::Real=Inf, geom::Bool=false ) @@ -507,6 +496,8 @@ Contruct a finite difference method at a $($(Meta.quot(direction))) grid of `p` this procedure `adapt` times. - `condition::Real`: Condition number. See [`DEFAULT_CONDITION`](@ref). 
- `factor::Real`: Factor number. See [`DEFAULT_FACTOR`](@ref). +- `max_range::Real=Inf`: Maximum distance that a function is evaluated from the input at + which the derivative is estimated. - `geom::Bool`: Use geometrically spaced points instead of linearly spaced points. # Returns diff --git a/test/methods.jl b/test/methods.jl index a3e8e575..631f24b0 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -50,8 +50,8 @@ end @testset "Limiting step size" begin - @test !isfinite(central_fdm(5, 1)(abs, 0.001, max_range=0)) - @test central_fdm(10, 1)(log, 1e-3, max_range=9e-4) ≈ 1000 + @test !isfinite(central_fdm(5, 1, max_range=0)(abs, 0.001)) + @test central_fdm(10, 1, max_range=9e-4)(log, 1e-3) ≈ 1000 @test central_fdm(5, 1)(abs, 0.001) ≈ 1.0 end From 9c7cd0953578f1f6759711670bdffbf71a784f27 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 18:08:04 +0100 Subject: [PATCH 20/77] Fix custom grid --- src/methods.jl | 14 ++++++++------ test/methods.jl | 4 ++++ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 314c38b7..be56528c 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -94,7 +94,7 @@ end """ FiniteDifferenceMethod( - grid::SVector{P,Int}, + grid::Vector{Int}, q::Int; condition::Real=DEFAULT_CONDITION, factor::Real=DEFAULT_FACTOR, @@ -104,7 +104,7 @@ end Construct a finite difference method. # Arguments -- `grid::SVector{P,Int}`: The grid. See [`AdaptedFiniteDifferenceMethod`](@ref) or +- `grid::Vector{Int}`: The grid. See [`AdaptedFiniteDifferenceMethod`](@ref) or [`UnadaptedFiniteDifferenceMethod`](@ref). - `q::Int`: Order of the derivative to estimate. @@ -118,15 +118,17 @@ Construct a finite difference method. - `FiniteDifferenceMethod`: Specified finite difference method. 
""" function FiniteDifferenceMethod( - grid::SVector{P,Int}, + grid::Vector{Int}, q::Int; condition::Real=DEFAULT_CONDITION, factor::Real=DEFAULT_FACTOR, max_range::Real=Inf -) where P - _check_p_q(P, q) +) + p = length(grid) + grid = SVector{p}(grid) # Internally, we work with static vectors. + _check_p_q(p, q) coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult = _coefs_mults(grid, q) - return UnadaptedFiniteDifferenceMethod{P,q}( + return UnadaptedFiniteDifferenceMethod{p,q}( grid, coefs, coefs_neighbourhood, diff --git a/test/methods.jl b/test/methods.jl index 631f24b0..03438733 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -34,6 +34,10 @@ end end + @testset "Test custom grid" begin + @test FiniteDifferenceMethod([-2, 0, 5], 1)(sin, 1) ≈ cos(1) + end + @testset "Test allocations" begin m = central_fdm(5, 2, adapt=2) @test (@benchmark $m(sin, 1)).allocs == 0 From 7c33edf95599a2c8e381e914ec6dd087bfe73dda Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 18:20:18 +0100 Subject: [PATCH 21/77] Loosen tolerance --- test/methods.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/methods.jl b/test/methods.jl index 03438733..ac1685bb 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -62,7 +62,7 @@ @testset "Accuracy at high orders and high adaptation (issue #64)" begin # Regression test against issues with precision during computation of coefficients. 
@test central_fdm(9, 5, adapt=4)(exp, 1.0) ≈ exp(1) atol=2e-7 - @test central_fdm(15, 5, adapt=2)(exp, 1.0) ≈ exp(1) atol=1e-11 + @test central_fdm(15, 5, adapt=2)(exp, 1.0) ≈ exp(1) atol=1e-10 poly(x) = 4x^3 + 3x^2 + 2x + 1 @test central_fdm(9, 3, adapt=4)(poly, 1.0) ≈ 24 atol=1e-11 end From ce7147d96a429d810be40cf2605b54efa9716b4a Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 18:25:32 +0100 Subject: [PATCH 22/77] Loosen tolerances even more --- test/methods.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/methods.jl b/test/methods.jl index ac1685bb..8648af13 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -130,8 +130,8 @@ end @testset "Derivative of cosc at 0 (issue #124)" begin - @test central_fdm(5, 1)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=1e-13 - @test central_fdm(10, 1, adapt=3)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=1e-14 + @test central_fdm(5, 1)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=5e-9 + @test central_fdm(10, 1, adapt=3)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=5e-13 end @testset "Derivative of a constant (issue #125)" begin From 8941d9bbc5e40d397ff7190bda9bc59af85837e8 Mon Sep 17 00:00:00 2001 From: Wessel Date: Mon, 4 Jan 2021 21:34:01 +0100 Subject: [PATCH 23/77] Update src/methods.jl Co-authored-by: willtebbutt --- src/methods.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/methods.jl b/src/methods.jl index be56528c..921a675d 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -122,7 +122,7 @@ function FiniteDifferenceMethod( q::Int; condition::Real=DEFAULT_CONDITION, factor::Real=DEFAULT_FACTOR, - max_range::Real=Inf + max_range::Real=Inf, ) p = length(grid) grid = SVector{p}(grid) # Internally, we work with static vectors. 
From 771aabc4371fc1d64545de95e717299e32141101 Mon Sep 17 00:00:00 2001 From: Wessel Date: Mon, 4 Jan 2021 21:34:16 +0100 Subject: [PATCH 24/77] Update test/methods.jl Co-authored-by: willtebbutt --- test/methods.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/methods.jl b/test/methods.jl index 8648af13..ca9fd424 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -99,7 +99,7 @@ @test FiniteDifferences._is_symmetric(SVector{5}(2, 1, 0, -1, -2), negate_half=true) @test FiniteDifferences._is_symmetric(SVector{5}(2, 1, 4, -1, -2), negate_half=true) @test !FiniteDifferences._is_symmetric( - SVector{5}(2, 1, 4, -1, -2), + SVector{5}(2, 1, 4, -1, -2); negate_half=true, centre_zero=true ) From 1f9b4ae915e5418e278e85494eb9e3c778b3365a Mon Sep 17 00:00:00 2001 From: Wessel Date: Mon, 4 Jan 2021 21:34:24 +0100 Subject: [PATCH 25/77] Update test/methods.jl Co-authored-by: willtebbutt --- test/methods.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/methods.jl b/test/methods.jl index ca9fd424..5890c12c 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -20,8 +20,8 @@ # all methods and data types. @testset "foo=$(foo.f), method=$m, type=$(T)" for foo in foos, m in methods, T in types @testset "method-order=$order" for order in [1, 2, 3, 4, 5] - @test m(order, 0, adapt=2)(foo.f, T(1)) isa T - @test m(order, 0, adapt=2)(foo.f, T(1)) == T(foo.f(1)) + @test m(order, 0; adapt=2)(foo.f, T(1)) isa T + @test m(order, 0; adapt=2)(foo.f, T(1)) == T(foo.f(1)) end @test m(10, 1)(foo.f, T(1)) isa T From 1036d87de10e982ccfc7a3c724d0c9f8a345ca57 Mon Sep 17 00:00:00 2001 From: Wessel Date: Mon, 4 Jan 2021 21:35:42 +0100 Subject: [PATCH 26/77] Update src/methods.jl Co-authored-by: willtebbutt --- src/methods.jl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 921a675d..fe546b94 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -77,9 +77,7 @@ evaluations. 
This method does dynamically adapt its step size. perform adaptation for this finite difference method. """ struct AdaptedFiniteDifferenceMethod{ - P, - Q, - E<:FiniteDifferenceMethod + P, Q, E<:FiniteDifferenceMethod } <: FiniteDifferenceMethod{P,Q} grid::SVector{P,Int} coefs::SVector{P,Float64} From a54bec81ad70e709527d1dd03ea48e1133e9649f Mon Sep 17 00:00:00 2001 From: Wessel Date: Mon, 4 Jan 2021 21:39:43 +0100 Subject: [PATCH 27/77] Update src/methods.jl Co-authored-by: willtebbutt --- src/methods.jl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index fe546b94..feacf7ce 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -214,9 +214,7 @@ julia> fdm(sin, 1, 1e-3) - cos(1) # Check the error. ``` """ function (m::FiniteDifferenceMethod{P,Q})( - f::TF, - x::Real, - step::Real + f::TF, x::Real, step::Real, ) where {P,Q,TF<:Function} # Assume that converting to float is desired. x = float(x) From 579984ac8a25168b72b372c3d61ce4e0457666f3 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Mon, 4 Jan 2021 21:46:55 +0100 Subject: [PATCH 28/77] Add comment and simplify line --- src/methods.jl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index be56528c..197ef730 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -232,8 +232,7 @@ function _evals( x::T, step::Real ) where {TF<:Function,T<:AbstractFloat} - xs = x .+ T(step) .* m.grid - return f.(xs) + return f.(x .+ T(step) .* m.grid) end function _eval_method( @@ -243,6 +242,8 @@ function _eval_method( step::Real, coefs::SVector{P,Float64} ) where {P,Q,TF,T<:AbstractFloat} + # If we substitute `T.(coefs)` in the expression below, then allocations occur. We + # therefore perform the broadcasting first. 
coefs = T.(coefs) return sum(fs .* coefs) ./ T(step)^Q end From fb005efe7f8b618c0e54b946c33b81c05991cf01 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 09:42:38 +0100 Subject: [PATCH 29/77] Add accuracy tests --- test/methods.jl | 82 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 54 insertions(+), 28 deletions(-) diff --git a/test/methods.jl b/test/methods.jl index 5890c12c..c2ef7680 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -1,39 +1,73 @@ @testset "Methods" begin @testset "Correctness" begin - # The different approaches to approximating the gradient to try. + # Finite difference methods to test. methods = [forward_fdm, backward_fdm, central_fdm] - # The different floating point types to try and the associated required relative - # tolerance. + # The different floating point types to try. types = [Float32, Float64] # The different functions to evaluate (`.f`), their first derivative at 1 (`.d1`), - # and second derivative at 1 (`.d2`). - foos = [ - (f=sin, d1=cos(1), d2=-sin(1)), - (f=exp, d1=exp(1), d2=exp(1)), - (f=abs2, d1=2, d2=2), - (f=x -> sqrt(max(x + 1,0)), d1=0.5 / sqrt(2), d2=-0.25 / 2^(3/2)), + # and their second derivative at 1 (`.d2`). + fs = [ + (f=sin, d1=cos(1), d2=-sin(1), only_forward=false), + (f=exp, d1=exp(1), d2=exp(1), only_forward=false), + (f=abs2, d1=2, d2=2, only_forward=false), + (f=x -> sqrt(x + 1), d1=0.5 / sqrt(2), d2=-0.25 / 2^(3/2), only_forward=true), ] - # Test all combinations of the above settings, i.e. differentiate all functions using - # all methods and data types. - @testset "foo=$(foo.f), method=$m, type=$(T)" for foo in foos, m in methods, T in types + # Test all combinations of the above settings, i.e. differentiate all functions + # using all methods and data types. + @testset "f=$(f.f), method=$m, type=$(T)" for f in fs, m in methods, T in types + # Check if only `forward_fdm` is allowed. 
+ f.only_forward && m != forward_fdm && continue + @testset "method-order=$order" for order in [1, 2, 3, 4, 5] - @test m(order, 0; adapt=2)(foo.f, T(1)) isa T - @test m(order, 0; adapt=2)(foo.f, T(1)) == T(foo.f(1)) + @test m(order, 0; adapt=2)(f.f, T(1)) isa T + @test m(order, 0; adapt=2)(f.f, T(1)) == T(f.f(1)) end - @test m(10, 1)(foo.f, T(1)) isa T - @test m(10, 1)(foo.f, T(1)) ≈ T(foo.d1) + @test m(10, 1)(f.f, T(1)) isa T + @test m(10, 1)(f.f, T(1)) ≈ T(f.d1) - @test m(10, 2)(foo.f, T(1)) isa T - if T == Float64 - @test m(10, 2)(foo.f, T(1)) ≈ T(foo.d2) - end + @test m(10, 2)(f.f, T(1)) isa T + T == Float64 && @test m(10, 2)(f.f, T(1)) ≈ T(f.d2) + end + end + + @testset "Accuracy" begin + # Finite difference methods to test. + methods = [forward_fdm, backward_fdm, central_fdm] + + # `f`s, `x`s, the derivatives of `f` at `x`, and a factor that loosens tolerances. + fs = [ + (f=x -> 0, x=0, d=0, factor=1), + (f=x -> x, x=0, d=1, factor=1), + (f=exp, x=0, d=1, factor=1), + (f=sin, x=0, d=1, factor=1), + (f=cos, x=0, d=0, factor=1), + (f=sinc, x=0, d=0, factor=1), + (f=cosc, x=0, d=-(pi ^ 2) / 3, factor=10) + ] + @testset "f=$(f.f), method=$m" for f in fs, m in methods + @test m(4, 1)(f.f, f.x) ≈ f.d rtol=0 atol=5e-8 * f.factor + @test m(5, 1)(f.f, f.x) ≈ f.d rtol=0 atol=1e-8 * f.factor + @test m(6, 1)(f.f, f.x) ≈ f.d rtol=0 atol=1e-9 * f.factor + @test m(7, 1)(f.f, f.x) ≈ f.d rtol=0 atol=5e-10 * f.factor + @test m(8, 1)(f.f, f.x) ≈ f.d rtol=0 atol=1e-10 * f.factor + @test m(9, 1)(f.f, f.x) ≈ f.d rtol=0 atol=5e-11 * f.factor + @test m(10, 1)(f.f, f.x) ≈ f.d rtol=0 atol=1e-11 * f.factor + @test m(11, 1)(f.f, f.x) ≈ f.d rtol=0 atol=5e-12 * f.factor end end + @testset "Derivative of cosc at 0 (issue #124)" begin + @test central_fdm(10, 1, adapt=3)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=5e-13 + end + + @testset "Derivative of a constant (issue #125)" begin + @test central_fdm(2, 1)(x -> 0, 0) ≈ 0 atol=1e-10 + end + @testset "Test custom grid" begin @test 
FiniteDifferenceMethod([-2, 0, 5], 1)(sin, 1) ≈ cos(1) end @@ -129,12 +163,4 @@ end end - @testset "Derivative of cosc at 0 (issue #124)" begin - @test central_fdm(5, 1)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=5e-9 - @test central_fdm(10, 1, adapt=3)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=5e-13 - end - - @testset "Derivative of a constant (issue #125)" begin - @test central_fdm(2, 1)(x -> 0, 0) ≈ 0 atol=1e-10 - end end From 5b8308cd518d7797ddba72a27c1444ce7e2ea97b Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 09:44:02 +0100 Subject: [PATCH 30/77] Limit samples and evals in test --- test/methods.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/methods.jl b/test/methods.jl index c2ef7680..637c97c7 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -74,7 +74,7 @@ @testset "Test allocations" begin m = central_fdm(5, 2, adapt=2) - @test (@benchmark $m(sin, 1)).allocs == 0 + @test (@benchmark $m(sin, 1; samples=1, evals=1)).allocs == 0 end # Integration test to ensure that Integer-output functions can be tested. From 257f12eedcf20934d68deba7c51d2f7aa1651af3 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 09:47:56 +0100 Subject: [PATCH 31/77] Fix test --- test/methods.jl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/methods.jl b/test/methods.jl index 637c97c7..cb06ac7d 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -74,7 +74,7 @@ @testset "Test allocations" begin m = central_fdm(5, 2, adapt=2) - @test (@benchmark $m(sin, 1; samples=1, evals=1)).allocs == 0 + @test @benchmark($m(sin, 1); samples=1, evals=1).allocs == 0 end # Integration test to ensure that Integer-output functions can be tested. 
@@ -162,5 +162,4 @@ end end end - end From 12813dbe7b45dcb1304b6b2a59f0a32d750d8d1d Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 09:48:06 +0100 Subject: [PATCH 32/77] Add method for SVector --- src/methods.jl | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index a56af8ef..717f95f6 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -116,17 +116,15 @@ Construct a finite difference method. - `FiniteDifferenceMethod`: Specified finite difference method. """ function FiniteDifferenceMethod( - grid::Vector{Int}, + grid::SVector{P,Int}, q::Int; condition::Real=DEFAULT_CONDITION, factor::Real=DEFAULT_FACTOR, max_range::Real=Inf, -) - p = length(grid) - grid = SVector{p}(grid) # Internally, we work with static vectors. - _check_p_q(p, q) +) where P + _check_p_q(P, q) coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult = _coefs_mults(grid, q) - return UnadaptedFiniteDifferenceMethod{p,q}( + return UnadaptedFiniteDifferenceMethod{P,q}( grid, coefs, coefs_neighbourhood, @@ -137,6 +135,9 @@ function FiniteDifferenceMethod( f_error_mult ) end +function FiniteDifferenceMethod(grid::Vector{Int}, q::Int; kw_args...) + return FiniteDifferenceMethod(SVector{length(grid)}(grid), q; kw_args...) 
+end """ (m::FiniteDifferenceMethod)(f::Function, x::T) where T<:AbstractFloat From 5c97c51b0002d8a32e0735084ebee0591771d121 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 09:58:58 +0100 Subject: [PATCH 33/77] Fix docs --- Project.toml | 4 +-- docs/Manifest.toml | 83 ++++++++++++++++++++++++++++++++-------------- 2 files changed, 61 insertions(+), 26 deletions(-) diff --git a/Project.toml b/Project.toml index 1c62faef..2a74321f 100644 --- a/Project.toml +++ b/Project.toml @@ -12,13 +12,13 @@ StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" [compat] ChainRulesCore = "0.9" -Richardson = "1.2" +Richardson = "^1.2" julia = "1" [extras] +BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" [targets] test = ["Random", "Test", "BenchmarkTools"] diff --git a/docs/Manifest.toml b/docs/Manifest.toml index 8a54eae9..61f85cc3 100644 --- a/docs/Manifest.toml +++ b/docs/Manifest.toml @@ -1,39 +1,51 @@ # This file is machine-generated - editing it directly is not advised +[[ArgTools]] +uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" + +[[Artifacts]] +uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" + [[Base64]] uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" [[ChainRulesCore]] -deps = ["MuladdMacro"] -git-tree-sha1 = "9907341fe861268ddd0fc60be260633756b126a2" +deps = ["LinearAlgebra", "MuladdMacro", "SparseArrays"] +git-tree-sha1 = "15081c431bb25848ad9b0d172a65794f3a3e197a" uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -version = "0.9.4" +version = "0.9.24" [[Dates]] deps = ["Printf"] uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" -[[Distributed]] -deps = ["Random", "Serialization", "Sockets"] -uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" - [[DocStringExtensions]] deps = ["LibGit2", "Markdown", "Pkg", "Test"] -git-tree-sha1 = "c5714d9bcdba66389612dc4c47ed827c64112997" +git-tree-sha1 = 
"50ddf44c53698f5e784bbebb3f4b21c5807401b1" uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" -version = "0.8.2" +version = "0.8.3" [[Documenter]] -deps = ["Base64", "Dates", "DocStringExtensions", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"] -git-tree-sha1 = "1c593d1efa27437ed9dd365d1143c594b563e138" +deps = ["Base64", "Dates", "DocStringExtensions", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"] +git-tree-sha1 = "a4875e0763112d6d017126f3944f4133abb342ae" uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4" -version = "0.25.1" +version = "0.25.5" + +[[Downloads]] +deps = ["ArgTools", "LibCURL"] +uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" [[FiniteDifferences]] deps = ["ChainRulesCore", "LinearAlgebra", "Printf", "Random", "Richardson"] -path = ".." +git-tree-sha1 = "e4e4bfc56a703113ba1692e8e9c719ddbb46f47d" uuid = "26cc04aa-876d-5657-8c51-4c34ba976000" -version = "0.10.8" +version = "0.11.6" + +[[IOCapture]] +deps = ["Logging"] +git-tree-sha1 = "377252859f740c217b936cebcd918a44f9b53b59" +uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89" +version = "0.1.1" [[InteractiveUtils]] deps = ["Markdown"] @@ -41,9 +53,17 @@ uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" [[JSON]] deps = ["Dates", "Mmap", "Parsers", "Unicode"] -git-tree-sha1 = "b34d7cef7b337321e97d22242c3c2b91f476748e" +git-tree-sha1 = "81690084b6198a2e1da36fcfda16eeca9f9f24e4" uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" -version = "0.21.0" +version = "0.21.1" + +[[LibCURL]] +deps = ["LibCURL_jll", "MozillaCACerts_jll"] +uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" + +[[LibCURL_jll]] +deps = ["Libdl"] +uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" [[LibGit2]] deps = ["Printf"] @@ -66,19 +86,22 @@ uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" [[Mmap]] uuid = "a63ad114-7e13-5084-954f-fe012c677804" +[[MozillaCACerts_jll]] +uuid = "14a3606d-f60d-562e-9121-12d972cd8159" + [[MuladdMacro]] git-tree-sha1 = 
"c6190f9a7fc5d9d5915ab29f2134421b12d24a68" uuid = "46d2c3a1-f734-5fdb-9937-b9b9aeba4221" version = "0.2.2" [[Parsers]] -deps = ["Dates", "Test"] -git-tree-sha1 = "10134f2ee0b1978ae7752c41306e131a684e1f06" +deps = ["Dates"] +git-tree-sha1 = "50c9a9ed8c714945e01cd53a21007ed3865ed714" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "1.0.7" +version = "1.0.15" [[Pkg]] -deps = ["Dates", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"] +deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs"] uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" [[Printf]] @@ -86,7 +109,7 @@ deps = ["Unicode"] uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" [[REPL]] -deps = ["InteractiveUtils", "Markdown", "Sockets"] +deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" [[Random]] @@ -95,9 +118,9 @@ uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" [[Richardson]] deps = ["LinearAlgebra"] -git-tree-sha1 = "74d2cf4de9eda38175c3f94bd94c755a023d5623" +git-tree-sha1 = "e03ca566bec93f8a3aeb059c8ef102f268a38949" uuid = "708f8203-808e-40c0-ba2d-98a6953ed40d" -version = "1.1.0" +version = "1.4.0" [[SHA]] uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" @@ -108,8 +131,20 @@ uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" [[Sockets]] uuid = "6462fe0b-24de-5631-8697-dd941f90decc" +[[SparseArrays]] +deps = ["LinearAlgebra", "Random"] +uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + +[[TOML]] +deps = ["Dates"] +uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" + +[[Tar]] +deps = ["ArgTools", "SHA"] +uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" + [[Test]] -deps = ["Distributed", "InteractiveUtils", "Logging", "Random"] +deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [[UUIDs]] From 22802a4f19c18f768cd0a2d76910d479ca79867b Mon Sep 17 00:00:00 2001 From: 
Wessel Bruinsma Date: Tue, 5 Jan 2021 11:00:58 +0100 Subject: [PATCH 34/77] Use big to allow very high orders --- src/methods.jl | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 717f95f6..94fc30b7 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -128,9 +128,9 @@ function FiniteDifferenceMethod( grid, coefs, coefs_neighbourhood, - Float64(condition), - Float64(factor), - Float64(max_range), + condition, + factor, + max_range, ∇f_magnitude_mult, f_error_mult ) @@ -250,13 +250,9 @@ function _check_p_q(p::Integer, q::Integer) q >= 0 || throw(DomainError(q, "order of derivative (`q`) must be non-negative")) q < p || throw(DomainError( (q, p), - "order of the method (q) must be strictly greater than that of the derivative (p)", + "order of the method (`p`) must be strictly greater than that of the derivative " * + "(`q`)", )) - # Check whether the method can be computed. We require the factorial of the method order - # to be computable with regular `Int`s, but `factorial` will after 20, so 20 is the - # largest we can allow. - p > 20 && throw(DomainError(p, "order of the method (`p`) is too large to be computed")) - return end function _coefs(grid, p, q) @@ -265,7 +261,7 @@ function _coefs(grid, p, q) # rational math. C = [Rational{BigInt}(g^i) for i in 0:(p - 1), g in grid] x = zeros(Rational{BigInt}, p) - x[q + 1] = factorial(q) + x[q + 1] = factorial(big(q)) return SVector{p}(Float64.(C \ x)) end @@ -300,7 +296,7 @@ function _coefs_mults(grid::SVector{P, Int}, q::Integer) where P ) end # Compute multipliers. 
- ∇f_magnitude_mult = sum(abs.(coefs .* grid .^ P)) / factorial(P) + ∇f_magnitude_mult = sum(abs.(coefs .* grid .^ P)) / factorial(big(P)) f_error_mult = sum(abs.(coefs)) return coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult end @@ -452,9 +448,9 @@ for direction in [:forward, :central, :backward] grid, coefs, coefs_nbhd, - Float64(condition), - Float64(factor), - Float64(max_range), + condition, + factor, + max_range, ∇f_magnitude_mult, f_error_mult, bound_estimator @@ -464,9 +460,9 @@ for direction in [:forward, :central, :backward] grid, coefs, coefs_nbhd, - Float64(condition), - Float64(factor), - Float64(max_range), + condition, + factor, + max_range, ∇f_magnitude_mult, f_error_mult ) From 23357dc6f8c68a01242c7585ea62570cb1c29d93 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 11:01:10 +0100 Subject: [PATCH 35/77] Extend documentation --- README.md | 134 +++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 118 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 4a0c306e..6d1a6fc8 100644 --- a/README.md +++ b/README.md @@ -29,8 +29,7 @@ Right now here are the differences: #### FDM.jl This package was formerly called FDM.jl. We recommend users of FDM.jl [update to FiniteDifferences.jl](https://github.com/JuliaDiff/FiniteDifferences.jl/issues/37). - -## Example: Scalar Derivatives +## Scalar Derivatives Compute the first derivative of `sin` with a 5th order central method: @@ -43,13 +42,126 @@ Compute the second derivative of `sin` with a 5th order central method: ```julia julia> central_fdm(5, 2)(sin, 1) + sin(1) --4.367495254342657e-11 +-8.767431225464861e-11 +``` + +To obtain better accuracy, you can increase the order of the method: + +```julia +julia> central_fdm(12, 2)(sin, 1) + sin(1) +5.240252676230739e-14 ``` The functions `forward_fdm` and `backward_fdm` can be used to construct forward differences and backward differences respectively. 
-Alternatively, you can construct a finite difference method on a custom grid: +### Dealing with Singularities + +The function `log(x)` is only defined for `x > 0`. +If we try to use `central_fdm` to estimate the derivative of `log` near `x = 0`, +then we run into `DomainError`s, because `central_fdm` happens evaluates `log` +at some `x < 0`. + +```julia +julia> central_fdm(5, 1)(log, 1e-3) +ERROR: DomainError with -0.02069596546590111 +``` + +To deal with this situation, you have two options. + +The first option is to use `forward_fdm`, which only evaluates `log` at inputs +greater or equal to `x`: + +```julia +julia> forward_fdm(5, 1)(log, 1e-3) - 1000 +-3.741856744454708e-7 +``` + +This works fine, but the downside is that you're restricted to a one-sided +methods (`forward_fdm`), which tend to perform worse than central methods +(`central_fdm`). + +The second option is to limit the distance tat the finite difference method is +allowed to evaluate `log` away from `x`. Since `x = 1e-3`, a reasonable value +for this limit is `9e-4`: + +```julia +julia> central_fdm(5, 1, max_range=9e-4)(log, 1e-3) - 1000 +-4.027924660476856e-10 +``` + +Another commonly encountered example is `logdet`, which is only defined +for positive-definite matrices. +Here you can use a forward method in combination with a positive-definite +deviation from `x`: + +```julia +julia> x = diagm([1.0, 2.0, 3.0]); v = Matrix(1.0I, 3, 3); + +julia> forward_fdm(5, 1)(ε -> logdet(x .+ ε .* v), 0) - sum(1 ./ diag(x)) +-4.222400207254395e-12 +``` + +A range-limited central method is also possible: + +```julia +julia> central_fdm(5, 1, max_range=9e-1)(ε -> logdet(x .+ ε .* v), 0) - sum(1 ./ diag(x)) +-1.283417816466681e-13 +``` + +### Richardson Extrapolation + +The finite difference methods defined in this package can be extrapolated using +[Richardson extrapolation](https://github.com/JuliaMath/Richardson.jl). 
+This can offer superior numerical accuracy: +Richardson extrapolation attempts polynomial extrapolation of the finite +difference estimate as a function of the step size until a convergence criterion +is reached. + +```julia +julia> extrapolate_fdm(central_fdm(2, 1), sin, 1)[1] - cos(1) +1.6653345369377348e-14 +``` + +Similarly, you can estimate higher order derivatives: + +```julia +julia> extrapolate_fdm(central_fdm(5, 4), sin, 1)[1] - sin(1) +-1.626274487942503e-5 +``` + +In this case, the accuracy can be improved by lowering (making closer to `1`) +the [contraction rate](https://github.com/JuliaMath/Richardson.jl#usage): + +```julia +julia> extrapolate_fdm(central_fdm(5, 4), sin, 1, contract=0.8)[1] - sin(1) +2.0725743343774639e-10 +``` + +This performs similarly to a `10`th order central method: + +```julia +julia> central_fdm(10, 4)(sin, 1) - sin(1) +-1.0301381969668455e-10 +``` + +If you really want, you can even extrapolate the `10`th order central method, +but that provides no further gains: + +```julia +julia> extrapolate_fdm(central_fdm(10, 4), sin, 1, contract=0.8)[1] - sin(1) +5.673617131662922e-10 +``` + +In this case, the central method can be pushed to a high order to obtain +improved accuracy: + +```julia +julia> central_fdm(20, 4)(sin, 1) - sin(1) +-5.2513549064769904e-14 +``` + +### A Finite Difference Method on a Custom Grid ```julia julia> method = FiniteDifferenceMethod([-2, 0, 5], 1) @@ -63,17 +175,7 @@ julia> method(sin, 1) - cos(1) -8.423706177040913e-11 ``` -Compute a directional derivative: - -```julia -julia> f(x) = sum(x) -f (generic function with 1 method) - -julia> central_fdm(5, 1)(ε -> f([1, 1, 1] .+ ε .* [1, 2, 3]), 0) - 6 --6.217248937900877e-15 -``` - -## Example: Multivariate Derivatives +## Multivariate Derivatives Consider a quadratic function: @@ -124,7 +226,7 @@ julia> jacobian(central_fdm(5, 1), f, x)[1] - a ``` To compute Jacobian--vector products, use `jvp` and `j′vp`: - + ```julia julia> v = randn(3) 3-element 
Array{Float64,1}: From 584b9633aea523fd1f8b71e2766930669723055a Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 11:30:32 +0100 Subject: [PATCH 36/77] Add sentence about allocations --- README.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/README.md b/README.md index 6d1a6fc8..c0079bf4 100644 --- a/README.md +++ b/README.md @@ -38,6 +38,25 @@ julia> central_fdm(5, 1)(sin, 1) - cos(1) -2.4313884239290928e-14 ``` +Finite difference methods are optimised to minimise allocations: + +```julia +julia> m = central_fdm(5, 1); + +julia> @benchmark $m(sin, 1) +BenchmarkTools.Trial: + memory estimate: 0 bytes + allocs estimate: 0 + -------------- + minimum time: 486.621 ns (0.00% GC) + median time: 507.677 ns (0.00% GC) + mean time: 539.806 ns (0.00% GC) + maximum time: 1.352 μs (0.00% GC) + -------------- + samples: 10000 + evals/sample: 195 +``` + Compute the second derivative of `sin` with a 5th order central method: ```julia From 7dd89448385e096c5bf6a35948b4cc48214152e4 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 11:36:37 +0100 Subject: [PATCH 37/77] Update multivariate examples --- README.md | 62 ++++++++++++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index c0079bf4..f2e7b92c 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ At some point in the future they might merge, or one might depend on the other. 
Right now here are the differences: - FiniteDifferences.jl supports basically any type, where as FiniteDiff.jl supports only array-ish types - - FiniteDifferences.jl supports higher order approximation + - FiniteDifferences.jl supports higher order approximation and step size adaptation - FiniteDiff.jl is carefully optimized to minimize allocations - FiniteDiff.jl supports coloring vectors for efficient calculation of sparse Jacobians @@ -191,7 +191,7 @@ FiniteDifferenceMethod: coefficients: [-0.35714285714285715, 0.3, 0.05714285714285714] julia> method(sin, 1) - cos(1) --8.423706177040913e-11 +-3.701483564100272e-13 ``` ## Multivariate Derivatives @@ -200,10 +200,10 @@ Consider a quadratic function: ```julia julia> a = randn(3, 3); a = a * a' -3×3 Array{Float64,2}: - 8.0663 -1.12965 1.68556 - -1.12965 3.55005 -3.10405 - 1.68556 -3.10405 3.77251 +3×3 Matrix{Float64}: + 4.31995 -2.80614 0.0829128 + -2.80614 3.91982 0.764388 + 0.0829128 0.764388 1.18055 julia> f(x) = 0.5 * x' * a * x ``` @@ -211,37 +211,43 @@ julia> f(x) = 0.5 * x' * a * x Compute the gradient: ```julia +julia> x = randn(3) +3-element Vector{Float64}: + -0.18563161988700727 + -0.4659836395595666 + 2.304584409826511 + julia> grad(central_fdm(5, 1), f, x)[1] - a * x -3-element Array{Float64,1}: - -1.2612133559741778e-12 - -3.526068326209497e-13 - -2.3305801732931286e-12 +3-element Vector{Float64}: + 4.1744385725905886e-14 + -6.611378111642807e-14 + -8.615330671091215e-14 ``` Compute the Jacobian: ```julia julia> jacobian(central_fdm(5, 1), f, x)[1] - (a * x)' -1×3 Array{Float64,2}: - -1.26121e-12 -3.52607e-13 -2.33058e-12 +1×3 Matrix{Float64}: + 4.17444e-14 -6.61138e-14 -8.61533e-14 ``` The Jacobian can also be computed for non-scalar functions: ```julia julia> a = randn(3, 3) -3×3 Array{Float64,2}: - -0.343783 1.5708 0.723289 - -0.425706 -0.478881 -0.306881 - 1.27326 -0.171606 2.23671 +3×3 Matrix{Float64}: + 0.844846 1.04772 1.0173 + -0.867721 0.154146 -0.938077 + 1.34078 -0.630105 -1.13287 julia> 
f(x) = a * x julia> jacobian(central_fdm(5, 1), f, x)[1] - a -3×3 Array{Float64,2}: - -2.81331e-13 2.77556e-13 1.28342e-13 - -3.34732e-14 -6.05072e-15 6.05072e-15 - -2.24709e-13 1.88821e-13 1.06581e-14 +3×3 Matrix{Float64}: + 2.91989e-14 1.77636e-15 4.996e-14 + -5.55112e-15 -7.63278e-15 2.4758e-14 + 4.66294e-15 -2.05391e-14 -1.04361e-14 ``` To compute Jacobian--vector products, use `jvp` and `j′vp`: @@ -254,14 +260,14 @@ julia> v = randn(3) -1.4288108966380777 julia> jvp(central_fdm(5, 1), f, (x, v)) - a * v -3-element Array{Float64,1}: - -1.3233858453531866e-13 - 9.547918011776346e-15 - 3.632649736573512e-13 +3-element Vector{Float64}: + -7.993605777301127e-15 + -8.881784197001252e-16 + -3.22519788653608e-14 julia> j′vp(central_fdm(5, 1), f, x, v)[1] - a'x - 3-element Array{Float64,1}: - 3.5704772471945034e-13 - 4.04121180963557e-13 - 1.2807532812075806e-12 +3-element Vector{Float64}: + -2.1316282072803006e-14 + 2.4646951146678475e-14 + 6.661338147750939e-15 ``` From fbff4f9259f9f40801b5b4bade54abb13f41ac19 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 12:00:24 +0100 Subject: [PATCH 38/77] Tighten accuracy tests --- test/methods.jl | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/test/methods.jl b/test/methods.jl index cb06ac7d..dfd9a1c9 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -40,23 +40,22 @@ # `f`s, `x`s, the derivatives of `f` at `x`, and a factor that loosens tolerances. 
fs = [ - (f=x -> 0, x=0, d=0, factor=1), - (f=x -> x, x=0, d=1, factor=1), - (f=exp, x=0, d=1, factor=1), - (f=sin, x=0, d=1, factor=1), - (f=cos, x=0, d=0, factor=1), - (f=sinc, x=0, d=0, factor=1), - (f=cosc, x=0, d=-(pi ^ 2) / 3, factor=10) + (f=x -> 0, x=0, d=0, atol=0, atol_central=0), + (f=x -> x, x=0, d=1, atol=5e-15, atol_central=5e-16), + (f=exp, x=0, d=1, atol=5e-13, atol_central=5e-14), + (f=sin, x=0, d=1, atol=1e-14, atol_central=5e-16), + (f=cos, x=0, d=0, atol=5e-14, atol_central=5e-14), + (f=exp, x=1, d=exp(1), atol=5e-12, atol_central=5e-13), + (f=sin, x=1, d=cos(1), atol=5e-13, atol_central=5e-14), + (f=cos, x=1, d=-sin(1), atol=5e-13, atol_central=5e-14), + (f=sinc, x=0, d=0, atol=5e-12, atol_central=5e-14), + (f=cosc, x=0, d=-(pi ^ 2) / 3, atol=1e-14, atol_central=5e-14) ] @testset "f=$(f.f), method=$m" for f in fs, m in methods - @test m(4, 1)(f.f, f.x) ≈ f.d rtol=0 atol=5e-8 * f.factor - @test m(5, 1)(f.f, f.x) ≈ f.d rtol=0 atol=1e-8 * f.factor - @test m(6, 1)(f.f, f.x) ≈ f.d rtol=0 atol=1e-9 * f.factor - @test m(7, 1)(f.f, f.x) ≈ f.d rtol=0 atol=5e-10 * f.factor - @test m(8, 1)(f.f, f.x) ≈ f.d rtol=0 atol=1e-10 * f.factor - @test m(9, 1)(f.f, f.x) ≈ f.d rtol=0 atol=5e-11 * f.factor - @test m(10, 1)(f.f, f.x) ≈ f.d rtol=0 atol=1e-11 * f.factor - @test m(11, 1)(f.f, f.x) ≈ f.d rtol=0 atol=5e-12 * f.factor + atol = m == central_fdm ? 
f.atol_central : f.atol + @test m(6, 1)(f.f, f.x) ≈ f.d rtol=0 atol=atol + @test m(7, 1)(f.f, f.x) ≈ f.d rtol=0 atol=atol + m == central_fdm && @test m(14, 1)(f.f, f.x) ≈ f.d atol=5e-15 end end From d2f8036532505e38e7c969da08e8dfcaa7f75a08 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 12:23:46 +0100 Subject: [PATCH 39/77] Adjust tolerances --- test/methods.jl | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/test/methods.jl b/test/methods.jl index dfd9a1c9..87103098 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -49,13 +49,20 @@ (f=sin, x=1, d=cos(1), atol=5e-13, atol_central=5e-14), (f=cos, x=1, d=-sin(1), atol=5e-13, atol_central=5e-14), (f=sinc, x=0, d=0, atol=5e-12, atol_central=5e-14), - (f=cosc, x=0, d=-(pi ^ 2) / 3, atol=1e-14, atol_central=5e-14) + (f=cosc, x=0, d=-(pi ^ 2) / 3, atol=5e-10, atol_central=5e-11) ] @testset "f=$(f.f), method=$m" for f in fs, m in methods atol = m == central_fdm ? f.atol_central : f.atol @test m(6, 1)(f.f, f.x) ≈ f.d rtol=0 atol=atol @test m(7, 1)(f.f, f.x) ≈ f.d rtol=0 atol=atol - m == central_fdm && @test m(14, 1)(f.f, f.x) ≈ f.d atol=5e-15 + if m == central_fdm + if f == cosc + # `cosc` is hard. + @test m(14, 1)(f.f, f.x) ≈ f.d atol=1e-13 + else + @test m(14, 1)(f.f, f.x) ≈ f.d atol=5e-15 + end + end end end From 0fa0b20ec7bdada9c8bbd9adc457472938dc80e6 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 12:30:16 +0100 Subject: [PATCH 40/77] Loosen test for cosc --- test/methods.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/methods.jl b/test/methods.jl index 87103098..82eaf296 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -49,7 +49,8 @@ (f=sin, x=1, d=cos(1), atol=5e-13, atol_central=5e-14), (f=cos, x=1, d=-sin(1), atol=5e-13, atol_central=5e-14), (f=sinc, x=0, d=0, atol=5e-12, atol_central=5e-14), - (f=cosc, x=0, d=-(pi ^ 2) / 3, atol=5e-10, atol_central=5e-11) + # `cosc` is hard. 
There is a separate test for `cosc` below. + (f=cosc, x=0, d=-(pi ^ 2) / 3, atol=5e-9, atol_central=5e-10) ] @testset "f=$(f.f), method=$m" for f in fs, m in methods atol = m == central_fdm ? f.atol_central : f.atol From 76f9ccb803fb79430a78014dd1397db28dcbdcef Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 12:35:56 +0100 Subject: [PATCH 41/77] Fix exception for cosc --- test/methods.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/methods.jl b/test/methods.jl index 82eaf296..d8a10403 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -57,7 +57,7 @@ @test m(6, 1)(f.f, f.x) ≈ f.d rtol=0 atol=atol @test m(7, 1)(f.f, f.x) ≈ f.d rtol=0 atol=atol if m == central_fdm - if f == cosc + if f.f == cosc # `cosc` is hard. @test m(14, 1)(f.f, f.x) ≈ f.d atol=1e-13 else From 8076594322ff1680a68ce543a9393c68555d3312 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 12:38:49 +0100 Subject: [PATCH 42/77] Fix typos --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index f2e7b92c..d38d0405 100644 --- a/README.md +++ b/README.md @@ -96,11 +96,11 @@ julia> forward_fdm(5, 1)(log, 1e-3) - 1000 -3.741856744454708e-7 ``` -This works fine, but the downside is that you're restricted to a one-sided +This works fine, but the downside is that you're restricted to one-sided methods (`forward_fdm`), which tend to perform worse than central methods (`central_fdm`). -The second option is to limit the distance tat the finite difference method is +The second option is to limit the distance that the finite difference method is allowed to evaluate `log` away from `x`. 
Since `x = 1e-3`, a reasonable value for this limit is `9e-4`: From 0b67236fb185394b02b99d83690557da378d005d Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 12:41:35 +0100 Subject: [PATCH 43/77] Tighten separate test for cosc --- test/methods.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/methods.jl b/test/methods.jl index d8a10403..a8786a9e 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -68,7 +68,7 @@ end @testset "Derivative of cosc at 0 (issue #124)" begin - @test central_fdm(10, 1, adapt=3)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=5e-13 + @test central_fdm(16, 1, adapt=2)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=5e-15 end @testset "Derivative of a constant (issue #125)" begin From dfbdd74b1e1d51560ecfc244bf63e836b694a363 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 13:00:25 +0100 Subject: [PATCH 44/77] Remove separate tests --- test/methods.jl | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/test/methods.jl b/test/methods.jl index a8786a9e..44070775 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -40,6 +40,7 @@ # `f`s, `x`s, the derivatives of `f` at `x`, and a factor that loosens tolerances. fs = [ + # See #124. (f=x -> 0, x=0, d=0, atol=0, atol_central=0), (f=x -> x, x=0, d=1, atol=5e-15, atol_central=5e-16), (f=exp, x=0, d=1, atol=5e-13, atol_central=5e-14), @@ -49,7 +50,7 @@ (f=sin, x=1, d=cos(1), atol=5e-13, atol_central=5e-14), (f=cos, x=1, d=-sin(1), atol=5e-13, atol_central=5e-14), (f=sinc, x=0, d=0, atol=5e-12, atol_central=5e-14), - # `cosc` is hard. There is a separate test for `cosc` below. + # See #125. The tolerances are lower because `cosc` is hard. 
(f=cosc, x=0, d=-(pi ^ 2) / 3, atol=5e-9, atol_central=5e-10) ] @testset "f=$(f.f), method=$m" for f in fs, m in methods @@ -67,14 +68,6 @@ end end - @testset "Derivative of cosc at 0 (issue #124)" begin - @test central_fdm(16, 1, adapt=2)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=5e-15 - end - - @testset "Derivative of a constant (issue #125)" begin - @test central_fdm(2, 1)(x -> 0, 0) ≈ 0 atol=1e-10 - end - @testset "Test custom grid" begin @test FiniteDifferenceMethod([-2, 0, 5], 1)(sin, 1) ≈ cos(1) end From 1cda212f73cb869194cdaca17f2ac28b6b41830a Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 15:23:08 +0100 Subject: [PATCH 45/77] Remove caret --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 2a74321f..60b8ccf6 100644 --- a/Project.toml +++ b/Project.toml @@ -12,7 +12,7 @@ StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" [compat] ChainRulesCore = "0.9" -Richardson = "^1.2" +Richardson = "1.2" julia = "1" [extras] From 5f22b5af4dcb3f68fa27aa85c36a3bd065f0b2d3 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 15:46:54 +0100 Subject: [PATCH 46/77] Fix Manifest.toml for docs --- docs/Manifest.toml | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/docs/Manifest.toml b/docs/Manifest.toml index 61f85cc3..821c36b7 100644 --- a/docs/Manifest.toml +++ b/docs/Manifest.toml @@ -36,10 +36,10 @@ deps = ["ArgTools", "LibCURL"] uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" [[FiniteDifferences]] -deps = ["ChainRulesCore", "LinearAlgebra", "Printf", "Random", "Richardson"] -git-tree-sha1 = "e4e4bfc56a703113ba1692e8e9c719ddbb46f47d" +deps = ["ChainRulesCore", "LinearAlgebra", "Printf", "Random", "Richardson", "StaticArrays"] +path = ".." 
uuid = "26cc04aa-876d-5657-8c51-4c34ba976000" -version = "0.11.6" +version = "0.11.5" [[IOCapture]] deps = ["Logging"] @@ -135,6 +135,16 @@ uuid = "6462fe0b-24de-5631-8697-dd941f90decc" deps = ["LinearAlgebra", "Random"] uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" +[[StaticArrays]] +deps = ["LinearAlgebra", "Random", "Statistics"] +git-tree-sha1 = "9da72ed50e94dbff92036da395275ed114e04d49" +uuid = "90137ffa-7385-5640-81b9-e52037218182" +version = "1.0.1" + +[[Statistics]] +deps = ["LinearAlgebra", "SparseArrays"] +uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" + [[TOML]] deps = ["Dates"] uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" From d4e74eba53042197ed1ad3cbd4b4859a94f86ae4 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 15:47:04 +0100 Subject: [PATCH 47/77] Rename internal function --- src/methods.jl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 94fc30b7..1a068cf4 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -219,11 +219,11 @@ function (m::FiniteDifferenceMethod{P,Q})( ) where {P,Q,TF<:Function} # Assume that converting to float is desired. x = float(x) - fs = _evals(m, f, x, step) - return _eval_method(m, fs, x, step, m.coefs) + fs = _eval_function(m, f, x, step) + return _compute_estimate(m, fs, x, step, m.coefs) end -function _evals( +function _eval_function( m::FiniteDifferenceMethod, f::TF, x::T, @@ -232,7 +232,7 @@ function _evals( return f.(x .+ T(step) .* m.grid) end -function _eval_method( +function _compute_estimate( m::FiniteDifferenceMethod{P,Q}, fs::SVector{P,TF}, x::T, @@ -361,12 +361,12 @@ function _estimate_magnitudes( x::T ) where {P,Q,TF<:Function,T<:AbstractFloat} step = first(estimate_step(m, f, x)) - fs = _evals(m, f, x, step) + fs = _eval_function(m, f, x, step) # Estimate magnitude of `∇f` in a neighbourhood of `x`. 
∇fs = SVector{3}( - _eval_method(m, fs, x, step, m.coefs_neighbourhood[1]), - _eval_method(m, fs, x, step, m.coefs_neighbourhood[2]), - _eval_method(m, fs, x, step, m.coefs_neighbourhood[3]) + _compute_estimate(m, fs, x, step, m.coefs_neighbourhood[1]), + _compute_estimate(m, fs, x, step, m.coefs_neighbourhood[2]), + _compute_estimate(m, fs, x, step, m.coefs_neighbourhood[3]) ) ∇f_magnitude = maximum(maximum.(abs, ∇fs)) # Estimate magnitude of `f` in a neighbourhood of `x`. From 55863b09bf5278f8fa57df929ec425c3b8e9e141 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 15:47:14 +0100 Subject: [PATCH 48/77] Adjust spacing to clarify --- test/methods.jl | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/test/methods.jl b/test/methods.jl index 44070775..76f92a23 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -41,17 +41,17 @@ # `f`s, `x`s, the derivatives of `f` at `x`, and a factor that loosens tolerances. fs = [ # See #124. - (f=x -> 0, x=0, d=0, atol=0, atol_central=0), - (f=x -> x, x=0, d=1, atol=5e-15, atol_central=5e-16), - (f=exp, x=0, d=1, atol=5e-13, atol_central=5e-14), - (f=sin, x=0, d=1, atol=1e-14, atol_central=5e-16), - (f=cos, x=0, d=0, atol=5e-14, atol_central=5e-14), - (f=exp, x=1, d=exp(1), atol=5e-12, atol_central=5e-13), - (f=sin, x=1, d=cos(1), atol=5e-13, atol_central=5e-14), - (f=cos, x=1, d=-sin(1), atol=5e-13, atol_central=5e-14), - (f=sinc, x=0, d=0, atol=5e-12, atol_central=5e-14), + (f=x -> 0, x=0, d=0, atol=0, atol_central=0), + (f=x -> x, x=0, d=1, atol=5e-15, atol_central=5e-16), + (f=exp, x=0, d=1, atol=5e-13, atol_central=5e-14), + (f=sin, x=0, d=1, atol=1e-14, atol_central=5e-16), + (f=cos, x=0, d=0, atol=5e-14, atol_central=5e-14), + (f=exp, x=1, d=exp(1), atol=5e-12, atol_central=5e-13), + (f=sin, x=1, d=cos(1), atol=5e-13, atol_central=5e-14), + (f=cos, x=1, d=-sin(1), atol=5e-13, atol_central=5e-14), + (f=sinc, x=0, d=0, atol=5e-12, atol_central=5e-14), # See #125. 
The tolerances are lower because `cosc` is hard. - (f=cosc, x=0, d=-(pi ^ 2) / 3, atol=5e-9, atol_central=5e-10) + (f=cosc, x=0, d=-(pi ^ 2) / 3, atol=5e-9, atol_central=5e-10) ] @testset "f=$(f.f), method=$m" for f in fs, m in methods atol = m == central_fdm ? f.atol_central : f.atol From f49540df88ef7e490da436723e0870685c641fa2 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 15:48:32 +0100 Subject: [PATCH 49/77] Fix typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d38d0405..315d8d75 100644 --- a/README.md +++ b/README.md @@ -78,7 +78,7 @@ forward differences and backward differences respectively. The function `log(x)` is only defined for `x > 0`. If we try to use `central_fdm` to estimate the derivative of `log` near `x = 0`, -then we run into `DomainError`s, because `central_fdm` happens evaluates `log` +then we run into `DomainError`s, because `central_fdm` happens to evaluate `log` at some `x < 0`. ```julia From 29dd0e04fcf4742849977417b5378e62b27e8aa6 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 15:59:09 +0100 Subject: [PATCH 50/77] Fix style issues --- src/methods.jl | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 1a068cf4..26a4d448 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -224,10 +224,7 @@ function (m::FiniteDifferenceMethod{P,Q})( end function _eval_function( - m::FiniteDifferenceMethod, - f::TF, - x::T, - step::Real + m::FiniteDifferenceMethod, f::TF, x::T, step::Real, ) where {TF<:Function,T<:AbstractFloat} return f.(x .+ T(step) .* m.grid) end @@ -237,7 +234,7 @@ function _compute_estimate( fs::SVector{P,TF}, x::T, step::Real, - coefs::SVector{P,Float64} + coefs::SVector{P,Float64}, ) where {P,Q,TF,T<:AbstractFloat} # If we substitute `T.(coefs)` in the expression below, then allocations occur. We # therefore perform the broadcasting first. 
@@ -305,7 +302,7 @@ end function Base.show( io::IO, m::MIME"text/plain", - x::FiniteDifferenceMethod{P, Q} + x::FiniteDifferenceMethod{P, Q}, ) where {P, Q} @printf io "FiniteDifferenceMethod:\n" @printf io " order of method: %d\n" P @@ -334,17 +331,13 @@ estimate of the derivative. error of the finite difference estimate. """ function estimate_step( - m::UnadaptedFiniteDifferenceMethod, - f::TF, - x::T + m::UnadaptedFiniteDifferenceMethod, f::TF, x::T, ) where {TF<:Function,T<:AbstractFloat} step, acc = _compute_step_acc_default(m, x) return _limit_step(m, x, step, acc) end function estimate_step( - m::AdaptedFiniteDifferenceMethod{P,Q}, - f::TF, - x::T + m::AdaptedFiniteDifferenceMethod{P,Q}, f::TF, x::T, ) where {P,Q,TF<:Function,T<:AbstractFloat} ∇f_magnitude, f_magnitude = _estimate_magnitudes(m.bound_estimator, f, x) if ∇f_magnitude == 0.0 || f_magnitude == 0.0 @@ -356,9 +349,7 @@ function estimate_step( end function _estimate_magnitudes( - m::FiniteDifferenceMethod{P,Q}, - f::TF, - x::T + m::FiniteDifferenceMethod{P,Q}, f::TF, x::T, ) where {P,Q,TF<:Function,T<:AbstractFloat} step = first(estimate_step(m, f, x)) fs = _eval_function(m, f, x, step) @@ -380,9 +371,7 @@ function _compute_step_acc_default(m::FiniteDifferenceMethod, x::T) where {T<:Ab end function _compute_step_acc( - m::FiniteDifferenceMethod{P,Q}, - ∇f_magnitude::Real, - f_error::Real + m::FiniteDifferenceMethod{P,Q}, ∇f_magnitude::Real, f_error::Real, ) where {P,Q} # Set the step size by minimising an upper bound on the error of the estimate. C₁ = f_error * m.f_error_mult * m.factor @@ -394,10 +383,7 @@ function _compute_step_acc( end function _limit_step( - m::FiniteDifferenceMethod, - x::T, - step::Real, - acc::Real + m::FiniteDifferenceMethod, x::T, step::Real, acc::Real, ) where {T<:AbstractFloat} # First, limit the step size based on the maximum range. 
step_max = m.max_range / maximum(abs.(m.grid)) @@ -419,7 +405,8 @@ end for direction in [:forward, :central, :backward] fdm_fun = Symbol(direction, "_fdm") grid_fun = Symbol("_", direction, "_grid") - @eval begin function $fdm_fun( + @eval begin + function $fdm_fun( p::Int, q::Int; adapt::Int=1, From c3856954000576d91058b98e290df450baf81046 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 18:46:15 +0100 Subject: [PATCH 51/77] Fix 1.0 compatibility --- src/methods.jl | 41 +++++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index 26a4d448..31ace12f 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -172,15 +172,21 @@ julia> FiniteDifferences.estimate_step(fdm, sin, 1.0) # Computes step size and (0.001065235154086019, 1.9541865128909085e-13) ``` """ -function (m::FiniteDifferenceMethod)(f::TF, x::Real) where TF<:Function - # Assume that converting to float is desired. - x = float(x) - step = first(estimate_step(m, f, x)) - return m(f, x, step) -end -function (m::FiniteDifferenceMethod{P,0})(f::TF, x::Real) where {P,TF<:Function} - # The automatic step size calculation fails if `Q == 0`, so handle that edge case. - return f(x) +# We loop over all concrete subtypes of `FiniteDifferenceMethod` for 1.0 compatibility. +for T in (UnadaptedFiniteDifferenceMethod, AdaptedFiniteDifferenceMethod) + @eval begin + function (m::$T)(f::TF, x::Real) where TF<:Function + # Assume that converting to float is desired. + x = float(x) + step = first(estimate_step(m, f, x)) + return m(f, x, step) + end + function (m::$T{P,0})(f::TF, x::Real) where {P,TF<:Function} + # The automatic step size calculation fails if `Q == 0`, so handle that edge + # case. + return f(x) + end + end end """ @@ -214,13 +220,16 @@ julia> fdm(sin, 1, 1e-3) - cos(1) # Check the error. 
-1.7741363933510002e-13 ``` """ -function (m::FiniteDifferenceMethod{P,Q})( - f::TF, x::Real, step::Real, -) where {P,Q,TF<:Function} - # Assume that converting to float is desired. - x = float(x) - fs = _eval_function(m, f, x, step) - return _compute_estimate(m, fs, x, step, m.coefs) +# We loop over all concrete subtypes of `FiniteDifferenceMethod` for 1.0 compatibility. +for T in (UnadaptedFiniteDifferenceMethod, AdaptedFiniteDifferenceMethod) + @eval begin + function (m::$T{P,Q})(f::TF, x::Real, step::Real) where {P,Q,TF<:Function} + # Assume that converting to float is desired. + x = float(x) + fs = _eval_function(m, f, x, step) + return _compute_estimate(m, fs, x, step, m.coefs) + end + end end function _eval_function( From 48a15d041e4e50e172b2027fcfb00af75110062b Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 18:49:02 +0100 Subject: [PATCH 52/77] Fix docs --- docs/Manifest.toml | 39 +++++++-------------------------------- 1 file changed, 7 insertions(+), 32 deletions(-) diff --git a/docs/Manifest.toml b/docs/Manifest.toml index 821c36b7..db7ead35 100644 --- a/docs/Manifest.toml +++ b/docs/Manifest.toml @@ -1,11 +1,5 @@ # This file is machine-generated - editing it directly is not advised -[[ArgTools]] -uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" - -[[Artifacts]] -uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" - [[Base64]] uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" @@ -19,6 +13,10 @@ version = "0.9.24" deps = ["Printf"] uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" +[[Distributed]] +deps = ["Random", "Serialization", "Sockets"] +uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" + [[DocStringExtensions]] deps = ["LibGit2", "Markdown", "Pkg", "Test"] git-tree-sha1 = "50ddf44c53698f5e784bbebb3f4b21c5807401b1" @@ -31,10 +29,6 @@ git-tree-sha1 = "a4875e0763112d6d017126f3944f4133abb342ae" uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4" version = "0.25.5" -[[Downloads]] -deps = ["ArgTools", "LibCURL"] -uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" 
- [[FiniteDifferences]] deps = ["ChainRulesCore", "LinearAlgebra", "Printf", "Random", "Richardson", "StaticArrays"] path = ".." @@ -57,14 +51,6 @@ git-tree-sha1 = "81690084b6198a2e1da36fcfda16eeca9f9f24e4" uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" version = "0.21.1" -[[LibCURL]] -deps = ["LibCURL_jll", "MozillaCACerts_jll"] -uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" - -[[LibCURL_jll]] -deps = ["Libdl"] -uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" - [[LibGit2]] deps = ["Printf"] uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" @@ -86,9 +72,6 @@ uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" [[Mmap]] uuid = "a63ad114-7e13-5084-954f-fe012c677804" -[[MozillaCACerts_jll]] -uuid = "14a3606d-f60d-562e-9121-12d972cd8159" - [[MuladdMacro]] git-tree-sha1 = "c6190f9a7fc5d9d5915ab29f2134421b12d24a68" uuid = "46d2c3a1-f734-5fdb-9937-b9b9aeba4221" @@ -101,7 +84,7 @@ uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" version = "1.0.15" [[Pkg]] -deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs"] +deps = ["Dates", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"] uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" [[Printf]] @@ -109,7 +92,7 @@ deps = ["Unicode"] uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" [[REPL]] -deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] +deps = ["InteractiveUtils", "Markdown", "Sockets"] uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" [[Random]] @@ -145,16 +128,8 @@ version = "1.0.1" deps = ["LinearAlgebra", "SparseArrays"] uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" -[[TOML]] -deps = ["Dates"] -uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" - -[[Tar]] -deps = ["ArgTools", "SHA"] -uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" - [[Test]] -deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] +deps = ["Distributed", "InteractiveUtils", "Logging", "Random"] uuid = 
"8dfed614-e22c-5e08-85e1-65c5234f0b40" [[UUIDs]] From deecc5e32d0e1e86394a3c40f2e612dfafa5c151 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 18:58:12 +0100 Subject: [PATCH 53/77] Generalise method --- src/methods.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/methods.jl b/src/methods.jl index 31ace12f..5442fa6d 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -135,7 +135,7 @@ function FiniteDifferenceMethod( f_error_mult ) end -function FiniteDifferenceMethod(grid::Vector{Int}, q::Int; kw_args...) +function FiniteDifferenceMethod(grid::AbstractVector{Int}, q::Int; kw_args...) return FiniteDifferenceMethod(SVector{length(grid)}(grid), q; kw_args...) end From 91db8cbb1cc3dcf978af2240495a8e7b97e6a28b Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 18:58:21 +0100 Subject: [PATCH 54/77] Fix docs --- docs/src/index.md | 262 ++++++++++++++++++++++++++++++++++++++---- docs/src/pages/api.md | 6 +- 2 files changed, 246 insertions(+), 22 deletions(-) diff --git a/docs/src/index.md b/docs/src/index.md index 4bb727e2..315d8d75 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,51 +1,273 @@ # FiniteDifferences.jl: Finite Difference Methods +[![CI](https://github.com/JuliaDiff/FiniteDifferences.jl/workflows/CI/badge.svg?branch=master)](https://github.com/JuliaDiff/FiniteDifferences.jl/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/JuliaDiff/FiniteDifferences.jl.svg?branch=master)](https://travis-ci.org/JuliaDiff/FiniteDifferences.jl) [![codecov.io](https://codecov.io/github/JuliaDiff/FiniteDifferences.jl/coverage.svg?branch=master)](https://codecov.io/github/JuliaDiff/FiniteDifferences.jl?branch=master) -[![Latest Docs](https://img.shields.io/badge/docs-latest-blue.svg)](https://www.juliadiff.org/FiniteDifferences.jl/latest/) 
+[![PkgEval](https://juliaci.github.io/NanosoldierReports/pkgeval_badges/F/FiniteDifferences.svg)](https://juliaci.github.io/NanosoldierReports/pkgeval_badges/report.html) -FiniteDifferences.jl approximates derivatives of functions using finite difference methods. +[![Latest Docs](https://img.shields.io/badge/docs-latest-blue.svg)](https://juliadiff.github.io/FiniteDifferences.jl/latest/) +[![Code Style: Blue](https://img.shields.io/badge/code%20style-blue-4495d1.svg)](https://github.com/invenia/BlueStyle) +[![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac) -## Examples +FiniteDifferences.jl estimates derivatives with [finite differences](https://en.wikipedia.org/wiki/Finite_difference). + +See also the Python package [FDM](https://github.com/wesselb/fdm). + +#### FiniteDiff.jl vs FiniteDifferences.jl +[FiniteDiff.jl](https://github.com/JuliaDiff/FiniteDiff.jl) and [FiniteDifferences.jl](https://github.com/JuliaDiff/FiniteDifferences.jl) +are similar libraries: both calculate approximate derivatives numerically. +You should definately use one or the other, rather than the legacy [Calculus.jl](https://github.com/JuliaMath/Calculus.jl) finite differencing, or reimplementing it yourself. +At some point in the future they might merge, or one might depend on the other. +Right now here are the differences: + + - FiniteDifferences.jl supports basically any type, where as FiniteDiff.jl supports only array-ish types + - FiniteDifferences.jl supports higher order approximation and step size adaptation + - FiniteDiff.jl is carefully optimized to minimize allocations + - FiniteDiff.jl supports coloring vectors for efficient calculation of sparse Jacobians + + +#### FDM.jl +This package was formerly called FDM.jl. We recommend users of FDM.jl [update to FiniteDifferences.jl](https://github.com/JuliaDiff/FiniteDifferences.jl/issues/37). 
+ +## Scalar Derivatives Compute the first derivative of `sin` with a 5th order central method: ```julia julia> central_fdm(5, 1)(sin, 1) - cos(1) --1.247890679678676e-13 +-2.4313884239290928e-14 +``` + +Finite difference methods are optimised to minimise allocations: + +```julia +julia> m = central_fdm(5, 1); + +julia> @benchmark $m(sin, 1) +BenchmarkTools.Trial: + memory estimate: 0 bytes + allocs estimate: 0 + -------------- + minimum time: 486.621 ns (0.00% GC) + median time: 507.677 ns (0.00% GC) + mean time: 539.806 ns (0.00% GC) + maximum time: 1.352 μs (0.00% GC) + -------------- + samples: 10000 + evals/sample: 195 ``` + Compute the second derivative of `sin` with a 5th order central method: ```julia julia> central_fdm(5, 2)(sin, 1) + sin(1) -9.747314066999024e-12 +-8.767431225464861e-11 +``` + +To obtain better accuracy, you can increase the order of the method: + +```julia +julia> central_fdm(12, 2)(sin, 1) + sin(1) +5.240252676230739e-14 +``` + +The functions `forward_fdm` and `backward_fdm` can be used to construct +forward differences and backward differences respectively. + +### Dealing with Singularities + +The function `log(x)` is only defined for `x > 0`. +If we try to use `central_fdm` to estimate the derivative of `log` near `x = 0`, +then we run into `DomainError`s, because `central_fdm` happens to evaluate `log` +at some `x < 0`. + +```julia +julia> central_fdm(5, 1)(log, 1e-3) +ERROR: DomainError with -0.02069596546590111 +``` + +To deal with this situation, you have two options. + +The first option is to use `forward_fdm`, which only evaluates `log` at inputs +greater or equal to `x`: + +```julia +julia> forward_fdm(5, 1)(log, 1e-3) - 1000 +-3.741856744454708e-7 +``` + +This works fine, but the downside is that you're restricted to one-sided +methods (`forward_fdm`), which tend to perform worse than central methods +(`central_fdm`). 
+ +The second option is to limit the distance that the finite difference method is +allowed to evaluate `log` away from `x`. Since `x = 1e-3`, a reasonable value +for this limit is `9e-4`: + +```julia +julia> central_fdm(5, 1, max_range=9e-4)(log, 1e-3) - 1000 +-4.027924660476856e-10 +``` + +Another commonly encountered example is `logdet`, which is only defined +for positive-definite matrices. +Here you can use a forward method in combination with a positive-definite +deviation from `x`: + +```julia +julia> x = diagm([1.0, 2.0, 3.0]); v = Matrix(1.0I, 3, 3); + +julia> forward_fdm(5, 1)(ε -> logdet(x .+ ε .* v), 0) - sum(1 ./ diag(x)) +-4.222400207254395e-12 +``` + +A range-limited central method is also possible: + +```julia +julia> central_fdm(5, 1, max_range=9e-1)(ε -> logdet(x .+ ε .* v), 0) - sum(1 ./ diag(x)) +-1.283417816466681e-13 +``` + +### Richardson Extrapolation + +The finite difference methods defined in this package can be extrapolated using +[Richardson extrapolation](https://github.com/JuliaMath/Richardson.jl). +This can offer superior numerical accuracy: +Richardson extrapolation attempts polynomial extrapolation of the finite +difference estimate as a function of the step size until a convergence criterion +is reached. 
+ +```julia +julia> extrapolate_fdm(central_fdm(2, 1), sin, 1)[1] - cos(1) +1.6653345369377348e-14 +``` + +Similarly, you can estimate higher order derivatives: + +```julia +julia> extrapolate_fdm(central_fdm(5, 4), sin, 1)[1] - sin(1) +-1.626274487942503e-5 +``` + +In this case, the accuracy can be improved by lowering (making closer to `1`) +the [contraction rate](https://github.com/JuliaMath/Richardson.jl#usage): + +```julia +julia> extrapolate_fdm(central_fdm(5, 4), sin, 1, contract=0.8)[1] - sin(1) +2.0725743343774639e-10 +``` + +This performs similarly to a `10`th order central method: + +```julia +julia> central_fdm(10, 4)(sin, 1) - sin(1) +-1.0301381969668455e-10 +``` + +If you really want, you can even extrapolate the `10`th order central method, +but that provides no further gains: + +```julia +julia> extrapolate_fdm(central_fdm(10, 4), sin, 1, contract=0.8)[1] - sin(1) +5.673617131662922e-10 +``` + +In this case, the central method can be pushed to a high order to obtain +improved accuracy: + +```julia +julia> central_fdm(20, 4)(sin, 1) - sin(1) +-5.2513549064769904e-14 ``` -Construct a FiniteDifferences on a custom grid: +### A Finite Difference Method on a Custom Grid ```julia -julia> method, report = fdm([-2, 0, 5], 1, report=true) -(FiniteDifferences.method, FiniteDifferencesReport: +julia> method = FiniteDifferenceMethod([-2, 0, 5], 1) +FiniteDifferenceMethod: order of method: 3 order of derivative: 1 grid: [-2, 0, 5] - coefficients: [-0.357143, 0.3, 0.0571429] - roundoff error: 2.22e-16 - bounds on derivatives: 1.00e+00 - step size: 3.62e-06 - accuracy: 6.57e-11 -) + coefficients: [-0.35714285714285715, 0.3, 0.05714285714285714] julia> method(sin, 1) - cos(1) --2.05648831297367e-11 +-3.701483564100272e-13 ``` -Compute a directional derivative: +## Multivariate Derivatives + +Consider a quadratic function: + +```julia +julia> a = randn(3, 3); a = a * a' +3×3 Matrix{Float64}: + 4.31995 -2.80614 0.0829128 + -2.80614 3.91982 0.764388 + 0.0829128 
0.764388 1.18055 + +julia> f(x) = 0.5 * x' * a * x +``` + +Compute the gradient: + +```julia +julia> x = randn(3) +3-element Vector{Float64}: + -0.18563161988700727 + -0.4659836395595666 + 2.304584409826511 + +julia> grad(central_fdm(5, 1), f, x)[1] - a * x +3-element Vector{Float64}: + 4.1744385725905886e-14 + -6.611378111642807e-14 + -8.615330671091215e-14 +``` + +Compute the Jacobian: ```julia -julia> f(x) = sum(x) -f (generic function with 1 method) +julia> jacobian(central_fdm(5, 1), f, x)[1] - (a * x)' +1×3 Matrix{Float64}: + 4.17444e-14 -6.61138e-14 -8.61533e-14 +``` + +The Jacobian can also be computed for non-scalar functions: + +```julia +julia> a = randn(3, 3) +3×3 Matrix{Float64}: + 0.844846 1.04772 1.0173 + -0.867721 0.154146 -0.938077 + 1.34078 -0.630105 -1.13287 + +julia> f(x) = a * x + +julia> jacobian(central_fdm(5, 1), f, x)[1] - a +3×3 Matrix{Float64}: + 2.91989e-14 1.77636e-15 4.996e-14 + -5.55112e-15 -7.63278e-15 2.4758e-14 + 4.66294e-15 -2.05391e-14 -1.04361e-14 +``` + +To compute Jacobian--vector products, use `jvp` and `j′vp`: + +```julia +julia> v = randn(3) +3-element Array{Float64,1}: + -1.290782164377614 + -0.37701592844250903 + -1.4288108966380777 + +julia> jvp(central_fdm(5, 1), f, (x, v)) - a * v +3-element Vector{Float64}: + -7.993605777301127e-15 + -8.881784197001252e-16 + -3.22519788653608e-14 -julia> central_fdm(5, 1)(ε -> f([1, 1, 1] + ε * [1, 2, 3]), 0) - 6 --2.922107000813412e-13 +julia> j′vp(central_fdm(5, 1), f, x, v)[1] - a'x +3-element Vector{Float64}: + -2.1316282072803006e-14 + 2.4646951146678475e-14 + 6.661338147750939e-15 ``` diff --git a/docs/src/pages/api.md b/docs/src/pages/api.md index cf70b88f..a3332b24 100644 --- a/docs/src/pages/api.md +++ b/docs/src/pages/api.md @@ -2,14 +2,16 @@ ```@docs FiniteDifferenceMethod -FiniteDifferenceMethod(::AbstractVector, ::Int; ::Int) FiniteDifferences.estimate_step -forward_fdm central_fdm +forward_fdm backward_fdm extrapolate_fdm assert_approx_equal 
+FiniteDifferences.UnadaptedFiniteDifferenceMethod +FiniteDifferences.AdaptedFiniteDifferenceMethod FiniteDifferences.DEFAULT_CONDITION +FiniteDifferences.DEFAULT_FACTOR ``` ## Gradients From 4d0b31e8f15343a4ba0d2ea755a8335b1cac0165 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Tue, 5 Jan 2021 19:02:53 +0100 Subject: [PATCH 55/77] Fix docstring --- src/methods.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/methods.jl b/src/methods.jl index 5442fa6d..651c956a 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -92,7 +92,7 @@ end """ FiniteDifferenceMethod( - grid::Vector{Int}, + grid::AbstractVector{Int}, q::Int; condition::Real=DEFAULT_CONDITION, factor::Real=DEFAULT_FACTOR, From 1b3d51226af2ed0cacf9f88d73efb312217f9a5b Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Thu, 7 Jan 2021 09:17:32 +0100 Subject: [PATCH 56/77] Increase minor version --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 60b8ccf6..5db699ac 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "FiniteDifferences" uuid = "26cc04aa-876d-5657-8c51-4c34ba976000" -version = "0.11.5" +version = "0.12.0" [deps] ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" From 7672c631e43fe7b2d5f5a0a85a1ad05f5e06fd19 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Fri, 8 Jan 2021 15:31:48 +0100 Subject: [PATCH 57/77] Remove weird indent --- src/FiniteDifferences.jl | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/src/FiniteDifferences.jl b/src/FiniteDifferences.jl index b0de5f90..58b8a2cd 100644 --- a/src/FiniteDifferences.jl +++ b/src/FiniteDifferences.jl @@ -1,18 +1,19 @@ module FiniteDifferences - using ChainRulesCore - using LinearAlgebra - using Printf - using Random - using Richardson - using StaticArrays +using ChainRulesCore +using LinearAlgebra +using Printf +using Random +using Richardson +using StaticArrays - export to_vec, grad, 
jacobian, jvp, j′vp +export to_vec, grad, jacobian, jvp, j′vp + +include("rand_tangent.jl") +include("difference.jl") +include("methods.jl") +include("numerics.jl") +include("to_vec.jl") +include("grad.jl") - include("rand_tangent.jl") - include("difference.jl") - include("methods.jl") - include("numerics.jl") - include("to_vec.jl") - include("grad.jl") end From cfbe4a176cde81afa33f6819b48179d6ac9852ec Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Fri, 8 Jan 2021 15:33:33 +0100 Subject: [PATCH 58/77] Use ballocated --- test/methods.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/methods.jl b/test/methods.jl index 76f92a23..3c8b132a 100644 --- a/test/methods.jl +++ b/test/methods.jl @@ -74,7 +74,7 @@ @testset "Test allocations" begin m = central_fdm(5, 2, adapt=2) - @test @benchmark($m(sin, 1); samples=1, evals=1).allocs == 0 + @test @ballocated($m(sin, 1)) == 0 end # Integration test to ensure that Integer-output functions can be tested. From d7cc43d70b0f69a3e82a2cc80fbc3f633960b1d3 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Fri, 8 Jan 2021 15:35:21 +0100 Subject: [PATCH 59/77] Clarify wording around contraction rate --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 315d8d75..575fad38 100644 --- a/README.md +++ b/README.md @@ -149,8 +149,9 @@ julia> extrapolate_fdm(central_fdm(5, 4), sin, 1)[1] - sin(1) -1.626274487942503e-5 ``` -In this case, the accuracy can be improved by lowering (making closer to `1`) -the [contraction rate](https://github.com/JuliaMath/Richardson.jl#usage): +In this case, the accuracy can be improved by making the +[contraction rate](https://github.com/JuliaMath/Richardson.jl#usage) closer to +`1`: ```julia julia> extrapolate_fdm(central_fdm(5, 4), sin, 1, contract=0.8)[1] - sin(1) From 9bcb2d23b52a9ac074d06c2537f3de6d22518ec6 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Fri, 8 Jan 2021 15:40:45 +0100 Subject: [PATCH 60/77] 
Automatically update index.md in docs --- docs/make.jl | 9 +++++++++ docs/src/index.md | 5 +++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/docs/make.jl b/docs/make.jl index fe93b010..a6bd4d2b 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,5 +1,14 @@ using Documenter, FiniteDifferences +# Copy the README. +repo_root = dirname(dirname(@__FILE__)) +cp( + joinpath(repo_root, "README.md"), + joinpath(repo_root, "docs", "src", "index.md"); + # `index.md` will already exist, so we set `force=true` to overwrite it. + force=true +) + makedocs( modules=[FiniteDifferences], format=Documenter.HTML(prettyurls=get(ENV, "CI", nothing) == "true"), diff --git a/docs/src/index.md b/docs/src/index.md index 315d8d75..575fad38 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -149,8 +149,9 @@ julia> extrapolate_fdm(central_fdm(5, 4), sin, 1)[1] - sin(1) -1.626274487942503e-5 ``` -In this case, the accuracy can be improved by lowering (making closer to `1`) -the [contraction rate](https://github.com/JuliaMath/Richardson.jl#usage): +In this case, the accuracy can be improved by making the +[contraction rate](https://github.com/JuliaMath/Richardson.jl#usage) closer to +`1`: ```julia julia> extrapolate_fdm(central_fdm(5, 4), sin, 1, contract=0.8)[1] - sin(1) From d6d14efc187a0584c04518187a81adf6a8cb815a Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Fri, 8 Jan 2021 15:42:45 +0100 Subject: [PATCH 61/77] Clarify example in README --- README.md | 4 ++-- docs/src/index.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 575fad38..1a1e43a0 100644 --- a/README.md +++ b/README.md @@ -60,14 +60,14 @@ BenchmarkTools.Trial: Compute the second derivative of `sin` with a 5th order central method: ```julia -julia> central_fdm(5, 2)(sin, 1) + sin(1) +julia> central_fdm(5, 2)(sin, 1) - (-sin(1)) -8.767431225464861e-11 ``` To obtain better accuracy, you can increase the order of the method: ```julia -julia> 
central_fdm(12, 2)(sin, 1) + sin(1) +julia> central_fdm(12, 2)(sin, 1) - (-sin(1)) 5.240252676230739e-14 ``` diff --git a/docs/src/index.md b/docs/src/index.md index 575fad38..1a1e43a0 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -60,14 +60,14 @@ BenchmarkTools.Trial: Compute the second derivative of `sin` with a 5th order central method: ```julia -julia> central_fdm(5, 2)(sin, 1) + sin(1) +julia> central_fdm(5, 2)(sin, 1) - (-sin(1)) -8.767431225464861e-11 ``` To obtain better accuracy, you can increase the order of the method: ```julia -julia> central_fdm(12, 2)(sin, 1) + sin(1) +julia> central_fdm(12, 2)(sin, 1) - (-sin(1)) 5.240252676230739e-14 ``` From be06452dc0eaee047e081f740f2b40908b793399 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Fri, 8 Jan 2021 16:04:20 +0100 Subject: [PATCH 62/77] Update comparison with FiniteDiff --- README.md | 2 +- docs/src/index.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 1a1e43a0..aa45f484 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ Right now here are the differences: - FiniteDifferences.jl supports basically any type, where as FiniteDiff.jl supports only array-ish types - FiniteDifferences.jl supports higher order approximation and step size adaptation - - FiniteDiff.jl is carefully optimized to minimize allocations + - FiniteDiff.jl supports caching and in-place computation - FiniteDiff.jl supports coloring vectors for efficient calculation of sparse Jacobians diff --git a/docs/src/index.md b/docs/src/index.md index 1a1e43a0..aa45f484 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -22,7 +22,7 @@ Right now here are the differences: - FiniteDifferences.jl supports basically any type, where as FiniteDiff.jl supports only array-ish types - FiniteDifferences.jl supports higher order approximation and step size adaptation - - FiniteDiff.jl is carefully optimized to minimize allocations + - FiniteDiff.jl supports caching and in-place 
computation - FiniteDiff.jl supports coloring vectors for efficient calculation of sparse Jacobians From 0058b46ae7c060ac20f39ca438b560e879b60183 Mon Sep 17 00:00:00 2001 From: Wessel Date: Sat, 9 Jan 2021 11:24:47 +0100 Subject: [PATCH 63/77] Update src/methods.jl Co-authored-by: Lyndon White --- src/methods.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/methods.jl b/src/methods.jl index 651c956a..2251e408 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -172,7 +172,7 @@ julia> FiniteDifferences.estimate_step(fdm, sin, 1.0) # Computes step size and (0.001065235154086019, 1.9541865128909085e-13) ``` """ -# We loop over all concrete subtypes of `FiniteDifferenceMethod` for 1.0 compatibility. +# We loop over all concrete subtypes of `FiniteDifferenceMethod` for Julia v1.0 compatibility. for T in (UnadaptedFiniteDifferenceMethod, AdaptedFiniteDifferenceMethod) @eval begin function (m::$T)(f::TF, x::Real) where TF<:Function From e69d7e2bbf85ab94f947be5d82579b8021d644e3 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 9 Jan 2021 11:30:40 +0100 Subject: [PATCH 64/77] Document return value of NaN in estimate_step --- src/methods.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/methods.jl b/src/methods.jl index 2251e408..b8c30cce 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -337,7 +337,8 @@ estimate of the derivative. # Returns - `Tuple{<:AbstractFloat, <:AbstractFloat}`: Estimated step size and an estimate of the - error of the finite difference estimate. + error of the finite difference estimate. The error will be `NaN` if the method failed + to estimate the error. 
""" function estimate_step( m::UnadaptedFiniteDifferenceMethod, f::TF, x::T, From 9012b7e107baec3c409488971cdc6cf2e10f1c80 Mon Sep 17 00:00:00 2001 From: Wessel Date: Sat, 9 Jan 2021 11:31:50 +0100 Subject: [PATCH 65/77] Update src/methods.jl Co-authored-by: Lyndon White --- src/methods.jl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index b8c30cce..a1a097e5 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -224,8 +224,7 @@ julia> fdm(sin, 1, 1e-3) - cos(1) # Check the error. for T in (UnadaptedFiniteDifferenceMethod, AdaptedFiniteDifferenceMethod) @eval begin function (m::$T{P,Q})(f::TF, x::Real, step::Real) where {P,Q,TF<:Function} - # Assume that converting to float is desired. - x = float(x) + x = float(x) # Assume that converting to float is desired, if not already fs = _eval_function(m, f, x, step) return _compute_estimate(m, fs, x, step, m.coefs) end From 0d67075cb2231a39eb7b6a4e3b858f8b8a4ad615 Mon Sep 17 00:00:00 2001 From: Wessel Date: Sat, 9 Jan 2021 11:31:59 +0100 Subject: [PATCH 66/77] Update src/methods.jl Co-authored-by: Lyndon White --- src/methods.jl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index a1a097e5..fff95613 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -176,8 +176,7 @@ julia> FiniteDifferences.estimate_step(fdm, sin, 1.0) # Computes step size and for T in (UnadaptedFiniteDifferenceMethod, AdaptedFiniteDifferenceMethod) @eval begin function (m::$T)(f::TF, x::Real) where TF<:Function - # Assume that converting to float is desired. 
- x = float(x) + x = float(x) # Assume that converting to float is desired, if not already step = first(estimate_step(m, f, x)) return m(f, x, step) end From b893bc1b6d244a3d851def3a32badd54f7caa354 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 9 Jan 2021 11:34:48 +0100 Subject: [PATCH 67/77] Improve comments --- src/methods.jl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index fff95613..df04f6a0 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -176,7 +176,7 @@ julia> FiniteDifferences.estimate_step(fdm, sin, 1.0) # Computes step size and for T in (UnadaptedFiniteDifferenceMethod, AdaptedFiniteDifferenceMethod) @eval begin function (m::$T)(f::TF, x::Real) where TF<:Function - x = float(x) # Assume that converting to float is desired, if not already + x = float(x) # Assume that converting to float is desired, if it isn't already. step = first(estimate_step(m, f, x)) return m(f, x, step) end @@ -223,7 +223,7 @@ julia> fdm(sin, 1, 1e-3) - cos(1) # Check the error. for T in (UnadaptedFiniteDifferenceMethod, AdaptedFiniteDifferenceMethod) @eval begin function (m::$T{P,Q})(f::TF, x::Real, step::Real) where {P,Q,TF<:Function} - x = float(x) # Assume that converting to float is desired, if not already + x = float(x) # Assume that converting to float is desired, if it isn't already. fs = _eval_function(m, f, x, step) return _compute_estimate(m, fs, x, step, m.coefs) end @@ -244,7 +244,8 @@ function _compute_estimate( coefs::SVector{P,Float64}, ) where {P,Q,TF,T<:AbstractFloat} # If we substitute `T.(coefs)` in the expression below, then allocations occur. We - # therefore perform the broadcasting first. + # therefore perform the broadcasting first. See + # https://github.com/JuliaLang/julia/issues/39151. 
coefs = T.(coefs) return sum(fs .* coefs) ./ T(step)^Q end From 88029f150692f16cc7d34f43e72c833855eb2b14 Mon Sep 17 00:00:00 2001 From: Wessel Date: Sat, 9 Jan 2021 11:35:21 +0100 Subject: [PATCH 68/77] Update src/methods.jl Co-authored-by: Lyndon White --- src/methods.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/methods.jl b/src/methods.jl index df04f6a0..24d600fe 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -271,7 +271,7 @@ function _coefs(grid, p, q) end const _COEFFS_MULTS_CACHE = Dict{ - Tuple{SVector,Integer}, + Tuple{SVector,Integer}, #keys: (grid, q) Tuple{SVector,Tuple{Vararg{SVector}},Float64,Float64} }() From ec27a2be172ea867326c92ee093d561328e3af35 Mon Sep 17 00:00:00 2001 From: Wessel Date: Sat, 9 Jan 2021 11:35:30 +0100 Subject: [PATCH 69/77] Update src/methods.jl Co-authored-by: Lyndon White --- src/methods.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/methods.jl b/src/methods.jl index 24d600fe..d3dccdbe 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -272,7 +272,8 @@ end const _COEFFS_MULTS_CACHE = Dict{ Tuple{SVector,Integer}, #keys: (grid, q) - Tuple{SVector,Tuple{Vararg{SVector}},Float64,Float64} + # values: (coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult) + Tuple{SVector,Tuple{Vararg{SVector}},Float64,Float64} }() # Compute coefficients for the method and cache the result. 
From 418b48ea14a482a7aa3cec6083abc83aeca6aff7 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 9 Jan 2021 11:35:58 +0100 Subject: [PATCH 70/77] Fix spacing and capitalise --- src/methods.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index d3dccdbe..fe4f0fc0 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -271,9 +271,9 @@ function _coefs(grid, p, q) end const _COEFFS_MULTS_CACHE = Dict{ - Tuple{SVector,Integer}, #keys: (grid, q) - # values: (coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult) - Tuple{SVector,Tuple{Vararg{SVector}},Float64,Float64} + Tuple{SVector,Integer}, # Keys: (grid, q) + # Values: (coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult) + Tuple{SVector,Tuple{Vararg{SVector}},Float64,Float64} }() # Compute coefficients for the method and cache the result. From 02b7f37cdb99f995130904a2d71a223978e0d99a Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 9 Jan 2021 11:44:49 +0100 Subject: [PATCH 71/77] Elaborate on coefs_neighbourhood --- src/methods.jl | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/methods.jl b/src/methods.jl index fe4f0fc0..d2350e13 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -59,7 +59,22 @@ end } <: FiniteDifferenceMethod{P,Q} A finite difference method that estimates a `Q`th order derivative from `P` function -evaluations. This method does dynamically adapt its step size. +evaluations. + +This method dynamically adapts its step size. The adaptation works by explicitly estimating +the truncation error and round-off error, and choosing the step size to optimally balance +those. The truncation error is given by the magnitude of the `P`th order derivative, which +will be estimated with another finite difference method (`E`). This finite difference +method, `bound_estimator`, will be tasked with estimating the `P`th order derivative in a +_neighbourhood_, not just at some `x`. 
To do this, it will use a careful reweighting of the +function evaluations to estimate the `P`th order derivative at, in the case of a central +method, `x - h`, `x`, and `x + h`, where `h` is the step size. The coeffients for this +estimate, the _neighbourhood estimate_, are given by the three sets of coeffients in +`bound_estimator.coefs_neighbourhood`. The round-off error is estimated by the round-off +error of the function evaluations performed by `bound_estimator`. The trunction error is +amplified by `condition`, and the round-off error is amplified by `factor`. The quantities +`∇f_magnitude_mult` and `f_error_mult` are precomputed quantities that facilitate the +step size adaptation procedure. # Fields - `grid::SVector{P,Int}`: Multiples of the step size that the function will be evaluated at. From 8146faef6cad44760f0a66d620a64be0afbbf67f Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 9 Jan 2021 11:47:18 +0100 Subject: [PATCH 72/77] Do not store docs/src/index.md in repo --- .gitignore | 6 +- docs/make.jl | 3 +- docs/src/index.md | 274 ---------------------------------------------- 3 files changed, 7 insertions(+), 276 deletions(-) delete mode 100644 docs/src/index.md diff --git a/.gitignore b/.gitignore index 80642e1f..6caafe51 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,10 @@ *.jl.cov *.jl.*.cov *.jl.mem +/Manifest.toml + +# Docs: docs/build/ docs/site/ -/Manifest.toml +# `docs/src/index.md` will be automatically generated by `docs/make.jl`. +docs/src/index.md diff --git a/docs/make.jl b/docs/make.jl index a6bd4d2b..b03a7b50 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -5,7 +5,8 @@ repo_root = dirname(dirname(@__FILE__)) cp( joinpath(repo_root, "README.md"), joinpath(repo_root, "docs", "src", "index.md"); - # `index.md` will already exist, so we set `force=true` to overwrite it. + # `index.md` may already exist if `make.jl` was run previously, so we set `force=true` + # to overwrite it. 
force=true ) diff --git a/docs/src/index.md b/docs/src/index.md deleted file mode 100644 index aa45f484..00000000 --- a/docs/src/index.md +++ /dev/null @@ -1,274 +0,0 @@ -# FiniteDifferences.jl: Finite Difference Methods - -[![CI](https://github.com/JuliaDiff/FiniteDifferences.jl/workflows/CI/badge.svg?branch=master)](https://github.com/JuliaDiff/FiniteDifferences.jl/actions?query=workflow%3ACI) -[![Build Status](https://travis-ci.org/JuliaDiff/FiniteDifferences.jl.svg?branch=master)](https://travis-ci.org/JuliaDiff/FiniteDifferences.jl) -[![codecov.io](https://codecov.io/github/JuliaDiff/FiniteDifferences.jl/coverage.svg?branch=master)](https://codecov.io/github/JuliaDiff/FiniteDifferences.jl?branch=master) -[![PkgEval](https://juliaci.github.io/NanosoldierReports/pkgeval_badges/F/FiniteDifferences.svg)](https://juliaci.github.io/NanosoldierReports/pkgeval_badges/report.html) - -[![Latest Docs](https://img.shields.io/badge/docs-latest-blue.svg)](https://juliadiff.github.io/FiniteDifferences.jl/latest/) -[![Code Style: Blue](https://img.shields.io/badge/code%20style-blue-4495d1.svg)](https://github.com/invenia/BlueStyle) -[![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor's%20Guide-blueviolet)](https://github.com/SciML/ColPrac) - -FiniteDifferences.jl estimates derivatives with [finite differences](https://en.wikipedia.org/wiki/Finite_difference). - -See also the Python package [FDM](https://github.com/wesselb/fdm). - -#### FiniteDiff.jl vs FiniteDifferences.jl -[FiniteDiff.jl](https://github.com/JuliaDiff/FiniteDiff.jl) and [FiniteDifferences.jl](https://github.com/JuliaDiff/FiniteDifferences.jl) -are similar libraries: both calculate approximate derivatives numerically. -You should definately use one or the other, rather than the legacy [Calculus.jl](https://github.com/JuliaMath/Calculus.jl) finite differencing, or reimplementing it yourself. 
-At some point in the future they might merge, or one might depend on the other. -Right now here are the differences: - - - FiniteDifferences.jl supports basically any type, where as FiniteDiff.jl supports only array-ish types - - FiniteDifferences.jl supports higher order approximation and step size adaptation - - FiniteDiff.jl supports caching and in-place computation - - FiniteDiff.jl supports coloring vectors for efficient calculation of sparse Jacobians - - -#### FDM.jl -This package was formerly called FDM.jl. We recommend users of FDM.jl [update to FiniteDifferences.jl](https://github.com/JuliaDiff/FiniteDifferences.jl/issues/37). - -## Scalar Derivatives - -Compute the first derivative of `sin` with a 5th order central method: - -```julia -julia> central_fdm(5, 1)(sin, 1) - cos(1) --2.4313884239290928e-14 -``` - -Finite difference methods are optimised to minimise allocations: - -```julia -julia> m = central_fdm(5, 1); - -julia> @benchmark $m(sin, 1) -BenchmarkTools.Trial: - memory estimate: 0 bytes - allocs estimate: 0 - -------------- - minimum time: 486.621 ns (0.00% GC) - median time: 507.677 ns (0.00% GC) - mean time: 539.806 ns (0.00% GC) - maximum time: 1.352 μs (0.00% GC) - -------------- - samples: 10000 - evals/sample: 195 -``` - -Compute the second derivative of `sin` with a 5th order central method: - -```julia -julia> central_fdm(5, 2)(sin, 1) - (-sin(1)) --8.767431225464861e-11 -``` - -To obtain better accuracy, you can increase the order of the method: - -```julia -julia> central_fdm(12, 2)(sin, 1) - (-sin(1)) -5.240252676230739e-14 -``` - -The functions `forward_fdm` and `backward_fdm` can be used to construct -forward differences and backward differences respectively. - -### Dealing with Singularities - -The function `log(x)` is only defined for `x > 0`. -If we try to use `central_fdm` to estimate the derivative of `log` near `x = 0`, -then we run into `DomainError`s, because `central_fdm` happens to evaluate `log` -at some `x < 0`. 
- -```julia -julia> central_fdm(5, 1)(log, 1e-3) -ERROR: DomainError with -0.02069596546590111 -``` - -To deal with this situation, you have two options. - -The first option is to use `forward_fdm`, which only evaluates `log` at inputs -greater or equal to `x`: - -```julia -julia> forward_fdm(5, 1)(log, 1e-3) - 1000 --3.741856744454708e-7 -``` - -This works fine, but the downside is that you're restricted to one-sided -methods (`forward_fdm`), which tend to perform worse than central methods -(`central_fdm`). - -The second option is to limit the distance that the finite difference method is -allowed to evaluate `log` away from `x`. Since `x = 1e-3`, a reasonable value -for this limit is `9e-4`: - -```julia -julia> central_fdm(5, 1, max_range=9e-4)(log, 1e-3) - 1000 --4.027924660476856e-10 -``` - -Another commonly encountered example is `logdet`, which is only defined -for positive-definite matrices. -Here you can use a forward method in combination with a positive-definite -deviation from `x`: - -```julia -julia> x = diagm([1.0, 2.0, 3.0]); v = Matrix(1.0I, 3, 3); - -julia> forward_fdm(5, 1)(ε -> logdet(x .+ ε .* v), 0) - sum(1 ./ diag(x)) --4.222400207254395e-12 -``` - -A range-limited central method is also possible: - -```julia -julia> central_fdm(5, 1, max_range=9e-1)(ε -> logdet(x .+ ε .* v), 0) - sum(1 ./ diag(x)) --1.283417816466681e-13 -``` - -### Richardson Extrapolation - -The finite difference methods defined in this package can be extrapolated using -[Richardson extrapolation](https://github.com/JuliaMath/Richardson.jl). -This can offer superior numerical accuracy: -Richardson extrapolation attempts polynomial extrapolation of the finite -difference estimate as a function of the step size until a convergence criterion -is reached. 
- -```julia -julia> extrapolate_fdm(central_fdm(2, 1), sin, 1)[1] - cos(1) -1.6653345369377348e-14 -``` - -Similarly, you can estimate higher order derivatives: - -```julia -julia> extrapolate_fdm(central_fdm(5, 4), sin, 1)[1] - sin(1) --1.626274487942503e-5 -``` - -In this case, the accuracy can be improved by making the -[contraction rate](https://github.com/JuliaMath/Richardson.jl#usage) closer to -`1`: - -```julia -julia> extrapolate_fdm(central_fdm(5, 4), sin, 1, contract=0.8)[1] - sin(1) -2.0725743343774639e-10 -``` - -This performs similarly to a `10`th order central method: - -```julia -julia> central_fdm(10, 4)(sin, 1) - sin(1) --1.0301381969668455e-10 -``` - -If you really want, you can even extrapolate the `10`th order central method, -but that provides no further gains: - -```julia -julia> extrapolate_fdm(central_fdm(10, 4), sin, 1, contract=0.8)[1] - sin(1) -5.673617131662922e-10 -``` - -In this case, the central method can be pushed to a high order to obtain -improved accuracy: - -```julia -julia> central_fdm(20, 4)(sin, 1) - sin(1) --5.2513549064769904e-14 -``` - -### A Finite Difference Method on a Custom Grid - -```julia -julia> method = FiniteDifferenceMethod([-2, 0, 5], 1) -FiniteDifferenceMethod: - order of method: 3 - order of derivative: 1 - grid: [-2, 0, 5] - coefficients: [-0.35714285714285715, 0.3, 0.05714285714285714] - -julia> method(sin, 1) - cos(1) --3.701483564100272e-13 -``` - -## Multivariate Derivatives - -Consider a quadratic function: - -```julia -julia> a = randn(3, 3); a = a * a' -3×3 Matrix{Float64}: - 4.31995 -2.80614 0.0829128 - -2.80614 3.91982 0.764388 - 0.0829128 0.764388 1.18055 - -julia> f(x) = 0.5 * x' * a * x -``` - -Compute the gradient: - -```julia -julia> x = randn(3) -3-element Vector{Float64}: - -0.18563161988700727 - -0.4659836395595666 - 2.304584409826511 - -julia> grad(central_fdm(5, 1), f, x)[1] - a * x -3-element Vector{Float64}: - 4.1744385725905886e-14 - -6.611378111642807e-14 - -8.615330671091215e-14 -``` 
- -Compute the Jacobian: - -```julia -julia> jacobian(central_fdm(5, 1), f, x)[1] - (a * x)' -1×3 Matrix{Float64}: - 4.17444e-14 -6.61138e-14 -8.61533e-14 -``` - -The Jacobian can also be computed for non-scalar functions: - -```julia -julia> a = randn(3, 3) -3×3 Matrix{Float64}: - 0.844846 1.04772 1.0173 - -0.867721 0.154146 -0.938077 - 1.34078 -0.630105 -1.13287 - -julia> f(x) = a * x - -julia> jacobian(central_fdm(5, 1), f, x)[1] - a -3×3 Matrix{Float64}: - 2.91989e-14 1.77636e-15 4.996e-14 - -5.55112e-15 -7.63278e-15 2.4758e-14 - 4.66294e-15 -2.05391e-14 -1.04361e-14 -``` - -To compute Jacobian--vector products, use `jvp` and `j′vp`: - -```julia -julia> v = randn(3) -3-element Array{Float64,1}: - -1.290782164377614 - -0.37701592844250903 - -1.4288108966380777 - -julia> jvp(central_fdm(5, 1), f, (x, v)) - a * v -3-element Vector{Float64}: - -7.993605777301127e-15 - -8.881784197001252e-16 - -3.22519788653608e-14 - -julia> j′vp(central_fdm(5, 1), f, x, v)[1] - a'x -3-element Vector{Float64}: - -2.1316282072803006e-14 - 2.4646951146678475e-14 - 6.661338147750939e-15 -``` From 3cf218f52e8f89b9f56e7ba414e74e28dd9e54db Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 9 Jan 2021 11:48:27 +0100 Subject: [PATCH 73/77] Fix docstring --- src/methods.jl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index d2350e13..1307a330 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -53,9 +53,7 @@ end """ AdaptedFiniteDifferenceMethod{ - P, - Q, - E<:FiniteDifferenceMethod + P, Q, E<:FiniteDifferenceMethod } <: FiniteDifferenceMethod{P,Q} A finite difference method that estimates a `Q`th order derivative from `P` function From 85a7f95bb9e0a9f9047fc363c57e90980f622c03 Mon Sep 17 00:00:00 2001 From: Wessel Date: Sat, 9 Jan 2021 12:20:11 +0100 Subject: [PATCH 74/77] Update src/methods.jl Co-authored-by: Lyndon White --- src/methods.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/methods.jl 
b/src/methods.jl index 1307a330..b3d8c130 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -259,8 +259,8 @@ function _compute_estimate( # If we substitute `T.(coefs)` in the expression below, then allocations occur. We # therefore perform the broadcasting first. See # https://github.com/JuliaLang/julia/issues/39151. - coefs = T.(coefs) - return sum(fs .* coefs) ./ T(step)^Q + _coefs = T.(coefs) + return sum(fs .* _coefs) ./ T(step)^Q end # Check the method and derivative orders for consistency. From 5c363ec6c23b6cc24c57ac2c19428d3fa2150cdd Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sat, 9 Jan 2021 12:23:21 +0100 Subject: [PATCH 75/77] Fix typo --- src/methods.jl | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/methods.jl b/src/methods.jl index b3d8c130..3601cddc 100644 --- a/src/methods.jl +++ b/src/methods.jl @@ -62,17 +62,17 @@ evaluations. This method dynamically adapts its step size. The adaptation works by explicitly estimating the truncation error and round-off error, and choosing the step size to optimally balance those. The truncation error is given by the magnitude of the `P`th order derivative, which -will be estimated with another finite difference method (`E`). This finite difference -method, `bound_estimator`, will be tasked with estimating the `P`th order derivative in a -_neighbourhood_, not just at some `x`. To do this, it will use a careful reweighting of the -function evaluations to estimate the `P`th order derivative at, in the case of a central -method, `x - h`, `x`, and `x + h`, where `h` is the step size. The coeffients for this -estimate, the _neighbourhood estimate_, are given by the three sets of coeffients in -`bound_estimator.coefs_neighbourhood`. The round-off error is estimated by the round-off -error of the function evaluations performed by `bound_estimator`. The trunction error is -amplified by `condition`, and the round-off error is amplified by `factor`. 
The quantities -`∇f_magnitude_mult` and `f_error_mult` are precomputed quantities that facilitate the -step size adaptation procedure. +will be estimated with another finite difference method (`bound_estimator`). This finite +difference method, `bound_estimator`, will be tasked with estimating the `P`th order +derivative in a _neighbourhood_, not just at some `x`. To do this, it will use a careful +reweighting of the function evaluations to estimate the `P`th order derivative at, in the +case of a central method, `x - h`, `x`, and `x + h`, where `h` is the step size. The +coefficients for this estimate, the _neighbourhood estimate_, are given by the three sets of +coefficients in `bound_estimator.coefs_neighbourhood`. The round-off error is estimated by the +round-off error of the function evaluations performed by `bound_estimator`. The truncation +error is amplified by `condition`, and the round-off error is amplified by `factor`. The +quantities `∇f_magnitude_mult` and `f_error_mult` are precomputed quantities that facilitate +the step size adaptation procedure. # Fields - `grid::SVector{P,Int}`: Multiples of the step size that the function will be evaluated at. From 9be39b8f91d31e1dac59c48895c9ad3d6ae7209c Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sun, 10 Jan 2021 13:42:57 +0100 Subject: [PATCH 76/77] Remove compat entry to allow ChainRules int.
test --- .github/workflows/IntegrationTestChainRules.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/IntegrationTestChainRules.yml b/.github/workflows/IntegrationTestChainRules.yml index 5b7d047a..aac327d2 100644 --- a/.github/workflows/IntegrationTestChainRules.yml +++ b/.github/workflows/IntegrationTestChainRules.yml @@ -26,5 +26,7 @@ jobs: repository: JuliaDiff/ChainRules.jl path: "ChainRules.jl" - name: Run the tests - run: julia --project="ChainRules.jl" -e "using Pkg; Pkg.develop(PackageSpec(path=\"./\")); Pkg.update(); Pkg.test()" + run: | + julia -e "p = \"ChainRules.jl/Project.toml\"; toml = TOML.parsefile(p); delete!(toml[\"compat\"], \"FiniteDifferences\"); open(io -> TOML.print(io, toml), p, \"w\")" + julia --project="ChainRules.jl" -e "using Pkg; Pkg.develop(PackageSpec(path=\"./\")); Pkg.update(); Pkg.test()" shell: bash From 0e29e4cbf78a8dce9a8fd305da2bfa8c2bc9a888 Mon Sep 17 00:00:00 2001 From: Wessel Bruinsma Date: Sun, 10 Jan 2021 15:02:07 +0100 Subject: [PATCH 77/77] Revert "Remove compat entry to allow ChainRules int. test" This reverts commit 9be39b8f91d31e1dac59c48895c9ad3d6ae7209c. 
--- .github/workflows/IntegrationTestChainRules.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/IntegrationTestChainRules.yml b/.github/workflows/IntegrationTestChainRules.yml index aac327d2..5b7d047a 100644 --- a/.github/workflows/IntegrationTestChainRules.yml +++ b/.github/workflows/IntegrationTestChainRules.yml @@ -26,7 +26,5 @@ jobs: repository: JuliaDiff/ChainRules.jl path: "ChainRules.jl" - name: Run the tests - run: | - julia -e "p = \"ChainRules.jl/Project.toml\"; toml = TOML.parsefile(p); delete!(toml[\"compat\"], \"FiniteDifferences\"); open(io -> TOML.print(io, toml), p, \"w\")" - julia --project="ChainRules.jl" -e "using Pkg; Pkg.develop(PackageSpec(path=\"./\")); Pkg.update(); Pkg.test()" + run: julia --project="ChainRules.jl" -e "using Pkg; Pkg.develop(PackageSpec(path=\"./\")); Pkg.update(); Pkg.test()" shell: bash