Skip to content

WIP: Remove add_tiny and add estimate_magitude #125

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 14 commits into from
Dec 3, 2020
2 changes: 1 addition & 1 deletion Project.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
name = "FiniteDifferences"
uuid = "26cc04aa-876d-5657-8c51-4c34ba976000"
version = "0.11.4"
version = "0.11.5"

[deps]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
Expand Down
50 changes: 36 additions & 14 deletions src/methods.jl
Original file line number Diff line number Diff line change
@@ -1,13 +1,33 @@
export FiniteDifferenceMethod, fdm, backward_fdm, forward_fdm, central_fdm, extrapolate_fdm

"""
    estimate_magitude(f, x::T) where T<:AbstractFloat

Estimate the magnitude of `f` in a neighbourhood of `x`, assuming that the outputs of `f`
have a "typical" order of magnitude. The result should be interpreted as a very rough
estimate. This function deals with the case that `f(x) = 0`.

NOTE: deliberately not made recursive: recursing on a zero result would loop forever on
`x -> 0.0`, and a function may genuinely be zero in a whole neighbourhood of `x`.
"""
function estimate_magitude(f, x::T) where T<:AbstractFloat
    magnitude = float(maximum(abs, f(x)))
    magnitude > 0 && return magnitude
    # Ouch, `f(x) = 0`. But it may not be zero around `x`. We conclude that `x` is likely
    # a pathological input for `f`. Perturb `x`, and assume that the perturbed value for
    # `x` is highly unlikely to also be a pathological value for `f`.
    Δ = convert(T, 0.1) * max(abs(x), one(x))
    return float(maximum(abs, f(x + Δ)))
end

"""
    estimate_roundoff_error(f, x::T) where T<:AbstractFloat

Estimate the round-off error of `f(x)`. This function deals with the case that `f(x) = 0`.
"""
function estimate_roundoff_error(f, x::T) where T<:AbstractFloat
    # It can happen that the function is zero around `x`, in which case `eps(f(x))` is
    # not usable. Base the estimate on the magnitude of `f` near `x` instead, and impose
    # a lower bound of `eps(T) / 1000`, which gives `f` four orders of magnitude of
    # wiggle room.
    lower_bound = eps(T) / 1000
    return max(eps(estimate_magitude(f, x)), lower_bound)
end

"""
FiniteDifferences.DEFAULT_CONDITION
Expand Down Expand Up @@ -209,7 +229,7 @@ end

# Construct the default estimator for the bound on the derivative: amplify the estimated
# magnitude of `f` near `x` by the condition number.
function _make_default_bound_estimator(; condition::Real=DEFAULT_CONDITION)
    function default_bound_estimator(f, x)
        return condition * estimate_magitude(f, x)
    end
    return default_bound_estimator
end

Expand Down Expand Up @@ -256,17 +276,18 @@ function estimate_step(
) where T<:AbstractFloat
p = length(m.coefs)
q = m.q
f_x = float(f(x))

# Estimate the bound and round-off error.
ε = add_tiny(maximum(eps, f_x)) * factor
M = add_tiny(m.bound_estimator(f, x))
# Estimate the round-off error.
ε = estimate_roundoff_error(f, x) * factor

# Estimate the bound on the derivatives.
M = m.bound_estimator(f, x)

# Set the step size by minimising an upper bound on the error of the estimate.
C₁ = ε * sum(abs, m.coefs)
C₂ = M * sum(n -> abs(m.coefs[n] * m.grid[n]^p), eachindex(m.coefs)) / factorial(p)
# Type inference fails on this, so we annotate it, which gives big performance benefits.
h::T = convert(T, min((q / (p - q) * C₁ / C₂)^(1 / p), max_step))
h::T = convert(T, min((q / (p - q) * (C₁ / C₂))^(1 / p), max_step))

# Estimate the accuracy of the method.
accuracy = h^(-q) * C₁ + h^(p - q) * C₂
Expand All @@ -292,7 +313,7 @@ for direction in [:forward, :central, :backward]
grid,
q,
coefs,
_make_adaptive_bound_estimator($fdm_fun, p, adapt, condition, geom=geom),
_make_adaptive_bound_estimator($fdm_fun, p, q, adapt, condition, geom=geom),
)
end

Expand Down Expand Up @@ -327,16 +348,17 @@ end

# Build the estimator used to bound the derivative of `f`. If `adapt >= 1`, estimate the
# derivative with a method constructed by `constructor`, decrementing `adapt` at each
# level of recursion; otherwise fall back to the default magnitude-based estimator.
# NOTE(review): `q` is accepted but not used in the visible body — confirm it is kept for
# interface compatibility with callers.
function _make_adaptive_bound_estimator(
constructor::Function,
p::Int,
q::Int,
adapt::Int,
condition::Int;
kw_args...
)
if adapt >= 1
# Use a method of one order higher (second argument is the derivative order) to
# estimate the `p`th derivative of `f`.
estimate_derivative = constructor(
p + 1, p, adapt=adapt - 1, condition=condition; kw_args...
)
# `estimate_magitude` deals with the case that the estimated derivative is zero at `x`.
return (f, x) -> estimate_magitude(x′ -> estimate_derivative(f, x′), x)
else
return _make_default_bound_estimator(condition=condition)
end
Expand Down
47 changes: 39 additions & 8 deletions test/methods.jl
Original file line number Diff line number Diff line change
@@ -1,15 +1,37 @@
using FiniteDifferences: add_tiny
import FiniteDifferences: estimate_magitude, estimate_roundoff_error

@testset "Methods" begin
@testset "estimate_magitude" begin
# `Float64` inputs: at `x = 0.0` the function is zero, so the estimate is taken at the
# perturbed input `0.1`.
f64(x::Float64) = x
f64_int(x::Float64) = Int(10x)
@test estimate_magitude(f64, 0.0) === 0.1
@test estimate_magitude(f64, 1.0) === 1.0
@test estimate_magitude(f64_int, 0.0) === 1.0
@test estimate_magitude(f64_int, 1.0) === 10.0

# `Float32` inputs preserve the output type when the function returns `Float32`.
f32(x::Float32) = x
f32_int(x::Float32) = Int(10 * x)
@test estimate_magitude(f32, 0f0) === 0.1f0
@test estimate_magitude(f32, 1f0) === 1f0
# In this case, the `Int` is converted with `float`, so we expect a `Float64`.
@test estimate_magitude(f32_int, 0f0) === 1.0
@test estimate_magitude(f32_int, 1f0) === 10.0
end

@testset "estimate_roundoff_error" begin
    # `Float64`s:
    @test estimate_roundoff_error(identity, 1.0) == eps(1.0)
    # Perturbation from `estimate_magitude`: the magnitude at `0.0` is estimated at the
    # perturbed input `0.1`.
    @test estimate_roundoff_error(identity, 0.0) == eps(0.1)

    # `Float32`s:
    @test estimate_roundoff_error(identity, 1f0) == eps(1f0)
    # Perturbation from `estimate_magitude`:
    @test estimate_roundoff_error(identity, 0.0f0) == eps(0.1f0)

    # Test lower bound of `eps(T) / 1000`.
    @test estimate_roundoff_error(x -> 1e-100, 0.0) == eps(1.0) / 1000
    @test estimate_roundoff_error(x -> 1f-100, 0f0) == eps(1f0) / 1000
end

# The different approaches to approximating the gradient to try.
Expand Down Expand Up @@ -131,4 +153,13 @@ using FiniteDifferences: add_tiny
end
end
end

# Regression test for issue #124: the derivative of `cosc` at `0` should be `-π² / 3`.
@testset "Derivative of cosc at 0 (#124)" begin
@test central_fdm(5, 1)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=1e-9
# Higher-order adaptive method should reach much tighter accuracy.
@test central_fdm(10, 1, adapt=3)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=5e-14
end

# Regression test for issue #125: the derivative of a constant function should be
# (approximately) zero rather than failing.
@testset "Derivative of a constant (#125)" begin
@test central_fdm(2, 1)(x -> 0, 0) ≈ 0 atol=1e-10
end
end