diff --git a/.gitignore b/.gitignore
index 80642e1f..6caafe51 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,10 @@
 *.jl.cov
 *.jl.*.cov
 *.jl.mem
+/Manifest.toml
+
+# Docs:
 docs/build/
 docs/site/
-/Manifest.toml
+# `docs/src/index.md` will be automatically generated by `docs/make.jl`.
+docs/src/index.md
diff --git a/Project.toml b/Project.toml
index 22ddf5dc..5db699ac 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,6 +1,6 @@
 name = "FiniteDifferences"
 uuid = "26cc04aa-876d-5657-8c51-4c34ba976000"
-version = "0.11.7"
+version = "0.12.0"
 
 [deps]
 ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
@@ -8,6 +8,7 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 Richardson = "708f8203-808e-40c0-ba2d-98a6953ed40d"
+StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
 
 [compat]
 ChainRulesCore = "0.9"
@@ -15,9 +16,9 @@ Richardson = "1.2"
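+StaticArrays = "0.12, 1"  # Assumed compat bound for the new dependency; adjust if needed.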
 julia = "1"
 
 [extras]
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
-StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [targets]
-test = ["Random", "StaticArrays", "Test"]
+test = ["Random", "Test", "BenchmarkTools"]
diff --git a/README.md b/README.md
index 4a0c306e..aa45f484 100644
--- a/README.md
+++ b/README.md
@@ -21,16 +21,15 @@ At some point in the future they might merge, or one might depend on the other.
 Right now here are the differences:
 
- - FiniteDifferences.jl supports basically any type, where as FiniteDiff.jl supports only array-ish types
+ - FiniteDifferences.jl supports basically any type, whereas FiniteDiff.jl supports only array-ish types
- - FiniteDifferences.jl supports higher order approximation
- - FiniteDiff.jl is carefully optimized to minimize allocations
+ - FiniteDifferences.jl supports higher order approximation and step size adaptation
+ - FiniteDiff.jl supports caching and in-place computation
  - FiniteDiff.jl supports coloring vectors for efficient calculation of sparse Jacobians
 
 
 #### FDM.jl
 This package was formerly called FDM.jl. We recommend users of FDM.jl [update to FiniteDifferences.jl](https://github.com/JuliaDiff/FiniteDifferences.jl/issues/37).
 
-
-## Example: Scalar Derivatives
+## Scalar Derivatives
 
 Compute the first derivative of `sin` with a 5th order central method:
 
@@ -39,17 +38,150 @@ julia> central_fdm(5, 1)(sin, 1) - cos(1)
 -2.4313884239290928e-14
 ```
 
+Finite difference methods are optimised to minimise allocations:
+
+```julia
+julia> m = central_fdm(5, 1);
+
+julia> @benchmark $m(sin, 1)
+BenchmarkTools.Trial:
+  memory estimate:  0 bytes
+  allocs estimate:  0
+  --------------
+  minimum time:     486.621 ns (0.00% GC)
+  median time:      507.677 ns (0.00% GC)
+  mean time:        539.806 ns (0.00% GC)
+  maximum time:     1.352 μs (0.00% GC)
+  --------------
+  samples:          10000
+  evals/sample:     195
+```
+
 Compute the second derivative of `sin` with a 5th order central method:
 
 ```julia
-julia> central_fdm(5, 2)(sin, 1) + sin(1)
--4.367495254342657e-11
+julia> central_fdm(5, 2)(sin, 1) - (-sin(1))
+-8.767431225464861e-11
+```
+
+To obtain better accuracy, you can increase the order of the method:
+
+```julia
+julia> central_fdm(12, 2)(sin, 1) - (-sin(1))
+5.240252676230739e-14
 ```
 
 The functions `forward_fdm` and `backward_fdm` can be used to construct
 forward differences and backward differences respectively.
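+
+Both take the same arguments as `central_fdm`. As a quick sketch (the exact error
+magnitudes will vary):
+
+```julia
+forward_fdm(5, 1)(sin, 1)   # ≈ cos(1); evaluates `sin` only at and above `x = 1`
+backward_fdm(5, 1)(sin, 1)  # ≈ cos(1); evaluates `sin` only at and below `x = 1`
+```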
 
-Alternatively, you can construct a finite difference method on a custom grid:
+### Dealing with Singularities
+
+The function `log(x)` is only defined for `x > 0`.
+If we try to use `central_fdm` to estimate the derivative of `log` near `x = 0`,
+then we run into `DomainError`s, because `central_fdm` happens to evaluate `log`
+at some `x < 0`.
+
+```julia
+julia> central_fdm(5, 1)(log, 1e-3)
+ERROR: DomainError with -0.02069596546590111
+```
+
+To deal with this situation, you have two options.
+
+The first option is to use `forward_fdm`, which only evaluates `log` at inputs greater
+than or equal to `x`:
+
+```julia
+julia> forward_fdm(5, 1)(log, 1e-3) - 1000
+-3.741856744454708e-7
+```
+
+This works fine, but the downside is that you're restricted to one-sided
+methods (`forward_fdm`), which tend to perform worse than central methods
+(`central_fdm`).
+
+The second option is to limit the distance that the finite difference method is
+allowed to evaluate `log` away from `x`. Since `x = 1e-3`, a reasonable value
+for this limit is `9e-4`, which guarantees that `log` is never evaluated at points
+smaller than `1e-4`:
+
+```julia
+julia> central_fdm(5, 1, max_range=9e-4)(log, 1e-3) - 1000
+-4.027924660476856e-10
+```
+
+Another commonly encountered example is `logdet`, which is only defined
+for positive-definite matrices.
+Here you can use a forward method in combination with a positive-definite
+deviation from `x`:
+
+```julia
+julia> x = diagm([1.0, 2.0, 3.0]); v = Matrix(1.0I, 3, 3);
+
+julia> forward_fdm(5, 1)(ε -> logdet(x .+ ε .* v), 0) - sum(1 ./ diag(x))
+-4.222400207254395e-12
+```
+
+A range-limited central method is also possible:
+
+```julia
+julia> central_fdm(5, 1, max_range=9e-1)(ε -> logdet(x .+ ε .* v), 0) - sum(1 ./ diag(x))
+-1.283417816466681e-13
+```
+
+### Richardson Extrapolation
+
+The finite difference methods defined in this package can be extrapolated using
+[Richardson extrapolation](https://github.com/JuliaMath/Richardson.jl).
+This can offer superior numerical accuracy:
+Richardson extrapolation attempts polynomial extrapolation of the finite
+difference estimate as a function of the step size until a convergence criterion
+is reached.
+
+```julia
+julia> extrapolate_fdm(central_fdm(2, 1), sin, 1)[1] - cos(1)
+1.6653345369377348e-14
+```
+
+Similarly, you can estimate higher order derivatives:
+
+```julia
+julia> extrapolate_fdm(central_fdm(5, 4), sin, 1)[1] - sin(1)
+-1.626274487942503e-5
+```
+
+In this case, the accuracy can be improved by making the
+[contraction rate](https://github.com/JuliaMath/Richardson.jl#usage) closer to
+`1`:
+
+```julia
+julia> extrapolate_fdm(central_fdm(5, 4), sin, 1, contract=0.8)[1] - sin(1)
+2.0725743343774639e-10
+```
+
+This performs similarly to a `10`th order central method:
+
+```julia
+julia> central_fdm(10, 4)(sin, 1) - sin(1)
+-1.0301381969668455e-10
+```
+
+If you really want, you can even extrapolate the `10`th order central method,
+but that provides no further gains:
+
+```julia
+julia> extrapolate_fdm(central_fdm(10, 4), sin, 1, contract=0.8)[1] - sin(1)
+5.673617131662922e-10
+```
+
+In this case, the central method can be pushed to a high order to obtain
+improved accuracy:
+
+```julia
+julia> central_fdm(20, 4)(sin, 1) - sin(1)
+-5.2513549064769904e-14
+```
+
+### A Finite Difference Method on a Custom Grid
 
 ```julia
 julia> method = FiniteDifferenceMethod([-2, 0, 5], 1)
@@ -60,29 +192,19 @@ FiniteDifferenceMethod:
   coefficients:          [-0.35714285714285715, 0.3, 0.05714285714285714]
 
 julia> method(sin, 1) - cos(1)
--8.423706177040913e-11
-```
-
-Compute a directional derivative:
-
-```julia
-julia> f(x) = sum(x)
-f (generic function with 1 method)
-
-julia> central_fdm(5, 1)(ε -> f([1, 1, 1] .+ ε .* [1, 2, 3]), 0) - 6
--6.217248937900877e-15
+-3.701483564100272e-13
 ```
 
-## Example: Multivariate Derivatives
+## Multivariate Derivatives
 
 Consider a quadratic function:
 
 ```julia
 julia> a = randn(3, 3); a = a * a'
-3×3 Array{Float64,2}:
-  8.0663   -1.12965   1.68556
- -1.12965   3.55005  -3.10405
-  1.68556  -3.10405   3.77251
+3×3 Matrix{Float64}:
+  4.31995    -2.80614   0.0829128
+ -2.80614     3.91982   0.764388
+  0.0829128   0.764388  1.18055
 
 julia> f(x) = 0.5 * x' * a * x
 ```
@@ -90,41 +212,47 @@ julia> f(x) = 0.5 * x' * a * x
 Compute the gradient:
 
 ```julia
+julia> x = randn(3)
+3-element Vector{Float64}:
+ -0.18563161988700727
+ -0.4659836395595666
+  2.304584409826511
+
 julia> grad(central_fdm(5, 1), f, x)[1] - a * x
-3-element Array{Float64,1}:
- -1.2612133559741778e-12
- -3.526068326209497e-13
- -2.3305801732931286e-12
+3-element Vector{Float64}:
+  4.1744385725905886e-14
+ -6.611378111642807e-14
+ -8.615330671091215e-14
 ```
 
 Compute the Jacobian:
 
 ```julia
 julia> jacobian(central_fdm(5, 1), f, x)[1] - (a * x)'
-1×3 Array{Float64,2}:
- -1.26121e-12  -3.52607e-13  -2.33058e-12
+1×3 Matrix{Float64}:
+ 4.17444e-14  -6.61138e-14  -8.61533e-14
 ```
 
 The Jacobian can also be computed for non-scalar functions:
 
 ```julia
 julia> a = randn(3, 3)
-3×3 Array{Float64,2}:
- -0.343783   1.5708     0.723289
- -0.425706  -0.478881  -0.306881
-  1.27326   -0.171606   2.23671
+3×3 Matrix{Float64}:
+  0.844846   1.04772    1.0173
+ -0.867721   0.154146  -0.938077
+  1.34078   -0.630105  -1.13287
 
 julia> f(x) = a * x
 
 julia> jacobian(central_fdm(5, 1), f, x)[1] - a
-3×3 Array{Float64,2}:
- -2.81331e-13   2.77556e-13  1.28342e-13
- -3.34732e-14  -6.05072e-15  6.05072e-15
- -2.24709e-13   1.88821e-13  1.06581e-14
+3×3 Matrix{Float64}:
+  2.91989e-14   1.77636e-15   4.996e-14
+ -5.55112e-15  -7.63278e-15   2.4758e-14
+  4.66294e-15  -2.05391e-14  -1.04361e-14
 ```
 
 To compute Jacobian--vector products, use `jvp` and `j′vp`:
- 
+
 ```julia
 julia> v = randn(3)
 3-element Array{Float64,1}:
@@ -133,14 +261,14 @@ julia> v = randn(3)
  -1.4288108966380777
 
 julia> jvp(central_fdm(5, 1), f, (x, v)) - a * v
-3-element Array{Float64,1}:
- -1.3233858453531866e-13
-  9.547918011776346e-15
-  3.632649736573512e-13
+3-element Vector{Float64}:
+ -7.993605777301127e-15
+ -8.881784197001252e-16
+ -3.22519788653608e-14
 
 julia> j′vp(central_fdm(5, 1), f, x, v)[1] - a'x
- 3-element Array{Float64,1}:
-  3.5704772471945034e-13
-  4.04121180963557e-13
-  1.2807532812075806e-12
+3-element Vector{Float64}:
+ -2.1316282072803006e-14
+  2.4646951146678475e-14
+  6.661338147750939e-15
 ```
diff --git a/docs/Manifest.toml b/docs/Manifest.toml
index 8a54eae9..db7ead35 100644
--- a/docs/Manifest.toml
+++ b/docs/Manifest.toml
@@ -4,10 +4,10 @@
 uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
 
 [[ChainRulesCore]]
-deps = ["MuladdMacro"]
-git-tree-sha1 = "9907341fe861268ddd0fc60be260633756b126a2"
+deps = ["LinearAlgebra", "MuladdMacro", "SparseArrays"]
+git-tree-sha1 = "15081c431bb25848ad9b0d172a65794f3a3e197a"
 uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
-version = "0.9.4"
+version = "0.9.24"
 
 [[Dates]]
 deps = ["Printf"]
@@ -19,21 +19,27 @@ uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
 
 [[DocStringExtensions]]
 deps = ["LibGit2", "Markdown", "Pkg", "Test"]
-git-tree-sha1 = "c5714d9bcdba66389612dc4c47ed827c64112997"
+git-tree-sha1 = "50ddf44c53698f5e784bbebb3f4b21c5807401b1"
 uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
-version = "0.8.2"
+version = "0.8.3"
 
 [[Documenter]]
-deps = ["Base64", "Dates", "DocStringExtensions", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"]
-git-tree-sha1 = "1c593d1efa27437ed9dd365d1143c594b563e138"
+deps = ["Base64", "Dates", "DocStringExtensions", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"]
+git-tree-sha1 = "a4875e0763112d6d017126f3944f4133abb342ae"
 uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
-version = "0.25.1"
+version = "0.25.5"
 
 [[FiniteDifferences]]
-deps = ["ChainRulesCore", "LinearAlgebra", "Printf", "Random", "Richardson"]
+deps = ["ChainRulesCore", "LinearAlgebra", "Printf", "Random", "Richardson", "StaticArrays"]
 path = ".."
 uuid = "26cc04aa-876d-5657-8c51-4c34ba976000"
-version = "0.10.8"
+version = "0.11.5"
+
+[[IOCapture]]
+deps = ["Logging"]
+git-tree-sha1 = "377252859f740c217b936cebcd918a44f9b53b59"
+uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89"
+version = "0.1.1"
 
 [[InteractiveUtils]]
 deps = ["Markdown"]
@@ -41,9 +47,9 @@ uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
 
 [[JSON]]
 deps = ["Dates", "Mmap", "Parsers", "Unicode"]
-git-tree-sha1 = "b34d7cef7b337321e97d22242c3c2b91f476748e"
+git-tree-sha1 = "81690084b6198a2e1da36fcfda16eeca9f9f24e4"
 uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
-version = "0.21.0"
+version = "0.21.1"
 
 [[LibGit2]]
 deps = ["Printf"]
@@ -72,10 +78,10 @@ uuid = "46d2c3a1-f734-5fdb-9937-b9b9aeba4221"
 version = "0.2.2"
 
 [[Parsers]]
-deps = ["Dates", "Test"]
-git-tree-sha1 = "10134f2ee0b1978ae7752c41306e131a684e1f06"
+deps = ["Dates"]
+git-tree-sha1 = "50c9a9ed8c714945e01cd53a21007ed3865ed714"
 uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
-version = "1.0.7"
+version = "1.0.15"
 
 [[Pkg]]
 deps = ["Dates", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"]
@@ -95,9 +101,9 @@ uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 
 [[Richardson]]
 deps = ["LinearAlgebra"]
-git-tree-sha1 = "74d2cf4de9eda38175c3f94bd94c755a023d5623"
+git-tree-sha1 = "e03ca566bec93f8a3aeb059c8ef102f268a38949"
 uuid = "708f8203-808e-40c0-ba2d-98a6953ed40d"
-version = "1.1.0"
+version = "1.4.0"
 
 [[SHA]]
 uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
@@ -108,6 +114,20 @@ uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
 [[Sockets]]
 uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
 
+[[SparseArrays]]
+deps = ["LinearAlgebra", "Random"]
+uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
+
+[[StaticArrays]]
+deps = ["LinearAlgebra", "Random", "Statistics"]
+git-tree-sha1 = "9da72ed50e94dbff92036da395275ed114e04d49"
+uuid = "90137ffa-7385-5640-81b9-e52037218182"
+version = "1.0.1"
+
+[[Statistics]]
+deps = ["LinearAlgebra", "SparseArrays"]
+uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
+
 [[Test]]
 deps = ["Distributed", "InteractiveUtils", "Logging", "Random"]
 uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
diff --git a/docs/make.jl b/docs/make.jl
index fe93b010..b03a7b50 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -1,5 +1,15 @@
 using Documenter, FiniteDifferences
 
+# Copy the README.
+repo_root = dirname(dirname(@__FILE__))
+cp(
+    joinpath(repo_root, "README.md"),
+    joinpath(repo_root, "docs", "src", "index.md");
+    # `index.md` may already exist if `make.jl` was run previously, so we set `force=true`
+    # to overwrite it.
+    force=true
+)
+
 makedocs(
     modules=[FiniteDifferences],
     format=Documenter.HTML(prettyurls=get(ENV, "CI", nothing) == "true"),
diff --git a/docs/src/index.md b/docs/src/index.md
deleted file mode 100644
index 4bb727e2..00000000
--- a/docs/src/index.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# FiniteDifferences.jl: Finite Difference Methods
-
-[![Build Status](https://travis-ci.org/JuliaDiff/FiniteDifferences.jl.svg?branch=master)](https://travis-ci.org/JuliaDiff/FiniteDifferences.jl)
-[![codecov.io](https://codecov.io/github/JuliaDiff/FiniteDifferences.jl/coverage.svg?branch=master)](https://codecov.io/github/JuliaDiff/FiniteDifferences.jl?branch=master)
-[![Latest Docs](https://img.shields.io/badge/docs-latest-blue.svg)](https://www.juliadiff.org/FiniteDifferences.jl/latest/)
-
-FiniteDifferences.jl approximates derivatives of functions using finite difference methods.
-
-## Examples
-
-Compute the first derivative of `sin` with a 5th order central method:
-
-```julia
-julia> central_fdm(5, 1)(sin, 1) - cos(1)
--1.247890679678676e-13
-```
-Compute the second derivative of `sin` with a 5th order central method:
-
-```julia
-julia> central_fdm(5, 2)(sin, 1) + sin(1)
-9.747314066999024e-12
-```
-
-Construct a FiniteDifferences on a custom grid:
-
-```julia
-julia> method, report = fdm([-2, 0, 5], 1, report=true)
-(FiniteDifferences.method, FiniteDifferencesReport:
-  order of method:       3
-  order of derivative:   1
-  grid:                  [-2, 0, 5]
-  coefficients:          [-0.357143, 0.3, 0.0571429]
-  roundoff error:        2.22e-16
-  bounds on derivatives: 1.00e+00
-  step size:             3.62e-06
-  accuracy:              6.57e-11
-)
-
-julia> method(sin, 1) - cos(1)
--2.05648831297367e-11
-```
-
-Compute a directional derivative:
-
-```julia
-julia> f(x) = sum(x)
-f (generic function with 1 method)
-
-julia> central_fdm(5, 1)(ε -> f([1, 1, 1] + ε * [1, 2, 3]), 0) - 6
--2.922107000813412e-13
-```
diff --git a/docs/src/pages/api.md b/docs/src/pages/api.md
index cf70b88f..a3332b24 100644
--- a/docs/src/pages/api.md
+++ b/docs/src/pages/api.md
@@ -2,14 +2,16 @@
 
 ```@docs
 FiniteDifferenceMethod
-FiniteDifferenceMethod(::AbstractVector, ::Int; ::Int)
 FiniteDifferences.estimate_step
-forward_fdm
 central_fdm
+forward_fdm
 backward_fdm
 extrapolate_fdm
 assert_approx_equal
+FiniteDifferences.UnadaptedFiniteDifferenceMethod
+FiniteDifferences.AdaptedFiniteDifferenceMethod
 FiniteDifferences.DEFAULT_CONDITION
+FiniteDifferences.DEFAULT_FACTOR
 ```
 
 ## Gradients
diff --git a/src/FiniteDifferences.jl b/src/FiniteDifferences.jl
index bc67c23d..58b8a2cd 100644
--- a/src/FiniteDifferences.jl
+++ b/src/FiniteDifferences.jl
@@ -1,17 +1,19 @@
 module FiniteDifferences
 
-    using ChainRulesCore
-    using LinearAlgebra
-    using Printf
-    using Random
-    using Richardson
+using ChainRulesCore
+using LinearAlgebra
+using Printf
+using Random
+using Richardson
+using StaticArrays
 
-    export to_vec, grad, jacobian, jvp, j′vp
+export to_vec, grad, jacobian, jvp, j′vp
+
+include("rand_tangent.jl")
+include("difference.jl")
+include("methods.jl")
+include("numerics.jl")
+include("to_vec.jl")
+include("grad.jl")
 
-    include("rand_tangent.jl")
-    include("difference.jl")
-    include("methods.jl")
-    include("numerics.jl")
-    include("to_vec.jl")
-    include("grad.jl")
 end
diff --git a/src/methods.jl b/src/methods.jl
index 4b8e03b6..3601cddc 100644
--- a/src/methods.jl
+++ b/src/methods.jl
@@ -1,102 +1,159 @@
 export FiniteDifferenceMethod, fdm, backward_fdm, forward_fdm, central_fdm, extrapolate_fdm
 
 """
-    estimate_magitude(f, x::T) where T<:AbstractFloat
+    FiniteDifferences.DEFAULT_CONDITION
 
-Estimate the magnitude of `f` in a neighbourhood of `x`, assuming that the outputs of `f`
-have a "typical" order of magnitude. The result should be interpreted as a very rough
-estimate. This function deals with the case that `f(x) = 0`.
+The default value for the [condition number](https://en.wikipedia.org/wiki/Condition_number)
+of a finite difference method. The condition number specifies the assumed amplification of
+the ∞-norm of the function under differentiation.
 """
-function estimate_magitude(f, x::T) where T<:AbstractFloat
-    M = float(maximum(abs, f(x)))
-    M > 0 && (return M)
-    # Ouch, `f(x) = 0`. But it may not be zero around `x`. We conclude that `x` is likely a
-    # pathological input for `f`. Perturb `x`. Assume that the pertubed value for `x` is
-    # highly unlikely also a pathological value for `f`.
-    Δ = convert(T, 0.1) * max(abs(x), one(x))
-    return float(maximum(abs, f(x + Δ)))
-end
+const DEFAULT_CONDITION = 10
 
 """
-    estimate_roundoff_error(f, x::T) where T<:AbstractFloat
+    FiniteDifferences.DEFAULT_FACTOR
 
-Estimate the round-off error of `f(x)`. This function deals with the case that `f(x) = 0`.
+The default amplification factor. The factor specifies the multiple by which the estimated
+round-off error is amplified.
 """
-function estimate_roundoff_error(f, x::T) where T<:AbstractFloat
-    # Estimate the round-off error. It can happen that the function is zero around `x`, in
-    # which case we cannot take `eps(f(x))`. Therefore, we assume a lower bound that is
-    # equal to `eps(T) / 1000`, which gives `f` four orders of magnitude wiggle room.
-    return max(eps(estimate_magitude(f, x)), eps(T) / 1000)
-end
+const DEFAULT_FACTOR = 1
 
-"""
-    FiniteDifferences.DEFAULT_CONDITION
+abstract type FiniteDifferenceMethod{P,Q} end
 
-The default [condition number](https://en.wikipedia.org/wiki/Condition_number) used when
-computing bounds. It provides amplification of the ∞-norm when passed to the function's
-derivatives.
 """
-const DEFAULT_CONDITION = 100
+    UnadaptedFiniteDifferenceMethod{P,Q} <: FiniteDifferenceMethod{P,Q}
 
+A finite difference method that estimates a `Q`th order derivative from `P` function
+evaluations. This method does not dynamically adapt its step size.
+
+# Fields
+- `grid::SVector{P,Int}`: Multiples of the step size that the function will be evaluated at.
+- `coefs::SVector{P,Float64}`: Coefficients by which the function evaluations at the grid
+    points are weighted.
+- `coefs_neighbourhood::NTuple{3,SVector{P,Float64}}`: Sets of coefficients used for
+    estimating the magnitude of the derivative in a neighbourhood.
+- `condition::Float64`: Condition number. See [`DEFAULT_CONDITION`](@ref).
+- `factor::Float64`: Factor number. See [`DEFAULT_FACTOR`](@ref).
+- `max_range::Float64`: Maximum distance that a function is evaluated from the input at
+    which the derivative is estimated.
+- `∇f_magnitude_mult::Float64`: Internally computed quantity.
+- `f_error_mult::Float64`: Internally computed quantity.
 """
-    FiniteDifferenceMethod{G<:AbstractVector, C<:AbstractVector, E<:Function}
+struct UnadaptedFiniteDifferenceMethod{P,Q} <: FiniteDifferenceMethod{P,Q}
+    grid::SVector{P,Int}
+    coefs::SVector{P,Float64}
+    coefs_neighbourhood::NTuple{3,SVector{P,Float64}}
+    condition::Float64
+    factor::Float64
+    max_range::Float64
+    ∇f_magnitude_mult::Float64
+    f_error_mult::Float64
+end
 
-A finite difference method.
+"""
+    AdaptedFiniteDifferenceMethod{
+        P, Q, E<:FiniteDifferenceMethod
+    } <: FiniteDifferenceMethod{P,Q}
+
+A finite difference method that estimates a `Q`th order derivative from `P` function
+evaluations.
+
+This method dynamically adapts its step size. The adaptation works by explicitly estimating
+the truncation error and round-off error, and choosing the step size to optimally balance
+those. The truncation error is given by the magnitude of the `P`th order derivative, which
+will be estimated with another finite difference method (`bound_estimator`). This finite
+difference method, `bound_estimator`, will be tasked with estimating the `P`th order
+derivative in a _neighbourhood_, not just at some `x`. To do this, it will use a careful
+reweighting of the function evaluations to estimate the `P`th order derivative at, in the
+case of a central method, `x - h`, `x`, and `x + h`, where `h` is the step size. The
+coefficients for this estimate, the _neighbourhood estimate_, are given by the three sets
+of coefficients in `bound_estimator.coefs_neighbourhood`. The round-off error is estimated
+by the round-off error of the function evaluations performed by `bound_estimator`. The
+truncation error is amplified by `condition`, and the round-off error is amplified by
+`factor`. The quantities `∇f_magnitude_mult` and `f_error_mult` are precomputed to
+facilitate the step size adaptation procedure.
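+
+Concretely, for a method of order `P` estimating the `Q`th order derivative, the step size
+`h` is chosen to minimise an upper bound on the error of the estimate of the form
+`C₁ * h^(-Q) + C₂ * h^(P - Q)`, where `C₁` is proportional to the estimated round-off error
+and `C₂` to the estimated magnitude of the `P`th order derivative. This gives
+`h = (Q / (P - Q) * C₁ / C₂)^(1 / P)`.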
 
 # Fields
-- `grid::G`: Multiples of the step size that the function will be evaluated at.
-- `q::Int`: Order of derivative to estimate.
-- `coefs::C`: Coefficients corresponding to the grid functions that the function evaluations
-    will be weighted by.
-- `bound_estimator::Function`: A function that takes in the function and the evaluation
-    point and returns a bound on the magnitude of the `length(grid)`th derivative.
+- `grid::SVector{P,Int}`: Multiples of the step size that the function will be evaluated at.
+- `coefs::SVector{P,Float64}`: Coefficients by which the function evaluations at the grid
+    points are weighted.
+- `coefs_neighbourhood::NTuple{3,SVector{P,Float64}}`: Sets of coefficients used for
+    estimating the magnitude of the derivative in a neighbourhood.
+- `condition::Float64`: Condition number. See [`DEFAULT_CONDITION`](@ref).
+- `factor::Float64`: Factor number. See [`DEFAULT_FACTOR`](@ref).
+- `max_range::Float64`: Maximum distance that a function is evaluated from the input at
+    which the derivative is estimated.
+- `∇f_magnitude_mult::Float64`: Internally computed quantity.
+- `f_error_mult::Float64`: Internally computed quantity.
+- `bound_estimator::FiniteDifferenceMethod`: A finite difference method that is tuned to
+    perform adaptation for this finite difference method.
 """
-struct FiniteDifferenceMethod{G<:AbstractVector, C<:AbstractVector, E<:Function}
-    grid::G
-    q::Int
-    coefs::C
+struct AdaptedFiniteDifferenceMethod{
+    P, Q, E<:FiniteDifferenceMethod
+} <: FiniteDifferenceMethod{P,Q}
+    grid::SVector{P,Int}
+    coefs::SVector{P,Float64}
+    coefs_neighbourhood::NTuple{3,SVector{P,Float64}}
+    condition::Float64
+    factor::Float64
+    max_range::Float64
+    ∇f_magnitude_mult::Float64
+    f_error_mult::Float64
     bound_estimator::E
 end
 
 """
     FiniteDifferenceMethod(
-        grid::AbstractVector,
+        grid::AbstractVector{Int},
         q::Int;
-        condition::Real=DEFAULT_CONDITION
+        condition::Real=DEFAULT_CONDITION,
+        factor::Real=DEFAULT_FACTOR,
+        max_range::Real=Inf
     )
 
 Construct a finite difference method.
 
 # Arguments
-- `grid::Abstract`: The grid. See [`FiniteDifferenceMethod`](@ref).
+- `grid::AbstractVector{Int}`: The grid. See [`AdaptedFiniteDifferenceMethod`](@ref) or
+    [`UnadaptedFiniteDifferenceMethod`](@ref).
 - `q::Int`: Order of the derivative to estimate.
+
+# Keywords
 - `condition::Real`: Condition number. See [`DEFAULT_CONDITION`](@ref).
+- `factor::Real`: Factor number. See [`DEFAULT_FACTOR`](@ref).
+- `max_range::Real=Inf`: Maximum distance that a function is evaluated from the input at
+    which the derivative is estimated.
 
 # Returns
 - `FiniteDifferenceMethod`: Specified finite difference method.
 """
 function FiniteDifferenceMethod(
-    grid::AbstractVector,
+    grid::SVector{P,Int},
     q::Int;
-    condition::Real=DEFAULT_CONDITION
-)
-    p = length(grid)
-    _check_p_q(p, q)
-    return FiniteDifferenceMethod(
+    condition::Real=DEFAULT_CONDITION,
+    factor::Real=DEFAULT_FACTOR,
+    max_range::Real=Inf,
+) where P
+    _check_p_q(P, q)
+    coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult = _coefs_mults(grid, q)
+    return UnadaptedFiniteDifferenceMethod{P,q}(
         grid,
-        q,
-        _coefs(grid, q),
-        _make_default_bound_estimator(condition=condition)
+        coefs,
+        coefs_neighbourhood,
+        condition,
+        factor,
+        max_range,
+        ∇f_magnitude_mult,
+        f_error_mult
     )
 end
+function FiniteDifferenceMethod(grid::AbstractVector{Int}, q::Int; kw_args...)
+    return FiniteDifferenceMethod(SVector{length(grid)}(grid), q; kw_args...)
+end
 
 """
-    (m::FiniteDifferenceMethod)(
-        f::Function,
-        x::T;
-        factor::Real=1,
-        max_step::Real=0.1 * max(abs(x), one(x))
-    ) where T<:AbstractFloat
+    (m::FiniteDifferenceMethod)(f::Function, x::T) where T<:AbstractFloat
 
 Estimate the derivative of `f` at `x` using the finite differencing method `m` and an
 automatically determined step size.
@@ -105,11 +162,6 @@ automatically determined step size.
 - `f::Function`: Function to estimate derivative of.
 - `x::T`: Input to estimate derivative at.
 
-# Keywords
-- `factor::Real=1`: Factor to amplify the estimated round-off error by. This can be used
-    to force a more conservative step size.
-- `max_step::Real=0.1 * max(abs(x), one(x))`: Maximum step size.
-
 # Returns
 - Estimate of the derivative.
 
@@ -124,34 +176,33 @@ FiniteDifferenceMethod:
   coefficients:          [0.08333333333333333, -0.6666666666666666, 0.0, 0.6666666666666666, -0.08333333333333333]
 
 julia> fdm(sin, 1)
-0.5403023058681155
+0.5403023058681607
 
 julia> fdm(sin, 1) - cos(1)  # Check the error.
--2.4313884239290928e-14
+2.098321516541546e-14
 
 julia> FiniteDifferences.estimate_step(fdm, sin, 1.0)  # Computes step size and estimates the error.
-(0.0010632902144695163, 1.9577610541734626e-13)
+(0.001065235154086019, 1.9541865128909085e-13)
 ```
 """
-@inline function (m::FiniteDifferenceMethod)(f::Function, x::Real; kw_args...)
-    # Assume that converting to float is desired.
-    return _call_method(m, f, float(x); kw_args...)
-end
-@inline function _call_method(
-    m::FiniteDifferenceMethod,
-    f::Function,
-    x::T;
-    factor::Real=1,
-    max_step::Real=0.1 * max(abs(x), one(x))
-) where T<:AbstractFloat
-    # The automatic step size calculation fails if `m.q == 0`, so handle that edge case.
-    iszero(m.q) && return f(x)
-    h, _ = estimate_step(m, f, x, factor=factor, max_step=max_step)
-    return _eval_method(m, f, x, h)
+# We loop over all concrete subtypes of `FiniteDifferenceMethod` for Julia v1.0
+# compatibility: older Julia versions do not support adding methods to the call syntax of
+# an abstract type.
+for T in (UnadaptedFiniteDifferenceMethod, AdaptedFiniteDifferenceMethod)
+    @eval begin
+        function (m::$T)(f::TF, x::Real) where TF<:Function
+            x = float(x)  # Assume that converting to float is desired, if it isn't already.
+            step = first(estimate_step(m, f, x))
+            return m(f, x, step)
+        end
+        function (m::$T{P,0})(f::TF, x::Real) where {P,TF<:Function}
+            # The automatic step size calculation fails if `Q == 0`, so handle that edge
+            # case.
+            return f(x)
+        end
+    end
 end
 
 """
-    (m::FiniteDifferenceMethod)(f::Function, x::T, h::Real) where T<:AbstractFloat
+    (m::FiniteDifferenceMethod)(f::Function, x::T, step::Real) where T<:AbstractFloat
 
 Estimate the derivative of `f` at `x` using the finite differencing method `m` and a given
 step size.
@@ -159,7 +210,7 @@ step size.
 # Arguments
 - `f::Function`: Function to estimate derivative of.
 - `x::T`: Input to estimate derivative at.
-- `h::Real`: Step size.
+- `step::Real`: Step size.
 
 # Returns
 - Estimate of the derivative.
@@ -175,26 +226,41 @@ FiniteDifferenceMethod:
   coefficients:          [0.08333333333333333, -0.6666666666666666, 0.0, 0.6666666666666666, -0.08333333333333333]
 
 julia> fdm(sin, 1, 1e-3)
 0.5403023058679624
 
 julia> fdm(sin, 1, 1e-3) - cos(1)  # Check the error.
 -1.7741363933510002e-13
 ```
 """
-@inline function (m::FiniteDifferenceMethod)(f::Function, x::Real, h::Real)
-    # Assume that converting to float is desired.
-    return _eval_method(m, f, float(x), h)
+# We loop over all concrete subtypes of `FiniteDifferenceMethod` for Julia v1.0
+# compatibility (see above).
+for T in (UnadaptedFiniteDifferenceMethod, AdaptedFiniteDifferenceMethod)
+    @eval begin
+        function (m::$T{P,Q})(f::TF, x::Real, step::Real) where {P,Q,TF<:Function}
+            x = float(x)  # Assume that converting to float is desired, if it isn't already.
+            fs = _eval_function(m, f, x, step)
+            return _compute_estimate(m, fs, x, step, m.coefs)
+        end
+    end
 end
-@inline function _eval_method(
-    m::FiniteDifferenceMethod,
-    f::Function,
+
+function _eval_function(
+    m::FiniteDifferenceMethod, f::TF, x::T, step::Real,
+) where {TF<:Function,T<:AbstractFloat}
+    return f.(x .+ T(step) .* m.grid)
+end
+
+function _compute_estimate(
+    m::FiniteDifferenceMethod{P,Q},
+    fs::SVector{P,TF},
     x::T,
-    h::Real
-) where T<:AbstractFloat
-    return sum(
-        i -> convert(T, m.coefs[i]) * f(T(x + h * m.grid[i])),
-        eachindex(m.grid)
-    ) / h^m.q
+    step::Real,
+    coefs::SVector{P,Float64},
+) where {P,Q,TF,T<:AbstractFloat}
+    # If we substitute `T.(coefs)` in the expression below, then allocations occur. We
+    # therefore perform the broadcasting first. See
+    # https://github.com/JuliaLang/julia/issues/39151.
+    _coefs = T.(coefs)
+    return sum(fs .* _coefs) ./ T(step)^Q
 end
 
 # Check the method and derivative orders for consistency.
@@ -202,41 +268,67 @@ function _check_p_q(p::Integer, q::Integer)
     q >= 0 || throw(DomainError(q, "order of derivative (`q`) must be non-negative"))
     q < p || throw(DomainError(
         (q, p),
-        "order of the method (q) must be strictly greater than that of the derivative (p)",
+        "order of the method (`p`) must be strictly greater than that of the derivative " *
+        "(`q`)",
     ))
-    # Check whether the method can be computed. We require the factorial of the method order
-    # to be computable with regular `Int`s, but `factorial` will after 20, so 20 is the
-    # largest we can allow.
-    p > 20 && throw(DomainError(p, "order of the method (`p`) is too large to be computed"))
-    return
 end
 
-const _COEFFS_CACHE = Dict{Tuple{AbstractVector{<:Real}, Integer}, Vector{Float64}}()
+function _coefs(grid, p, q)
+    # For high precision on the `\`, we use `Rational`, and to prevent overflows we use
+    # `BigInt`. At the end we go to `Float64` for fast floating point math, rather than
+    # rational math.
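+    # The coefficients solve the Vandermonde system `C * coefs = q! * e`, where `e` is the
+    # `(q + 1)`th standard basis vector; this makes the method exact for all polynomials
+    # of degree less than `p`.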
+    C = [Rational{BigInt}(g)^i for i in 0:(p - 1), g in grid]
+    x = zeros(Rational{BigInt}, p)
+    x[q + 1] = factorial(big(q))
+    return SVector{p}(Float64.(C \ x))
+end
+
+const _COEFFS_MULTS_CACHE = Dict{
+    Tuple{SVector,Integer},  # Keys: (grid, q)
+    # Values: (coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult)
+    Tuple{SVector,Tuple{Vararg{SVector}},Float64,Float64}
+}()
 
 # Compute coefficients for the method and cache the result.
-function _coefs(grid::AbstractVector{<:Real}, q::Integer)
-    return get!(_COEFFS_CACHE, (grid, q)) do
-        p = length(grid)
-        # For high precision on the `\`, we use `Rational`, and to prevent overflows we use
-        # `Int128`. At the end we go to `Float64` for fast floating point math, rather than
-        # rational math.
-        C = [Rational{Int128}(g^i) for i in 0:(p - 1), g in grid]
-        x = zeros(Rational{Int128}, p)
-        x[q + 1] = factorial(q)
-        return Float64.(C \ x)
+function _coefs_mults(grid::SVector{P, Int}, q::Integer) where P
+    return get!(_COEFFS_MULTS_CACHE, (grid, q)) do
+        # Compute coefficients for derivative estimate.
+        coefs = _coefs(grid, P, q)
+        # Compute coefficients for a neighbourhood estimate.
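+        # Coefficients computed for a grid shifted by `-k` reuse the same function
+        # evaluations to estimate the derivative at `x + k * step`, so the three shifted
+        # grids below give estimates at three points in a neighbourhood of `x`.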
+        if all(grid .>= 0)
+            coefs_neighbourhood = (
+                _coefs(grid .- 2, P, q),
+                _coefs(grid .- 1, P, q),
+                _coefs(grid, P, q)
+            )
+        elseif all(grid .<= 0)
+            coefs_neighbourhood = (
+                _coefs(grid, P, q),
+                _coefs(grid .+ 1, P, q),
+                _coefs(grid .+ 2, P, q)
+            )
+        else
+            coefs_neighbourhood = (
+                _coefs(grid .- 1, P, q),
+                _coefs(grid, P, q),
+                _coefs(grid .+ 1, P, q)
+            )
+        end
+        # Compute multipliers.
+        # Use `float` to avoid integer overflow of `grid .^ P` for high-order methods.
+        ∇f_magnitude_mult = sum(abs.(coefs .* float.(grid) .^ P)) / factorial(big(P))
+        f_error_mult = sum(abs.(coefs))
+        return coefs, coefs_neighbourhood, ∇f_magnitude_mult, f_error_mult
     end
 end
 
-# Estimate the bound on the derivative by amplifying the ∞-norm.
-function _make_default_bound_estimator(; condition::Real=DEFAULT_CONDITION)
-    default_bound_estimator(f, x) = condition * estimate_magitude(f, x)
-    return default_bound_estimator
-end
-
-function Base.show(io::IO, m::MIME"text/plain", x::FiniteDifferenceMethod)
+function Base.show(
+    io::IO,
+    m::MIME"text/plain",
+    x::FiniteDifferenceMethod{P, Q},
+) where {P, Q}
     @printf io "FiniteDifferenceMethod:\n"
-    @printf io "  order of method:       %d\n" length(x.grid)
-    @printf io "  order of derivative:   %d\n" x.q
+    @printf io "  order of method:       %d\n" P
+    @printf io "  order of derivative:   %d\n" Q
     @printf io "  grid:                  %s\n" x.grid
     @printf io "  coefficients:          %s\n" x.coefs
 end
@@ -245,9 +337,7 @@ end
     function estimate_step(
         m::FiniteDifferenceMethod,
         f::Function,
-        x::T;
-        factor::Real=1,
-        max_step::Real=0.1 * max(abs(x), one(x))
+        x::T
     ) where T<:AbstractFloat
 
 Estimate the step size for a finite difference method `m`. Also estimates the error of the
@@ -258,63 +348,135 @@ estimate of the derivative.
 - `f::Function`: Function to evaluate the derivative of.
 - `x::T`: Point to estimate the derivative at.
 
-# Keywords
-- `factor::Real=1`. Factor to amplify the estimated round-off error by. This can be used
-    to force a more conservative step size.
-- `max_step::Real=0.1 * max(abs(x), one(x))`: Maximum step size.
-
 # Returns
-- `Tuple{T, <:AbstractFloat}`: Estimated step size and an estimate of the error of the
-    finite difference estimate.
+- `Tuple{<:AbstractFloat, <:AbstractFloat}`: Estimated step size and an estimate of the
+    error of the finite difference estimate. The error will be `NaN` if the method failed
+    to estimate the error.
 """
 function estimate_step(
-    m::FiniteDifferenceMethod,
-    f::Function,
-    x::T;
-    factor::Real=1,
-    max_step::Real=0.1 * max(abs(x), one(x))
-) where T<:AbstractFloat
-    p = length(m.coefs)
-    q = m.q
+    m::UnadaptedFiniteDifferenceMethod, f::TF, x::T,
+) where {TF<:Function,T<:AbstractFloat}
+    step, acc = _compute_step_acc_default(m, x)
+    return _limit_step(m, x, step, acc)
+end
+function estimate_step(
+    m::AdaptedFiniteDifferenceMethod{P,Q}, f::TF, x::T,
+) where {P,Q,TF<:Function,T<:AbstractFloat}
+    ∇f_magnitude, f_magnitude = _estimate_magnitudes(m.bound_estimator, f, x)
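+    # If the function or its derivative is estimated to be zero around `x`, the error
+    # model breaks down, so fall back to a default step size in that case.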
+    if ∇f_magnitude == 0.0 || f_magnitude == 0.0
+        step, acc = _compute_step_acc_default(m, x)
+    else
+        step, acc = _compute_step_acc(m, ∇f_magnitude, eps(f_magnitude))
+    end
+    return _limit_step(m, x, step, acc)
+end
 
-    # Estimate the round-off error.
-    ε = estimate_roundoff_error(f, x) * factor
+function _estimate_magnitudes(
+    m::FiniteDifferenceMethod{P,Q}, f::TF, x::T,
+) where {P,Q,TF<:Function,T<:AbstractFloat}
+    step = first(estimate_step(m, f, x))
+    fs = _eval_function(m, f, x, step)
+    # Estimate magnitude of `∇f` in a neighbourhood of `x`.
+    ∇fs = SVector{3}(
+        _compute_estimate(m, fs, x, step, m.coefs_neighbourhood[1]),
+        _compute_estimate(m, fs, x, step, m.coefs_neighbourhood[2]),
+        _compute_estimate(m, fs, x, step, m.coefs_neighbourhood[3])
+    )
+    ∇f_magnitude = maximum(maximum.(abs, ∇fs))
+    # Estimate magnitude of `f` in a neighbourhood of `x`.
+    f_magnitude = maximum(maximum.(abs, fs))
+    return ∇f_magnitude, f_magnitude
+end
 
-    # Estimate the bound on the derivatives.
-    M = m.bound_estimator(f, x)
+function _compute_step_acc_default(m::FiniteDifferenceMethod, x::T) where {T<:AbstractFloat}
+    # Compute a default step size using a heuristic and [`DEFAULT_CONDITION`](@ref).
+    return _compute_step_acc(m, m.condition, eps(T))
+end
 
+function _compute_step_acc(
+    m::FiniteDifferenceMethod{P,Q}, ∇f_magnitude::Real, f_error::Real,
+) where {P,Q}
     # Set the step size by minimising an upper bound on the error of the estimate.
-    C₁ = ε * sum(abs, m.coefs)
-    C₂ = M * sum(n -> abs(m.coefs[n] * m.grid[n]^p), eachindex(m.coefs)) / factorial(p)
-    # Type inference fails on this, so we annotate it, which gives big performance benefits.
-    h::T = convert(T, min((q / (p - q) * (C₁ / C₂))^(1 / p), max_step))
-
+    C₁ = f_error * m.f_error_mult * m.factor
+    C₂ = ∇f_magnitude * m.∇f_magnitude_mult
+    step = (Q / (P - Q) * (C₁ / C₂))^(1 / P)
     # Estimate the accuracy of the method.
-    accuracy = h^(-q) * C₁ + h^(p - q) * C₂
+    acc = C₁ * step^(-Q) + C₂ * step^(P - Q)
+    return step, acc
+end
 
-    return h, accuracy
+function _limit_step(
+    m::FiniteDifferenceMethod, x::T, step::Real, acc::Real,
+) where {T<:AbstractFloat}
+    # First, limit the step size based on the maximum range.
+    step_max = m.max_range / maximum(abs.(m.grid))
+    if step > step_max
+        step = step_max
+        acc = NaN
+    end
+    # Second, prevent very large step sizes, which can occur for high-order methods or
+    # slowly-varying functions.
+    step_default, _ = _compute_step_acc_default(m, x)
+    step_max_default = 1000step_default
+    if step > step_max_default
+        step = step_max_default
+        acc = NaN
+    end
+    return step, acc
 end
 
 for direction in [:forward, :central, :backward]
     fdm_fun = Symbol(direction, "_fdm")
     grid_fun = Symbol("_", direction, "_grid")
-    @eval begin function $fdm_fun(
+    @eval begin
+        function $fdm_fun(
             p::Int,
             q::Int;
             adapt::Int=1,
             condition::Real=DEFAULT_CONDITION,
+            factor::Real=DEFAULT_FACTOR,
+            max_range::Real=Inf,
             geom::Bool=false
         )
             _check_p_q(p, q)
             grid = $grid_fun(p)
             geom && (grid = _exponentiate_grid(grid))
-            coefs = _coefs(grid, q)
-            return FiniteDifferenceMethod(
-                grid,
-                q,
-                coefs,
-                _make_adaptive_bound_estimator($fdm_fun, p, q, adapt, condition, geom=geom),
-            )
+            coefs, coefs_nbhd, ∇f_magnitude_mult, f_error_mult = _coefs_mults(grid, q)
+            if adapt >= 1
+                bound_estimator = $fdm_fun(
+                    # We need to increase the order by two to be able to estimate the
+                    # magnitude of the derivative of `f` in a neighbourhood of `x`.
+                    p + 2,
+                    p;
+                    adapt=adapt - 1,
+                    condition=condition,
+                    factor=factor,
+                    max_range=max_range,
+                    geom=geom
+                )
+                return AdaptedFiniteDifferenceMethod{p, q, typeof(bound_estimator)}(
+                    grid,
+                    coefs,
+                    coefs_nbhd,
+                    condition,
+                    factor,
+                    max_range,
+                    ∇f_magnitude_mult,
+                    f_error_mult,
+                    bound_estimator
+                )
+            else
+                return UnadaptedFiniteDifferenceMethod{p, q}(
+                    grid,
+                    coefs,
+                    coefs_nbhd,
+                    condition,
+                    factor,
+                    max_range,
+                    ∇f_magnitude_mult,
+                    f_error_mult
+                )
+            end
         end
 
         @doc """
@@ -323,11 +485,12 @@ for direction in [:forward, :central, :backward]
         q::Int;
         adapt::Int=1,
         condition::Real=DEFAULT_CONDITION,
+        factor::Real=DEFAULT_FACTOR,
+        max_range::Real=Inf,
         geom::Bool=false
     )
 
-Contruct a finite difference method at a $($(Meta.quot(direction))) grid of `p` linearly
-spaced points.
+Construct a finite difference method on a $($(Meta.quot(direction))) grid of `p` points.
 
 # Arguments
 - `p::Int`: Number of grid points.
@@ -338,6 +501,9 @@ spaced points.
     `p`th order derivative, which is important for the step size computation. Recurse
     this procedure `adapt` times.
 - `condition::Real`: Condition number. See [`DEFAULT_CONDITION`](@ref).
+- `factor::Real`: Factor number. See [`DEFAULT_FACTOR`](@ref).
+- `max_range::Real=Inf`: Maximum distance that a function is evaluated from the input at
+    which the derivative is estimated.
 - `geom::Bool`: Use geometrically spaced points instead of linearly spaced points.
 
 # Returns
@@ -346,39 +512,27 @@ spaced points.
     end
 end
 
-function _make_adaptive_bound_estimator(
-    constructor::Function,
-    p::Int,
-    q::Int,
-    adapt::Int,
-    condition::Int;
-    kw_args...
-)
-    if adapt >= 1
-        estimate_derivative = constructor(
-            p + 1, p, adapt=adapt - 1, condition=condition; kw_args...
-        )
-        return (f, x) -> estimate_magitude(x′ -> estimate_derivative(f, x′), x)
-    else
-        return _make_default_bound_estimator(condition=condition)
-    end
-end
+_forward_grid(p::Int) = SVector{p}(0:(p - 1))
 
-_forward_grid(p::Int) = collect(0:(p - 1))
-
-_backward_grid(p::Int) = collect((1 - p):0)
+_backward_grid(p::Int) = SVector{p}((1 - p):0)
 
 function _central_grid(p::Int)
     if isodd(p)
-        return collect(div(1 - p, 2):div(p - 1, 2))
+        return SVector{p}(div(1 - p, 2):div(p - 1, 2))
     else
-        return vcat(div(-p, 2):-1, 1:div(p, 2))
+        return SVector{p}((div(-p, 2):-1)..., (1:div(p, 2))...)
     end
 end
 
-_exponentiate_grid(grid::Vector, base::Int=3) = sign.(grid) .* base .^ abs.(grid) ./ base
+function _exponentiate_grid(grid::SVector{P,Int}, base::Int=3) where P
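+    # Turn a linearly spaced grid such as `[-2, -1, 0, 1, 2]` into a geometrically spaced
+    # grid such as `[-3, -1, 0, 1, 3]` (here with `base = 3`).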
+    return sign.(grid) .* div.(base .^ abs.(grid), base)
+end
 
-function _is_symmetric(vec::Vector; centre_zero::Bool=false, negate_half::Bool=false)
+function _is_symmetric(
+    vec::SVector{P};
+    centre_zero::Bool=false,
+    negate_half::Bool=false
+) where P
     half_sign = negate_half ? -1 : 1
     if isodd(length(vec))
         centre_zero && vec[end ÷ 2 + 1] != 0 && return false
@@ -398,14 +552,14 @@ end
     extrapolate_fdm(
         m::FiniteDifferenceMethod,
         f::Function,
-        x::T,
-        h::Real=0.1 * max(abs(x), one(x));
-        power=nothing,
-        breaktol=Inf,
+        x::Real,
+        initial_step::Real=10;
+        power::Int=1,
+        breaktol::Real=Inf,
         kw_args...
-    ) where T<:AbstractFloat
+    )
 
-Use Richardson extrapolation to refine a finite difference method.
+Use Richardson extrapolation to refine the estimates of a finite difference method.
 
-Takes further in keyword arguments for `Richardson.extrapolate`. This method
-automatically sets `power = 2` if `m` is symmetric and `power = 1`. Moreover, it defaults
+Accepts further keyword arguments for `Richardson.extrapolate`. This method
+automatically sets `power = 2` if `power = 1` and `m` is symmetric. Moreover, it defaults
@@ -414,8 +568,8 @@ automatically sets `power = 2` if `m` is symmetric and `power = 1`. Moreover, it
 # Arguments
 - `m::FiniteDifferenceMethod`: Finite difference method to estimate the step size for.
 - `f::Function`: Function to evaluate the derivative of.
-- `x::T`: Point to estimate the derivative at.
-- `h::Real=0.1 * max(abs(x), one(x))`: Initial step size.
+- `x::Real`: Point to estimate the derivative at.
+- `initial_step::Real=10`: Initial step size.
 
 # Returns
 - `Tuple{<:AbstractFloat, <:AbstractFloat}`: Estimate of the derivative and error.
@@ -423,12 +577,18 @@ automatically sets `power = 2` if `m` is symmetric and `power = 1`. Moreover, it
 function extrapolate_fdm(
     m::FiniteDifferenceMethod,
     f::Function,
-    x::T,
-    h::Real=0.1 * max(abs(x), one(x));
+    x::Real,
+    initial_step::Real=10;
     power::Int=1,
     breaktol::Real=Inf,
     kw_args...
-) where T<:AbstractFloat
+)
     (power == 1 && _is_symmetric(m)) && (power = 2)
-    return extrapolate(h -> m(f, x, h), h; power=power, breaktol=breaktol, kw_args...)
+    return extrapolate(
+        step -> m(f, x, step),
+        float(initial_step);
+        power=power,
+        breaktol=breaktol,
+        kw_args...
+    )
 end
diff --git a/test/methods.jl b/test/methods.jl
index 83a1d55f..3c8b132a 100644
--- a/test/methods.jl
+++ b/test/methods.jl
@@ -1,70 +1,80 @@
-import FiniteDifferences: estimate_magitude, estimate_roundoff_error
-
 @testset "Methods" begin
-    @testset "estimate_magitude" begin
-        f64(x::Float64) = x
-        f64_int(x::Float64) = Int(10x)
-        @test estimate_magitude(f64, 0.0) === 0.1
-        @test estimate_magitude(f64, 1.0) === 1.0
-        @test estimate_magitude(f64_int, 0.0) === 1.0
-        @test estimate_magitude(f64_int, 1.0) === 10.0
-
-        f32(x::Float32) = x
-        f32_int(x::Float32) = Int(10 * x)
-        @test estimate_magitude(f32, 0f0) === 0.1f0
-        @test estimate_magitude(f32, 1f0) === 1f0
-        # In this case, the `Int` is converted with `float`, so we expect a `Float64`.
-        @test estimate_magitude(f32_int, 0f0) === 1.0
-        @test estimate_magitude(f32_int, 1f0) === 10.0
-    end
-
-    @testset "estimate_roundoff_error" begin
-        # `Float64`s:
-        @test estimate_roundoff_error(identity, 1.0) == eps(1.0)
-        #   Pertubation from `estimate_magitude`:
-        @test estimate_roundoff_error(identity, 0.0) == eps(0.1)
+    @testset "Correctness" begin
+        # Finite difference methods to test.
+        methods = [forward_fdm, backward_fdm, central_fdm]
+
+        # The different floating point types to try.
+        types = [Float32, Float64]
+
+        # The different functions to evaluate (`.f`), their first derivative at 1 (`.d1`),
+        # and their second derivative at 1 (`.d2`).
+        fs = [
+            (f=sin, d1=cos(1), d2=-sin(1), only_forward=false),
+            (f=exp, d1=exp(1), d2=exp(1), only_forward=false),
+            (f=abs2, d1=2, d2=2, only_forward=false),
+            (f=x -> sqrt(x + 1), d1=0.5 / sqrt(2), d2=-0.25 / 2^(3/2), only_forward=true),
+        ]
+
+        # Test all combinations of the above settings, i.e. differentiate all functions
+        # using all methods and data types.
+        @testset "f=$(f.f), method=$m, type=$(T)" for f in fs, m in methods, T in types
+            # Check if only `forward_fdm` is allowed.
+            f.only_forward && m != forward_fdm && continue
+
+            @testset "method-order=$order" for order in [1, 2, 3, 4, 5]
+                @test m(order, 0; adapt=2)(f.f, T(1)) isa T
+                @test m(order, 0; adapt=2)(f.f, T(1)) == T(f.f(1))
+            end
 
-        # `Float32`s:
-        @test estimate_roundoff_error(identity, 1f0) == eps(1f0)
-        #   Pertubation from `estimate_magitude`:
-        @test estimate_roundoff_error(identity, 0.0f0) == eps(0.1f0)
+            @test m(10, 1)(f.f, T(1)) isa T
+            @test m(10, 1)(f.f, T(1)) ≈ T(f.d1)
 
-        # Test lower bound of `eps(T) / 1000`.
-        @test estimate_roundoff_error(x -> 1e-100, 0.0) == eps(1.0) / 1000
-        @test estimate_roundoff_error(x -> 1f-100, 0f0) == eps(1f0) / 1000
+            @test m(10, 2)(f.f, T(1)) isa T
+            T == Float64 && @test m(10, 2)(f.f, T(1)) ≈ T(f.d2)
+        end
     end
 
-    # The different approaches to approximating the gradient to try.
-    methods = [forward_fdm, backward_fdm, central_fdm]
-
-    # The different floating point types to try and the associated required relative
-    # tolerance.
-    types = [Float32, Float64]
-
-    # The different functions to evaluate (.f), their first derivative at 1 (.d1),
-    # and second derivative at 1 (.d2).
-    foos = [
-        (f=sin, d1=cos(1), d2=-sin(1)),
-        (f=exp, d1=exp(1), d2=exp(1)),
-        (f=abs2, d1=2, d2=2),
-        (f=x -> sqrt(x + 1), d1=0.5 / sqrt(2), d2=-0.25 / 2^(3/2)),
-    ]
-
-    # Test all combinations of the above settings, i.e. differentiate all functions using
-    # all methods and data types.
-    @testset "foo=$(foo.f), method=$m, type=$(T)" for foo in foos, m in methods, T in types
-        @testset "method-order=$order" for order in [1, 2, 3, 4, 5]
-            @test m(order, 0, adapt=2)(foo.f, T(1)) isa T
-            @test m(order, 0, adapt=2)(foo.f, T(1)) == T(foo.f(1))
+    @testset "Accuracy" begin
+        # Finite difference methods to test.
+        methods = [forward_fdm, backward_fdm, central_fdm]
+
+        # `f`s, `x`s, the derivatives of `f` at `x`, and absolute tolerances for the estimates.
+        fs = [
+            # See #125.
+            (f=x -> 0, x=0, d=0,           atol=0,     atol_central=0),
+            (f=x -> x, x=0, d=1,           atol=5e-15, atol_central=5e-16),
+            (f=exp, x=0, d=1,              atol=5e-13, atol_central=5e-14),
+            (f=sin, x=0, d=1,              atol=1e-14, atol_central=5e-16),
+            (f=cos, x=0, d=0,              atol=5e-14, atol_central=5e-14),
+            (f=exp, x=1, d=exp(1),         atol=5e-12, atol_central=5e-13),
+            (f=sin, x=1, d=cos(1),         atol=5e-13, atol_central=5e-14),
+            (f=cos, x=1, d=-sin(1),        atol=5e-13, atol_central=5e-14),
+            (f=sinc, x=0, d=0,             atol=5e-12, atol_central=5e-14),
+            # See #124. The tolerances are looser because `cosc` is hard to estimate.
+            (f=cosc, x=0, d=-(pi ^ 2) / 3, atol=5e-9,  atol_central=5e-10)
+        ]
+        @testset "f=$(f.f), method=$m" for f in fs, m in methods
+            atol = m == central_fdm ? f.atol_central : f.atol
+            @test m(6, 1)(f.f, f.x) ≈ f.d rtol=0 atol=atol
+            @test m(7, 1)(f.f, f.x) ≈ f.d rtol=0 atol=atol
+            if m == central_fdm
+                if f.f == cosc
+                    # `cosc` is hard.
+                    @test m(14, 1)(f.f, f.x) ≈ f.d atol=1e-13
+                else
+                    @test m(14, 1)(f.f, f.x) ≈ f.d atol=5e-15
+                end
+            end
         end
+    end
 
-        @test m(10, 1)(foo.f, T(1)) isa T
-        @test m(10, 1)(foo.f, T(1)) ≈ T(foo.d1)
+    @testset "Test custom grid" begin
+        @test FiniteDifferenceMethod([-2, 0, 5], 1)(sin, 1) ≈ cos(1)
+    end
 
-        @test m(10, 2)(foo.f, T(1)) isa T
-        if T == Float64
-            @test m(10, 2)(foo.f, T(1)) ≈ T(foo.d2)
-        end
+    @testset "Test allocations" begin
+        m = central_fdm(5, 2, adapt=2)
+        @test @ballocated($m(sin, 1)) == 0
     end
 
     # Integration test to ensure that Integer-output functions can be tested.
@@ -73,21 +83,20 @@ import FiniteDifferences: estimate_magitude, estimate_roundoff_error
     end
 
     @testset "Adaptation improves estimate" begin
-        @test forward_fdm(5, 1, adapt=0)(log, 0.001) ≈ 997.077814
+        @test abs(forward_fdm(5, 1, adapt=0)(log, 0.001) - 1000) > 1
         @test forward_fdm(5, 1, adapt=1)(log, 0.001) ≈ 1000
     end
 
     @testset "Limiting step size" begin
-        @test !isfinite(central_fdm(5, 1)(abs, 0.001, max_step=0))
+        @test !isfinite(central_fdm(5, 1, max_range=0)(abs, 0.001))
+        @test central_fdm(10, 1, max_range=9e-4)(log, 1e-3) ≈ 1000
         @test central_fdm(5, 1)(abs, 0.001) ≈ 1.0
     end
 
-    @testset "Accuracy at high orders, with high adapt" begin
-        # Regression test against issues with precision during computation of _coeffs
-        # see https://github.com/JuliaDiff/FiniteDifferences.jl/issues/64
-
-        @test central_fdm(9, 5, adapt=4, condition=1)(exp, 1.0) ≈ exp(1) atol=1e-7
-
+    @testset "Accuracy at high orders and high adaptation (issue #64)" begin
+        # Regression test against issues with precision during computation of coefficients.
+        @test central_fdm(9, 5, adapt=4)(exp, 1.0) ≈ exp(1) atol=2e-7
+        @test central_fdm(15, 5, adapt=2)(exp, 1.0) ≈ exp(1) atol=1e-10
         poly(x) = 4x^3 + 3x^2 + 2x + 1
         @test central_fdm(9, 3, adapt=4)(poly, 1.0) ≈ 24 atol=1e-11
     end
@@ -104,27 +113,27 @@ import FiniteDifferences: estimate_magitude, estimate_roundoff_error
 
     @testset "_is_symmetric" begin
         # Test odd grids:
-        @test FiniteDifferences._is_symmetric([2, 1, 0, 1, 2])
-        @test !FiniteDifferences._is_symmetric([2, 1, 0, 3, 2])
-        @test !FiniteDifferences._is_symmetric([4, 1, 0, 1, 2])
+        @test FiniteDifferences._is_symmetric(SVector{5}(2, 1, 0, 1, 2))
+        @test !FiniteDifferences._is_symmetric(SVector{5}(2, 1, 0, 3, 2))
+        @test !FiniteDifferences._is_symmetric(SVector{5}(4, 1, 0, 1, 2))
 
         # Test even grids:
-        @test FiniteDifferences._is_symmetric([2, 1, 1, 2])
-        @test !FiniteDifferences._is_symmetric([2, 1, 3, 2])
-        @test !FiniteDifferences._is_symmetric([4, 1, 1, 2])
+        @test FiniteDifferences._is_symmetric(SVector{4}(2, 1, 1, 2))
+        @test !FiniteDifferences._is_symmetric(SVector{4}(2, 1, 3, 2))
+        @test !FiniteDifferences._is_symmetric(SVector{4}(4, 1, 1, 2))
 
         # Test zero at centre:
-        @test FiniteDifferences._is_symmetric([2, 1, 4, 1, 2])
-        @test !FiniteDifferences._is_symmetric([2, 1, 4, 1, 2], centre_zero=true)
-        @test FiniteDifferences._is_symmetric([2, 1, 1, 2], centre_zero=true)
+        @test FiniteDifferences._is_symmetric(SVector{5}(2, 1, 4, 1, 2))
+        @test !FiniteDifferences._is_symmetric(SVector{5}(2, 1, 4, 1, 2), centre_zero=true)
+        @test FiniteDifferences._is_symmetric(SVector{4}(2, 1, 1, 2), centre_zero=true)
 
         # Test negation of a half:
-        @test !FiniteDifferences._is_symmetric([2, 1, -1, -2])
-        @test FiniteDifferences._is_symmetric([2, 1, -1, -2], negate_half=true)
-        @test FiniteDifferences._is_symmetric([2, 1, 0, -1, -2], negate_half=true)
-        @test FiniteDifferences._is_symmetric([2, 1, 4, -1, -2], negate_half=true)
+        @test !FiniteDifferences._is_symmetric(SVector{4}(2, 1, -1, -2))
+        @test FiniteDifferences._is_symmetric(SVector{4}(2, 1, -1, -2), negate_half=true)
+        @test FiniteDifferences._is_symmetric(SVector{5}(2, 1, 0, -1, -2), negate_half=true)
+        @test FiniteDifferences._is_symmetric(SVector{5}(2, 1, 4, -1, -2), negate_half=true)
         @test !FiniteDifferences._is_symmetric(
-            [2, 1, 4, -1, -2],
+            SVector{5}(2, 1, 4, -1, -2);
             negate_half=true,
             centre_zero=true
         )
@@ -153,13 +162,4 @@ import FiniteDifferences: estimate_magitude, estimate_roundoff_error
             end
         end
     end
-
-    @testset "Derivative of cosc at 0 (#124)" begin
-        @test central_fdm(5, 1)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=1e-9
-        @test central_fdm(10, 1, adapt=3)(cosc, 0) ≈ -(pi ^ 2) / 3 atol=5e-14
-    end
-
-    @testset "Derivative of a constant (#125)" begin
-        @test central_fdm(2, 1)(x -> 0, 0) ≈ 0 atol=1e-10
-    end
 end
diff --git a/test/runtests.jl b/test/runtests.jl
index b994063a..9caa6b75 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,3 +1,4 @@
+using BenchmarkTools
 using ChainRulesCore
 using FiniteDifferences
 using LinearAlgebra