Changed name of parameter 'Evals set', with added documentation #391

Open · wants to merge 1 commit into main
1 change: 1 addition & 0 deletions docs/src/manual.md
@@ -88,6 +88,7 @@ You can pass the following keyword arguments to `@benchmark`, `@benchmarkable`,
- `samples`: The number of samples to take. Execution will end if this many samples have been collected. Defaults to `BenchmarkTools.DEFAULT_PARAMETERS.samples = 10000`.
- `seconds`: The number of seconds budgeted for the benchmarking process. The trial will terminate if this time is exceeded (regardless of `samples`), but at least one sample will always be taken. In practice, actual runtime can overshoot the budget by the duration of a sample. Defaults to `BenchmarkTools.DEFAULT_PARAMETERS.seconds = 5`.
- `evals`: The number of evaluations per sample. For best results, this should be kept consistent between trials. A good guess for this value can be automatically set on a benchmark via `tune!`, but using `tune!` can be less consistent than setting `evals` manually (which bypasses tuning). Defaults to `BenchmarkTools.DEFAULT_PARAMETERS.evals = 1`. If the function you study mutates its input, it is probably a good idea to set `evals=1` manually.
- `needs_tune`: If `true`, executing this benchmark's trial automatically invokes tuning of the number of evaluations, irrespective of the `evals` parameter. Defaults to `BenchmarkTools.DEFAULT_PARAMETERS.needs_tune = true`.
- `overhead`: The estimated loop overhead per evaluation in nanoseconds, which is automatically subtracted from every sample time measurement. The default value is `BenchmarkTools.DEFAULT_PARAMETERS.overhead = 0`. `BenchmarkTools.estimate_overhead` can be called to determine this value empirically (which can then be set as the default value, if you want).
- `gctrial`: If `true`, run `gc()` before executing this benchmark's trial. Defaults to `BenchmarkTools.DEFAULT_PARAMETERS.gctrial = true`.
- `gcsample`: If `true`, run `gc()` before each sample. Defaults to `BenchmarkTools.DEFAULT_PARAMETERS.gcsample = false`.
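
For context (not part of this diff), a minimal sketch of how the keyword parameters documented above are typically passed to `@benchmark`, assuming the standard BenchmarkTools interpolation syntax:

```julia
using BenchmarkTools

x = rand(1000)

# Set evals manually (bypassing tuning), cap the run at 500 samples or 2 seconds,
# and run gc() before the trial. sort! mutates its input, so evals=1 is sensible here.
@benchmark sort!(copy($x)) evals=1 samples=500 seconds=2 gctrial=true
```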
4 changes: 2 additions & 2 deletions src/execution.jl
@@ -295,7 +295,7 @@ function tune!(
pad="",
kwargs...,
)
if !p.evals_set
if !p.needs_tune
estimate = ceil(Int, minimum(lineartrial(b, p; kwargs...)))
b.params.evals = guessevals(estimate)
end
@@ -318,7 +318,7 @@ function prunekwargs(args...)
if isa(ex, Expr) && ex.head == :(=)
ex.head = :kw
if ex.args[1] == :evals
push!(params, :(evals_set = true))
push!(params, :(needs_tune = true))
end
end
end
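
A rough usage sketch of the behavior this hunk is meant to preserve (an illustration, not part of the PR; it assumes an explicit `evals=...` keyword still causes `tune!` to leave `params.evals` untouched, as with the old `evals_set` flag):

```julia
using BenchmarkTools

# evals passed explicitly: the macro records this, so tune! keeps evals as given
b_manual = @benchmarkable sum($(rand(100))) evals=7
tune!(b_manual)
@assert b_manual.params.evals == 7

# evals not passed: tune! estimates a suitable value via lineartrial/guessevals
b_auto = @benchmarkable sum($(rand(100)))
tune!(b_auto)
@show b_auto.params.evals   # typically much larger than 1 for such a cheap call
```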
8 changes: 4 additions & 4 deletions src/parameters.jl
@@ -9,7 +9,7 @@ mutable struct Parameters
seconds::Float64
samples::Int
evals::Int
evals_set::Bool
needs_tune::Bool
overhead::Float64
gctrial::Bool
gcsample::Bool
@@ -23,7 +23,7 @@ function Parameters(;
seconds=DEFAULT_PARAMETERS.seconds,
samples=DEFAULT_PARAMETERS.samples,
evals=DEFAULT_PARAMETERS.evals,
evals_set=DEFAULT_PARAMETERS.evals_set,
needs_tune=DEFAULT_PARAMETERS.needs_tune,
overhead=DEFAULT_PARAMETERS.overhead,
gctrial=DEFAULT_PARAMETERS.gctrial,
gcsample=DEFAULT_PARAMETERS.gcsample,
@@ -34,7 +34,7 @@ function Parameters(;
seconds,
samples,
evals,
evals_set,
needs_tune,
overhead,
gctrial,
gcsample,
@@ -84,7 +84,7 @@ function Base.copy(p::Parameters)
p.seconds,
p.samples,
p.evals,
p.evals_set,
p.needs_tune,
p.overhead,
p.gctrial,
p.gcsample,
2 changes: 1 addition & 1 deletion src/serialization.jl
@@ -53,7 +53,7 @@ function recover(x::Vector)
if ft <: get(SUPPORTED_TYPES, nameof(ft), Union{})
xsi = recover(fields[fn])
else
xsi = if fn == "evals_set" && !haskey(fields, fn)
xsi = if fn == "needs_tune" && !haskey(fields, fn)
false
elseif fn in ("seconds", "overhead", "time_tolerance", "memory_tolerance") &&
fields[fn] === nothing
4 changes: 2 additions & 2 deletions test/SerializationTests.jl
@@ -99,14 +99,14 @@ end
@test_throws ArgumentError BenchmarkTools.recover([1])
end

@testset "Backwards Comppatibility with evals_set" begin
@testset "Backwards Comppatibility with needs_tune" begin
json_string = "[{\"Julia\":\"1.11.0-DEV.1116\",\"BenchmarkTools\":\"1.4.0\"},[[\"Parameters\",{\"gctrial\":true,\"time_tolerance\":0.05,\"samples\":10000,\"evals\":1,\"gcsample\":false,\"seconds\":5.0,\"overhead\":0.0,\"memory_tolerance\":0.01}]]]"
json_io = IOBuffer(json_string)

@test BenchmarkTools.load(json_io) ==
[BenchmarkTools.Parameters(5.0, 10000, 1, false, 0.0, true, false, 0.05, 0.01)]

json_string = "[{\"Julia\":\"1.11.0-DEV.1116\",\"BenchmarkTools\":\"1.4.0\"},[[\"Parameters\",{\"gctrial\":true,\"time_tolerance\":0.05,\"evals_set\":true,\"samples\":10000,\"evals\":1,\"gcsample\":false,\"seconds\":5.0,\"overhead\":0.0,\"memory_tolerance\":0.01}]]]"
json_string = "[{\"Julia\":\"1.11.0-DEV.1116\",\"BenchmarkTools\":\"1.4.0\"},[[\"Parameters\",{\"gctrial\":true,\"time_tolerance\":0.05,\"needs_tune\":true,\"samples\":10000,\"evals\":1,\"gcsample\":false,\"seconds\":5.0,\"overhead\":0.0,\"memory_tolerance\":0.01}]]]"
json_io = IOBuffer(json_string)

@test BenchmarkTools.load(json_io) ==