From 0b2279f301772bbb3651bfd14f56e92090af59e0 Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Tue, 23 Apr 2024 10:03:29 +0100 Subject: [PATCH 01/31] moved new Gibbs tests all into a single block --- test/experimental/gibbs.jl | 93 +++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 47 deletions(-) diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl index 29713d2d55..4101788db5 100644 --- a/test/experimental/gibbs.jl +++ b/test/experimental/gibbs.jl @@ -24,60 +24,59 @@ const DEMO_MODELS_WITHOUT_DOT_ASSUME = Union{ has_dot_assume(::DEMO_MODELS_WITHOUT_DOT_ASSUME) = false has_dot_assume(::Model) = true -# Likely an issue with not linking correctly. -@testset "Demo models" begin - @testset "$(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS - vns = DynamicPPL.TestUtils.varnames(model) - # Run one sampler on variables starting with `s` and another on variables starting with `m`. - vns_s = filter(vns) do vn - DynamicPPL.getsym(vn) == :s - end - vns_m = filter(vns) do vn - DynamicPPL.getsym(vn) == :m - end +@testset "Gibbs using `condition`" begin + @testset "Demo models" begin + @testset "$(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS + vns = DynamicPPL.TestUtils.varnames(model) + # Run one sampler on variables starting with `s` and another on variables starting with `m`. + vns_s = filter(vns) do vn + DynamicPPL.getsym(vn) == :s + end + vns_m = filter(vns) do vn + DynamicPPL.getsym(vn) == :m + end - samplers = [ - Turing.Experimental.Gibbs( - vns_s => NUTS(), - vns_m => NUTS(), - ), - Turing.Experimental.Gibbs( - vns_s => NUTS(), - vns_m => HMC(0.01, 4), - ) - ] - - if !has_dot_assume(model) - # Add in some MH samplers, which are not compatible with `.~`. - append!( - samplers, - [ - Turing.Experimental.Gibbs( - vns_s => HMC(0.01, 4), - vns_m => MH(), - ), - Turing.Experimental.Gibbs( - vns_s => MH(), - vns_m => HMC(0.01, 4), - ) - ] - ) - end + samplers = [ + Turing.Experimental.Gibbs( + vns_s => NUTS(), + vns_m => NUTS(), + ), + Turing.Experimental.Gibbs( + vns_s => NUTS(), + vns_m => HMC(0.01, 4), + ) + ] + + if !has_dot_assume(model) + # Add in some MH samplers, which are not compatible with `.~`. + append!( + samplers, + [ + Turing.Experimental.Gibbs( + vns_s => HMC(0.01, 4), + vns_m => MH(), + ), + Turing.Experimental.Gibbs( + vns_s => MH(), + vns_m => HMC(0.01, 4), + ) + ] + ) + end - @testset "$sampler" for sampler in samplers - # Check that taking steps performs as expected. - rng = Random.default_rng() - transition, state = AbstractMCMC.step(rng, model, DynamicPPL.Sampler(sampler)) - check_transition_varnames(transition, vns) - for _ = 1:5 - transition, state = AbstractMCMC.step(rng, model, DynamicPPL.Sampler(sampler), state) + @testset "$sampler" for sampler in samplers + # Check that taking steps performs as expected. 
+ rng = Random.default_rng() + transition, state = AbstractMCMC.step(rng, model, DynamicPPL.Sampler(sampler)) check_transition_varnames(transition, vns) + for _ = 1:5 + transition, state = AbstractMCMC.step(rng, model, DynamicPPL.Sampler(sampler), state) + check_transition_varnames(transition, vns) + end end end end -end -@testset "Gibbs using `condition`" begin @testset "demo_assume_dot_observe" begin model = DynamicPPL.TestUtils.demo_assume_dot_observe() From dcad5482db60ec1e3c2bb459db541ff97cf632f5 Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Tue, 23 Apr 2024 10:30:22 +0100 Subject: [PATCH 02/31] initial work on making Gibbs work with `externalsampler` --- src/experimental/gibbs.jl | 39 +++++++++++++++++++++++-------- src/mcmc/Inference.jl | 2 ++ src/mcmc/abstractmcmc.jl | 49 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 10 deletions(-) diff --git a/src/experimental/gibbs.jl b/src/experimental/gibbs.jl index 1e51cc9f11..6ce7bf4033 100644 --- a/src/experimental/gibbs.jl +++ b/src/experimental/gibbs.jl @@ -238,6 +238,9 @@ function Gibbs(algs::Pair...) return Gibbs(map(first, algs), map(wrap_algorithm_maybe, map(last, algs))) end +# TODO: Remove when no longer needed. +DynamicPPL.getspace(::Gibbs) = () + struct GibbsState{V<:DynamicPPL.AbstractVarInfo,S} vi::V states::S @@ -252,6 +255,7 @@ function DynamicPPL.initialstep( model::DynamicPPL.Model, spl::DynamicPPL.Sampler{<:Gibbs}, vi_base::DynamicPPL.AbstractVarInfo; + initial_params=nothing, kwargs..., ) alg = spl.alg @@ -260,15 +264,35 @@ function DynamicPPL.initialstep( # 1. Run the model once to get the varnames present + initial values to condition on. vi_base = DynamicPPL.VarInfo(model) + + # Simple way of setting the initial parameters: set them in the `vi_base` + # if they are given so they propagate to the subset varinfos used by each sampler. + if initial_params !== nothing + vi_base = DynamicPPL.unflatten(vi_base, initial_params) + end + + # Create the varinfos for each sampler. varinfos = map(Base.Fix1(DynamicPPL.subset, vi_base) ∘ _maybevec, varnames) + initial_params_all = if initial_params === nothing + fill(nothing, length(varnames)) + else + # Extract from the `vi_base`, which should have the values set correctly from above. + map(vi -> vi[:], varinfos) + end # 2. Construct a varinfo for every vn + sampler combo. - states_and_varinfos = map(samplers, varinfos) do sampler_local, varinfo_local + states_and_varinfos = map(samplers, varinfos, initial_params_all) do sampler_local, varinfo_local, initial_params_local # Construct the conditional model. model_local = make_conditional(model, varinfo_local, varinfos) # Take initial step. - new_state_local = last(AbstractMCMC.step(rng, model_local, sampler_local; kwargs...)) + new_state_local = last(AbstractMCMC.step( + rng, model_local, sampler_local; + # FIXME: This will cause issues if the sampler expects initial params in unconstrained space. + # This is not the case for any samplers in Turing.jl, but will be for external samplers, etc. + initial_params=initial_params_local, + kwargs... + )) # Return the new state and the invlinked `varinfo`. vi_local_state = Turing.Inference.varinfo(new_state_local) @@ -365,12 +389,7 @@ function gibbs_requires_recompute_logprob(model_dst, sampler_dst, sampler_src, s end # TODO: Remove `rng`? -""" - recompute_logprob!!(rng, model, sampler, state) - -Recompute the log-probability of the `model` based on the given `state` and return the resulting state. 
-""" -function recompute_logprob!!( +function Turing.Inference.recompute_logprob!!( rng::Random.AbstractRNG, model::DynamicPPL.Model, sampler::DynamicPPL.Sampler, @@ -436,7 +455,7 @@ function gibbs_step_inner( state_local, state_previous ) - current_state_local = recompute_logprob!!( + state_local = Turing.Inference.recompute_logprob!!( rng, model_local, sampler_local, @@ -450,7 +469,7 @@ function gibbs_step_inner( rng, model_local, sampler_local, - current_state_local; + state_local; kwargs..., ), ) diff --git a/src/mcmc/Inference.jl b/src/mcmc/Inference.jl index b990dac67c..2c0d1a3970 100644 --- a/src/mcmc/Inference.jl +++ b/src/mcmc/Inference.jl @@ -89,6 +89,8 @@ struct ExternalSampler{S<:AbstractSampler} <: InferenceAlgorithm sampler::S end +DynamicPPL.getspace(::ExternalSampler) = () + """ externalsampler(sampler::AbstractSampler) diff --git a/src/mcmc/abstractmcmc.jl b/src/mcmc/abstractmcmc.jl index 00be16e3b8..afc070ac55 100644 --- a/src/mcmc/abstractmcmc.jl +++ b/src/mcmc/abstractmcmc.jl @@ -12,10 +12,19 @@ function transition_to_turing(f::DynamicPPL.LogDensityFunction, transition) return Transition(f.model, varinfo, transition) end +function varinfo(state::TuringState) + θ = getparams(state.logdensity.model, state.state) + # TODO: Do we need to link here first? + return DynamicPPL.unflatten(state.logdensity.varinfo, θ) +end + # NOTE: Only thing that depends on the underlying sampler. # Something similar should be part of AbstractMCMC at some point: # https://github.com/TuringLang/AbstractMCMC.jl/pull/86 getparams(::DynamicPPL.Model, transition::AdvancedHMC.Transition) = transition.z.θ +function getparams(model::DynamicPPL.Model, state::AdvancedHMC.HMCState) + return getparams(model, state.transition) +end getstats(transition::AdvancedHMC.Transition) = transition.stat getparams(::DynamicPPL.Model, transition::AdvancedMH.Transition) = transition.params @@ -26,6 +35,46 @@ getvarinfo(f::LogDensityProblemsAD.ADGradientWrapper) = getvarinfo(parent(f)) setvarinfo(f::DynamicPPL.LogDensityFunction, varinfo) = Setfield.@set f.varinfo = varinfo setvarinfo(f::LogDensityProblemsAD.ADGradientWrapper, varinfo) = setvarinfo(parent(f), varinfo) +""" + recompute_logprob!!(rng, model, sampler, state) + +Recompute the log-probability of the `model` based on the given `state` and return the resulting state. +""" +function recompute_logprob!!( + rng::Random.AbstractRNG, + model::DynamicPPL.Model, + sampler::DynamicPPL.Sampler{<:ExternalSampler}, + state +) + # Re-using the log-density function from the `state` and updating only the `model` field. + f = state.logdensity + f = Setfield.@set f.model = model + # Recompute the log-probability with the new `model`. + state_inner = recompute_logprob!!( + rng, + AbstractMCMC.LogDensityModel(f), + sampler.alg.sampler, + state.state + ) + return state_to_turing(f, state_inner) +end + +function recompute_logprob!!( + rng::Random.AbstractRNG, + model::AbstractMCMC.LogDensityModel, + sampler::AdvancedHMC.AbstractHMCSampler, + state::AdvancedHMC.HMCState +) + # Construct hamiltionian. + hamiltonian = AdvancedHMC.Hamiltonian(state.metric, model) + # Re-compute the log-probability and gradient. + return Setfield.@set state.transition.z = AdvancedHMC.phasepoint( + hamiltonian, + state.transition.z.θ, + state.transition.z.r, + ) +end + # TODO: Do we also support `resume`, etc? 
function AbstractMCMC.step( rng::Random.AbstractRNG, From 4a609cb2f8570eafdc79bf1478eabe0ca572fde6 Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Sat, 18 May 2024 19:36:58 +0200 Subject: [PATCH 03/31] removed references to Setfield.jl --- src/mcmc/abstractmcmc.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/mcmc/abstractmcmc.jl b/src/mcmc/abstractmcmc.jl index 1e33c9fceb..81cfee9a9f 100644 --- a/src/mcmc/abstractmcmc.jl +++ b/src/mcmc/abstractmcmc.jl @@ -55,7 +55,7 @@ function recompute_logprob!!( ) # Re-using the log-density function from the `state` and updating only the `model` field. f = state.logdensity - f = Setfield.@set f.model = model + f = Accessors.@set f.model = model # Recompute the log-probability with the new `model`. state_inner = recompute_logprob!!( rng, @@ -75,7 +75,7 @@ function recompute_logprob!!( # Construct hamiltionian. hamiltonian = AdvancedHMC.Hamiltonian(state.metric, model) # Re-compute the log-probability and gradient. - return Setfield.@set state.transition.z = AdvancedHMC.phasepoint( + return Accessors.@set state.transition.z = AdvancedHMC.phasepoint( hamiltonian, state.transition.z.θ, state.transition.z.r, From fc21894ccdc139ed3afbe2c40147b330ef6e5eb4 Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Sat, 18 May 2024 21:57:11 +0100 Subject: [PATCH 04/31] fixed crucial bug in experimental Gibbs sampler --- src/experimental/gibbs.jl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/experimental/gibbs.jl b/src/experimental/gibbs.jl index dce3c64c2b..f7d8fd848a 100644 --- a/src/experimental/gibbs.jl +++ b/src/experimental/gibbs.jl @@ -78,7 +78,7 @@ function DynamicPPL.dot_tilde_assume(context::GibbsContext, right, left, vns, vi # Short-circuits the tilde assume if `vn` is present in `context`. if has_conditioned_gibbs(context, vns) value = reconstruct_getvalue(right, get_conditioned_gibbs(context, vns)) - return value, broadcast_logpdf(right, values), vi + return value, broadcast_logpdf(right, value), vi end # Otherwise, falls back to the default behavior. @@ -90,8 +90,8 @@ function DynamicPPL.dot_tilde_assume( ) # Short-circuits the tilde assume if `vn` is present in `context`. if has_conditioned_gibbs(context, vns) - values = reconstruct_getvalue(right, get_conditioned_gibbs(context, vns)) - return values, broadcast_logpdf(right, values), vi + value = reconstruct_getvalue(right, get_conditioned_gibbs(context, vns)) + return value, broadcast_logpdf(right, value), vi end # Otherwise, falls back to the default behavior. @@ -144,14 +144,14 @@ end Return a `GibbsContext` with the values extracted from the given `varinfos` treated as conditioned. """ function condition_gibbs(context::DynamicPPL.AbstractContext, varinfo::DynamicPPL.AbstractVarInfo) - return DynamicPPL.condition(context, DynamicPPL.values_as(varinfo, preferred_value_type(varinfo))) + return condition_gibbs(context, DynamicPPL.values_as(varinfo, preferred_value_type(varinfo))) end -function DynamicPPL.condition( +function condition_gibbs( context::DynamicPPL.AbstractContext, varinfo::DynamicPPL.AbstractVarInfo, varinfos::DynamicPPL.AbstractVarInfo... ) - return DynamicPPL.condition(DynamicPPL.condition(context, varinfo), varinfos...) + return condition_gibbs(condition_gibbs(context, varinfo), varinfos...) end # Allow calling this on a `DynamicPPL.Model` directly. function condition_gibbs(model::DynamicPPL.Model, values...) 
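With the conditioning machinery now fixed by the patch above, here is a minimal sketch of the varname-to-sampler pairing interface that the experimental Gibbs constructor accepts (illustrative only; `gdemo_sketch` is a stand-in for the `gdemo` model used in the tests):

using Turing
using DynamicPPL

@model function gdemo_sketch(x, y)
    s ~ InverseGamma(2, 3)
    m ~ Normal(0, sqrt(s))
    x ~ Normal(m, sqrt(s))
    y ~ Normal(m, sqrt(s))
end

# Each pair maps a varname (or tuple of varnames) to the component sampler
# that updates it during each Gibbs sweep.
alg = Turing.Experimental.Gibbs(
    @varname(s) => MH(),
    @varname(m) => HMC(0.01, 4),
)
chain = sample(gdemo_sketch(1.5, 2.0), alg, 1_000; progress=false)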
From 99109623bbf74e4ecec4127f2fdaab115bf9923a Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Sat, 18 May 2024 22:35:13 +0100 Subject: [PATCH 05/31] added ground-truth comparison for Gibbs sampler on demo models --- test/experimental/gibbs.jl | 61 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl index a6a86095c6..c973a149b2 100644 --- a/test/experimental/gibbs.jl +++ b/test/experimental/gibbs.jl @@ -74,6 +74,67 @@ has_dot_assume(::Model) = true check_transition_varnames(transition, vns) end end + + @testset "comparison with (old) Gibbs" begin + # We will run `num_iterations` and then subsample `num_samples` from the chain. + # All the chains should converge easily, so by subsampling to a much lower + # number of samples, we should be left with something we can compare easily to + # "ground truth" samples (using NUTS here, but should implement exact sampling). + num_samples = 1_000 + num_iterations = 5_000 + num_chains = 4 + + # Determine initial parameters to make comparison as fair as possible. + posterior_mean = DynamicPPL.TestUtils.posterior_mean(model) + initial_params = DynamicPPL.TestUtils.update_values!!( + DynamicPPL.VarInfo(model), + posterior_mean, + DynamicPPL.TestUtils.varnames(model), + )[:] + initial_params = fill(initial_params, num_chains) + + # Sampler to use for Gibbs components. + sampler_inner = HMC(0.1, 32) + sampler = Turing.Experimental.Gibbs( + vns_s => sampler_inner, + vns_m => sampler_inner, + ) + chain = sample( + sample( + model, + sampler, + MCMCThreads(), + num_iterations, + num_chains; + progress = false, + initial_params = initial_params, + discard_initial = 1_000, + ), + num_samples, + ) + + # "Ground truth" samples. + # TODO: Replace with closed-form sampling once that is implemented in DynamicPPL. + chain_true = sample( + sample( + model, + NUTS(), + MCMCThreads(), + num_iterations, + num_chains; + progress = false, + initial_params = initial_params, + ), + num_samples, + ) + + # Perform KS test to ensure that the chains are similar. 
+                xs = Array(chain)
+                xs_true = Array(chain_true)
+                for i = 1:size(xs, 2)
+                    @test ks_test(xs[:, i], xs_true[:, i])
+                end
+            end
         end
     end

From b3a4692cd763fe965e3957a782f432fb44f1cdc7 Mon Sep 17 00:00:00 2001
From: Tor Erlend Fjelde
Date: Sat, 18 May 2024 22:36:14 +0100
Subject: [PATCH 06/31] added convenience method for performing two sample KS test

---
 test/test_utils/numerical_tests.jl | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/test/test_utils/numerical_tests.jl b/test/test_utils/numerical_tests.jl
index 333a3f14aa..f77b9c7e59 100644
--- a/test/test_utils/numerical_tests.jl
+++ b/test/test_utils/numerical_tests.jl
@@ -71,3 +71,7 @@ function check_MoGtest_default_z_vector(chain; atol=0.2, rtol=0.0)
         [1.0, 1.0, 2.0, 2.0, 1.0, 4.0],
         atol=atol, rtol=rtol)
 end
+
+function ks_test(xs_left, xs_right; pval = 0.01)
+    return pvalue(ApproximateTwoSampleKSTest(xs_left, xs_right)) > pval
+end
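For context: the `ks_test` helper added above is a thin wrapper around HypothesisTests.jl, returning `true` when the approximate two-sample Kolmogorov-Smirnov test does not reject at level `pval`. A minimal usage sketch (the `Normal` samples here are illustrative only, not part of the test suite):

using Distributions: Normal
using HypothesisTests: ApproximateTwoSampleKSTest, pvalue

xs = rand(Normal(), 1_000)
ys = rand(Normal(), 1_000)
ks_test(xs, ys)                          # same distribution: usually `true`
ks_test(xs, rand(Normal(2, 1), 1_000))   # shifted distribution: usually `false`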
From 3e17efca6a8a2fc0f318a7e1c395c833a0a6fbd5 Mon Sep 17 00:00:00 2001
From: Tor Erlend Fjelde
Date: Mon, 20 May 2024 09:56:29 +0100
Subject: [PATCH 07/31] use thinning to avoid OOM issues

---
 test/experimental/gibbs.jl | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl
index c973a149b2..89f0d467b7 100644
--- a/test/experimental/gibbs.jl
+++ b/test/experimental/gibbs.jl
@@ -80,8 +80,9 @@ has_dot_assume(::Model) = true
                 # All the chains should converge easily, so by subsampling to a much lower
                 # number of samples, we should be left with something we can compare easily to
                 # "ground truth" samples (using NUTS here, but should implement exact sampling).
-                num_samples = 1_000
-                num_iterations = 5_000
+                num_samples = 500
+                num_iterations = 1_000
+                thinning = 10
                 num_chains = 4

                 # Determine initial parameters to make comparison as fair as possible.
@@ -109,6 +110,7 @@ has_dot_assume(::Model) = true
                     progress = false,
                     initial_params = initial_params,
                     discard_initial = 1_000,
+                    thinning = thinning
                 ),
                 num_samples,
             )
@@ -124,6 +126,7 @@ has_dot_assume(::Model) = true
                 num_chains;
                 progress = false,
                 initial_params = initial_params,
+                thinning = thinning,
             ),
             num_samples,
         )

From 429fc8f64e3ce8fd42ab61bad5296436d7ebe306 Mon Sep 17 00:00:00 2001
From: Tor Erlend Fjelde
Date: Mon, 20 May 2024 11:33:05 +0100
Subject: [PATCH 08/31] removed incredibly slow testset that didn't really add much

---
 test/experimental/gibbs.jl        | 13 +++----------
 test/test_utils/numerical_tests.jl |  5 +++--
 2 files changed, 6 insertions(+), 12 deletions(-)

diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl
index 89f0d467b7..bd604eb52d 100644
--- a/test/experimental/gibbs.jl
+++ b/test/experimental/gibbs.jl
@@ -135,7 +135,7 @@ has_dot_assume(::Model) = true
                 xs = Array(chain)
                 xs_true = Array(chain_true)
                 for i = 1:size(xs, 2)
-                    @test ks_test(xs[:, i], xs_true[:, i])
+                    @test two_sample_ks_test(xs[:, i], xs_true[:, i]; pval=1e-3)
                 end
             end
         end
@@ -165,13 +165,6 @@ has_dot_assume(::Model) = true
         end
     end

-    @testset "gdemo with CSMC & ESS" begin
-        Random.seed!(100)
-        alg = Turing.Experimental.Gibbs(@varname(s) => CSMC(15), @varname(m) => ESS())
-        chain = sample(gdemo(1.5, 2.0), alg, 10_000)
-        check_gdemo(chain)
-    end
-
     @testset "multiple varnames" begin
         rng = Random.default_rng()

@@ -224,7 +217,7 @@ has_dot_assume(::Model) = true
         end

         # Sample!
-        chain = sample(MoGtest_default, alg, 1000; progress=true)
+        chain = sample(MoGtest_default, alg, 1000; progress=false)
         check_MoGtest_default(chain, atol = 0.2)
     end

@@ -246,7 +239,7 @@ has_dot_assume(::Model) = true
         end

         # Sample!
-        chain = sample(model, alg, 1000; progress=true)
+        chain = sample(model, alg, 1000; progress=false)
         check_MoGtest_default_z_vector(chain, atol = 0.2)
     end
 end

diff --git a/test/test_utils/numerical_tests.jl b/test/test_utils/numerical_tests.jl
index f77b9c7e59..041051becc 100644
--- a/test/test_utils/numerical_tests.jl
+++ b/test/test_utils/numerical_tests.jl
@@ -72,6 +72,7 @@ function check_MoGtest_default_z_vector(chain; atol=0.2, rtol=0.0)
         atol=atol, rtol=rtol)
 end

-function ks_test(xs_left, xs_right; pval = 0.01)
-    return pvalue(ApproximateTwoSampleKSTest(xs_left, xs_right)) > pval
+function two_sample_ks_test(xs_left, xs_right; pval = 0.01)
+    t = ApproximateTwoSampleKSTest(xs_left, xs_right)
+    return pvalue(t) > pval
 end

From f6af20e12a0e1dea4a25a3d9c579e018730ffd8a Mon Sep 17 00:00:00 2001
From: Tor Erlend Fjelde
Date: Mon, 20 May 2024 11:36:23 +0100
Subject: [PATCH 09/31] removed now-redundant testset

---
 test/experimental/gibbs.jl | 24 ------------------------
 1 file changed, 24 deletions(-)

diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl
index bd604eb52d..dabaab34c8 100644
--- a/test/experimental/gibbs.jl
+++ b/test/experimental/gibbs.jl
@@ -141,30 +141,6 @@ has_dot_assume(::Model) = true
         end
     end

-    @testset "demo_assume_dot_observe" begin
-        model = DynamicPPL.TestUtils.demo_assume_dot_observe()
-
-        # Sample!
-        rng = Random.default_rng()
-        vns = [@varname(s), @varname(m)]
-        sampler = Turing.Experimental.Gibbs(map(Base.Fix2(Pair, MH()), vns)...)
-
-        @testset "step" begin
-            transition, state = AbstractMCMC.step(rng, model, DynamicPPL.Sampler(sampler))
-            check_transition_varnames(transition, vns)
-            for _ = 1:5
-                transition, state = AbstractMCMC.step(rng, model, DynamicPPL.Sampler(sampler), state)
-                check_transition_varnames(transition, vns)
-            end
-        end
-
-        @testset "sample" begin
-            chain = sample(model, sampler, 1000; progress=false)
-            @test size(chain, 1) == 1000
-            display(mean(chain))
-        end
-    end
-
     @testset "multiple varnames" begin
         rng = Random.default_rng()

From 065eef6eb2d7d8122b4d76d82826ec9b2bf9cb28 Mon Sep 17 00:00:00 2001
From: Tor Erlend Fjelde
Date: Mon, 20 May 2024 12:14:53 +0100
Subject: [PATCH 10/31] use Anderson-Darling test instead of Kolmogorov-Smirnov
 to better capture tail differences + remove subsampling of chains since it
 doesn't really matter now that we're using aggressive thinning and test
 statistics based on comparing order stats

---
 test/experimental/gibbs.jl        | 54 ++++++++++++++----------------
 test/test_utils/numerical_tests.jl | 11 ++++--
 2 files changed, 33 insertions(+), 32 deletions(-)

diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl
index dabaab34c8..e4f2c865ce 100644
--- a/test/experimental/gibbs.jl
+++ b/test/experimental/gibbs.jl
@@ -76,11 +76,6 @@ has_dot_assume(::Model) = true
             end

             @testset "comparison with (old) Gibbs" begin
-                # We will run `num_iterations` and then subsample `num_samples` from the chain.
-                # All the chains should converge easily, so by subsampling to a much lower
-                # number of samples, we should be left with something we can compare easily to
-                # "ground truth" samples (using NUTS here, but should implement exact sampling).
- num_samples = 500 num_iterations = 1_000 thinning = 10 num_chains = 4 @@ -101,41 +96,42 @@ has_dot_assume(::Model) = true vns_m => sampler_inner, ) chain = sample( - sample( - model, - sampler, - MCMCThreads(), - num_iterations, - num_chains; - progress = false, - initial_params = initial_params, - discard_initial = 1_000, - thinning = thinning - ), - num_samples, + model, + sampler, + MCMCThreads(), + num_iterations, + num_chains; + progress=false, + initial_params=initial_params, + discard_initial=1_000, + thinning=thinning ) # "Ground truth" samples. # TODO: Replace with closed-form sampling once that is implemented in DynamicPPL. chain_true = sample( - sample( - model, - NUTS(), - MCMCThreads(), - num_iterations, - num_chains; - progress = false, - initial_params = initial_params, - thinning = thinning, - ), - num_samples, + model, + NUTS(), + MCMCThreads(), + num_iterations, + num_chains; + progress=false, + initial_params=initial_params, + thinning=thinning, ) # Perform KS test to ensure that the chains are similar. xs = Array(chain) xs_true = Array(chain_true) for i = 1:size(xs, 2) - @test two_sample_ks_test(xs[:, i], xs_true[:, i]; pval=1e-3) + @test two_sample_ks_test(xs[:, i], xs_true[:, i]) + # Let's make sure that the significance level is not too low by + # checking that the KS test fails for some simple transformations. + # TODO: Replace the heuristic below with closed-form implementations + # of the targets, once they are implemented in DynamicPPL. + @test !two_sample_ks_test(0.9 .* xs_true[:, i], xs_true[:, i]) + @test !two_sample_ks_test(1.1 .* xs_true[:, i], xs_true[:, i]) + @test !two_sample_ks_test(1e-1 .+ xs_true[:, i], xs_true[:, i]) end end end diff --git a/test/test_utils/numerical_tests.jl b/test/test_utils/numerical_tests.jl index 041051becc..392016cc98 100644 --- a/test/test_utils/numerical_tests.jl +++ b/test/test_utils/numerical_tests.jl @@ -72,7 +72,12 @@ function check_MoGtest_default_z_vector(chain; atol=0.2, rtol=0.0) atol=atol, rtol=rtol) end -function two_sample_ks_test(xs_left, xs_right; pval = 0.01) - t = ApproximateTwoSampleKSTest(xs_left, xs_right) - return pvalue(t) > pval +""" + two_sample_ad_test(xs_left, xs_right; α=1e-2) + +Perform a two-sample Anderson-Darling (AD) test on the two samples `xs_left` and `xs_right`. +""" +function two_sample_ks_test(xs_left, xs_right; α=1e-2) + t = KSampleADTest(xs_left, xs_right) + return pvalue(t) > α end From 99f28f9fe5d2584881080a3f759bc1b090d37b6a Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Thu, 6 Jun 2024 09:05:28 +0100 Subject: [PATCH 11/31] more work on testing --- test/experimental/gibbs.jl | 8 ++++---- test/test_utils/numerical_tests.jl | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl index e4f2c865ce..1f1417cbe7 100644 --- a/test/experimental/gibbs.jl +++ b/test/experimental/gibbs.jl @@ -124,14 +124,14 @@ has_dot_assume(::Model) = true xs = Array(chain) xs_true = Array(chain_true) for i = 1:size(xs, 2) - @test two_sample_ks_test(xs[:, i], xs_true[:, i]) + @test two_sample_ad_test(xs[:, i], xs_true[:, i]) # Let's make sure that the significance level is not too low by # checking that the KS test fails for some simple transformations. # TODO: Replace the heuristic below with closed-form implementations # of the targets, once they are implemented in DynamicPPL. 
- @test !two_sample_ks_test(0.9 .* xs_true[:, i], xs_true[:, i]) - @test !two_sample_ks_test(1.1 .* xs_true[:, i], xs_true[:, i]) - @test !two_sample_ks_test(1e-1 .+ xs_true[:, i], xs_true[:, i]) + @test !two_sample_ad_test(0.9 .* xs_true[:, i], xs_true[:, i]) + @test !two_sample_ad_test(1.1 .* xs_true[:, i], xs_true[:, i]) + @test !two_sample_ad_test(1e-1 .+ xs_true[:, i], xs_true[:, i]) end end end diff --git a/test/test_utils/numerical_tests.jl b/test/test_utils/numerical_tests.jl index 392016cc98..93f8e80795 100644 --- a/test/test_utils/numerical_tests.jl +++ b/test/test_utils/numerical_tests.jl @@ -73,11 +73,11 @@ function check_MoGtest_default_z_vector(chain; atol=0.2, rtol=0.0) end """ - two_sample_ad_test(xs_left, xs_right; α=1e-2) + two_sample_ad_test(xs_left, xs_right; α=1e-3) Perform a two-sample Anderson-Darling (AD) test on the two samples `xs_left` and `xs_right`. """ -function two_sample_ks_test(xs_left, xs_right; α=1e-2) +function two_sample_ad_test(xs_left, xs_right; α=1e-3) t = KSampleADTest(xs_left, xs_right) return pvalue(t) > α end From b6a907e72d40354d0d0c730e5141e8b75061c870 Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Thu, 6 Jun 2024 10:00:28 +0100 Subject: [PATCH 12/31] fixed tests --- test/experimental/gibbs.jl | 2 +- test/test_utils/numerical_tests.jl | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl index 695ea3a1df..4e94672233 100644 --- a/test/experimental/gibbs.jl +++ b/test/experimental/gibbs.jl @@ -2,7 +2,7 @@ module ExperimentalGibbsTests using ..Models: MoGtest_default, MoGtest_default_z_vector, gdemo using ..NumericalTests: check_MoGtest_default, check_MoGtest_default_z_vector, check_gdemo, - check_numerical + check_numerical, two_sample_ad_test using DynamicPPL using Random using Test diff --git a/test/test_utils/numerical_tests.jl b/test/test_utils/numerical_tests.jl index 25538aed47..c2525a5f41 100644 --- a/test/test_utils/numerical_tests.jl +++ b/test/test_utils/numerical_tests.jl @@ -3,6 +3,7 @@ module NumericalTests using Distributions using MCMCChains: namesingroup using Test: @test, @testset +using HypothesisTests: HypothesisTests export check_MoGtest_default, check_MoGtest_default_z_vector, check_dist_numerical, check_gdemo, check_numerical @@ -87,8 +88,8 @@ end Perform a two-sample Anderson-Darling (AD) test on the two samples `xs_left` and `xs_right`. """ function two_sample_ad_test(xs_left, xs_right; α=1e-3) - t = KSampleADTest(xs_left, xs_right) - return pvalue(t) > α + t = HypothesisTests.KSampleADTest(xs_left, xs_right) + return HypothesisTests.pvalue(t) > α end end From e1e73868689d872e61bdc6f605413f2cb0d43a19 Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Thu, 6 Jun 2024 10:35:52 +0100 Subject: [PATCH 13/31] make failures of `two_sample_ad_tests` a bit more informative --- test/test_utils/numerical_tests.jl | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/test/test_utils/numerical_tests.jl b/test/test_utils/numerical_tests.jl index c2525a5f41..6a266fe341 100644 --- a/test/test_utils/numerical_tests.jl +++ b/test/test_utils/numerical_tests.jl @@ -89,7 +89,13 @@ Perform a two-sample Anderson-Darling (AD) test on the two samples `xs_left` and """ function two_sample_ad_test(xs_left, xs_right; α=1e-3) t = HypothesisTests.KSampleADTest(xs_left, xs_right) - return HypothesisTests.pvalue(t) > α + # Just a way to make the logs a bit more informative in case of failure. 
+    if HypothesisTests.pvalue(t) > α
+        @test true
+    else
+        @warn "Two-sample AD test failed with p-value $(HypothesisTests.pvalue(t))"
+        @test false
+    end
 end

 end

From be1ec7ffbb088423b66f4d0491ad97b354597499 Mon Sep 17 00:00:00 2001
From: Tor Erlend Fjelde
Date: Thu, 6 Jun 2024 10:39:15 +0100
Subject: [PATCH 14/31] make failures of `two_sample_ad_test` produce more
 informative logs

---
 test/experimental/gibbs.jl        |  2 +-
 test/test_utils/numerical_tests.jl | 19 ++++++++++++++-----
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl
index 4e94672233..ec213a81b2 100644
--- a/test/experimental/gibbs.jl
+++ b/test/experimental/gibbs.jl
@@ -132,7 +132,7 @@ has_dot_assume(::Model) = true
                 xs = Array(chain)
                 xs_true = Array(chain_true)
                 for i = 1:size(xs, 2)
-                    @test two_sample_ad_test(xs[:, i], xs_true[:, i])
+                    @test two_sample_ad_test(xs[:, i], xs_true[:, i]; warn_on_fail=true)
                     # Let's make sure that the significance level is not too low by
                     # checking that the KS test fails for some simple transformations.
                     # TODO: Replace the heuristic below with closed-form implementations

diff --git a/test/test_utils/numerical_tests.jl b/test/test_utils/numerical_tests.jl
index 6a266fe341..8711c4c6e3 100644
--- a/test/test_utils/numerical_tests.jl
+++ b/test/test_utils/numerical_tests.jl
@@ -83,18 +83,27 @@ function check_MoGtest_default_z_vector(chain; atol=0.2, rtol=0.0)
 end

 """
-    two_sample_ad_test(xs_left, xs_right; α=1e-3)
+    two_sample_ad_test(xs_left, xs_right; α=1e-3, warn_on_fail=false)

 Perform a two-sample Anderson-Darling (AD) test on the two samples `xs_left` and `xs_right`.
+
+# Arguments
+- `xs_left::AbstractVector`: samples from the first distribution.
+- `xs_right::AbstractVector`: samples from the second distribution.
+
+# Keyword arguments
+- `α::Real`: significance level for the test. Default: `1e-3`.
+- `warn_on_fail::Bool`: whether to warn if the test fails. Default: `false`.
+  Makes failures a bit more informative.
 """
-function two_sample_ad_test(xs_left, xs_right; α=1e-3)
+function two_sample_ad_test(xs_left, xs_right; α=1e-3, warn_on_fail=false)
     t = HypothesisTests.KSampleADTest(xs_left, xs_right)
     # Just a way to make the logs a bit more informative in case of failure.
if HypothesisTests.pvalue(t) > α - @test true + true else - @warn "Two-sample AD test failed with p-value $(HypothesisTests.pvalue(t))" - @test false + warn_on_fail && @warn "Two-sample AD test failed with p-value $(HypothesisTests.pvalue(t))" + false end end From 5f3644632c8506b608de6f839c5081acc8cc689e Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Thu, 6 Jun 2024 11:13:59 +0100 Subject: [PATCH 15/31] additional information upon `two_sample_ad_test` failure --- test/test_utils/numerical_tests.jl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/test_utils/numerical_tests.jl b/test/test_utils/numerical_tests.jl index 8711c4c6e3..ef7598901c 100644 --- a/test/test_utils/numerical_tests.jl +++ b/test/test_utils/numerical_tests.jl @@ -103,6 +103,9 @@ function two_sample_ad_test(xs_left, xs_right; α=1e-3, warn_on_fail=false) true else warn_on_fail && @warn "Two-sample AD test failed with p-value $(HypothesisTests.pvalue(t))" + warn_on_fail && @warn "Test statistic: $(HypothesisTests.teststat(t))" + warn_on_fail && @warn "Means of the two samples: $(mean(xs_left)), $(mean(xs_right))" + warn_on_fail && @warn "Variances of the two samples: $(var(xs_left)), $(var(xs_right))" false end end From 3be8f8b6f52964d74ff562ac0e5dc14e8eb3e701 Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Thu, 6 Jun 2024 12:01:30 +0100 Subject: [PATCH 16/31] rename `two_sample_ad_test` to `two_sample_test` and use KS test instead --- test/experimental/gibbs.jl | 12 ++++++------ test/test_utils/numerical_tests.jl | 11 ++++++----- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl index ec213a81b2..80fb5c18e2 100644 --- a/test/experimental/gibbs.jl +++ b/test/experimental/gibbs.jl @@ -2,7 +2,7 @@ module ExperimentalGibbsTests using ..Models: MoGtest_default, MoGtest_default_z_vector, gdemo using ..NumericalTests: check_MoGtest_default, check_MoGtest_default_z_vector, check_gdemo, - check_numerical, two_sample_ad_test + check_numerical, two_sample_test using DynamicPPL using Random using Test @@ -83,7 +83,7 @@ has_dot_assume(::Model) = true end end - @testset "comparison with (old) Gibbs" begin + @testset "comparison with 'gold-standard' samples" begin num_iterations = 1_000 thinning = 10 num_chains = 4 @@ -132,14 +132,14 @@ has_dot_assume(::Model) = true xs = Array(chain) xs_true = Array(chain_true) for i = 1:size(xs, 2) - @test two_sample_ad_test(xs[:, i], xs_true[:, i]; warn_on_fail=true) + @test two_sample_test(xs[:, i], xs_true[:, i]; warn_on_fail=true) # Let's make sure that the significance level is not too low by # checking that the KS test fails for some simple transformations. # TODO: Replace the heuristic below with closed-form implementations # of the targets, once they are implemented in DynamicPPL. 
- @test !two_sample_ad_test(0.9 .* xs_true[:, i], xs_true[:, i]) - @test !two_sample_ad_test(1.1 .* xs_true[:, i], xs_true[:, i]) - @test !two_sample_ad_test(1e-1 .+ xs_true[:, i], xs_true[:, i]) + @test !two_sample_test(0.9 .* xs_true[:, i], xs_true[:, i]) + @test !two_sample_test(1.1 .* xs_true[:, i], xs_true[:, i]) + @test !two_sample_test(1e-1 .+ xs_true[:, i], xs_true[:, i]) end end end diff --git a/test/test_utils/numerical_tests.jl b/test/test_utils/numerical_tests.jl index ef7598901c..c44c502c11 100644 --- a/test/test_utils/numerical_tests.jl +++ b/test/test_utils/numerical_tests.jl @@ -83,9 +83,11 @@ function check_MoGtest_default_z_vector(chain; atol=0.2, rtol=0.0) end """ - two_sample_ad_test(xs_left, xs_right; α=1e-3, warn_on_fail=false) + two_sample_test(xs_left, xs_right; α=1e-3, warn_on_fail=false) -Perform a two-sample Anderson-Darling (AD) test on the two samples `xs_left` and `xs_right`. +Perform a two-sample hypothesis test on the two samples `xs_left` and `xs_right`. + +Currently the test performed is a Kolmogorov-Smirnov (KS) test. # Arguments - `xs_left::AbstractVector`: samples from the first distribution. @@ -96,14 +98,13 @@ Perform a two-sample Anderson-Darling (AD) test on the two samples `xs_left` and - `warn_on_fail::Bool`: whether to warn if the test fails. Default: `false`. Makes failures a bit more informative. """ -function two_sample_ad_test(xs_left, xs_right; α=1e-3, warn_on_fail=false) - t = HypothesisTests.KSampleADTest(xs_left, xs_right) +function two_sample_test(xs_left, xs_right; α=1e-3, warn_on_fail=false) + t = HypothesisTests.ApproximateTwoSampleKSTest(xs_left, xs_right) # Just a way to make the logs a bit more informative in case of failure. if HypothesisTests.pvalue(t) > α true else warn_on_fail && @warn "Two-sample AD test failed with p-value $(HypothesisTests.pvalue(t))" - warn_on_fail && @warn "Test statistic: $(HypothesisTests.teststat(t))" warn_on_fail && @warn "Means of the two samples: $(mean(xs_left)), $(mean(xs_right))" warn_on_fail && @warn "Variances of the two samples: $(var(xs_left)), $(var(xs_right))" false From dbaf4477b1d7e5cb6e46ddcf68b7eefa4a9eb6fd Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Sun, 16 Jun 2024 22:22:39 +0200 Subject: [PATCH 17/31] added minor test for externalsampler usage --- test/experimental/gibbs.jl | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl index e4f2c865ce..d46aefd90e 100644 --- a/test/experimental/gibbs.jl +++ b/test/experimental/gibbs.jl @@ -24,8 +24,8 @@ const DEMO_MODELS_WITHOUT_DOT_ASSUME = Union{ has_dot_assume(::DEMO_MODELS_WITHOUT_DOT_ASSUME) = false has_dot_assume(::Model) = true -@testset "Gibbs using `condition`" begin - @testset "Demo models" begin +@timeit_testset "Gibbs using `condition`" begin + @timeit_testset "Demo models" begin @testset "$(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS vns = DynamicPPL.TestUtils.varnames(model) # Run one sampler on variables starting with `s` and another on variables starting with `m`. @@ -137,7 +137,7 @@ has_dot_assume(::Model) = true end end - @testset "multiple varnames" begin + @timeit_testset "multiple varnames" begin rng = Random.default_rng() # With both `s` and `m` as random. 
@@ -171,7 +171,7 @@ has_dot_assume(::Model) = true end end - @testset "CSMC + ESS" begin + @timeit_testset "CSMC + ESS" begin rng = Random.default_rng() model = MoGtest_default alg = Turing.Experimental.Gibbs( @@ -193,7 +193,7 @@ has_dot_assume(::Model) = true check_MoGtest_default(chain, atol = 0.2) end - @testset "CSMC + ESS (usage of implicit varname)" begin + @timeit_testset "CSMC + ESS (usage of implicit varname)" begin rng = Random.default_rng() model = MoGtest_default_z_vector alg = Turing.Experimental.Gibbs( @@ -214,4 +214,24 @@ has_dot_assume(::Model) = true chain = sample(model, alg, 1000; progress=false) check_MoGtest_default_z_vector(chain, atol = 0.2) end + + @timeit_testset "externsalsampler" begin + @model function demo_gibbs_external() + m1 ~ Normal() + m2 ~ Normal() + + -1 ~ Normal(m1, 1) + +1 ~ Normal(m1 + m2, 1) + + return (; m1, m2) + end + + model = demo_gibbs_external() + sampler = Turing.Experimental.Gibbs( + @varname(m1) => externalsampler(AdvancedMH.RWMH(1)), + @varname(m2) => externalsampler(AdvancedMH.RWMH(1)), + ) + chain = sample(model, sampler, 1000; discard_initial=1000, thinning=10) + check_numerical(chain, [:m1, :m2], [-0.2, 0.6], atol = 0.1) + end end From f44c4079f0f3eebfb37ae747c49cec2a1337549b Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Sun, 16 Jun 2024 22:28:12 +0200 Subject: [PATCH 18/31] also test AdvancedHMC samplers with Gibbs --- test/experimental/gibbs.jl | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl index d46aefd90e..01424398f7 100644 --- a/test/experimental/gibbs.jl +++ b/test/experimental/gibbs.jl @@ -227,11 +227,17 @@ has_dot_assume(::Model) = true end model = demo_gibbs_external() - sampler = Turing.Experimental.Gibbs( - @varname(m1) => externalsampler(AdvancedMH.RWMH(1)), - @varname(m2) => externalsampler(AdvancedMH.RWMH(1)), - ) - chain = sample(model, sampler, 1000; discard_initial=1000, thinning=10) - check_numerical(chain, [:m1, :m2], [-0.2, 0.6], atol = 0.1) + samplers_inner = [ + externalsampler(AdvancedMH.RWMH(1)), + externalsampler(AdvancedHMC.HMC(1e-1, 32)), + ] + @testset "$(sampler_inner)" for sampler_inner in samplers_inner + sampler = Turing.Experimental.Gibbs( + @varname(m1) => sampler_inner, + @varname(m2) => sampler_inner, + ) + chain = sample(model, sampler, 1000; discard_initial=1000, thinning=10, n_adapts=0) + check_numerical(chain, [:m1, :m2], [-0.2, 0.6], atol=0.1) + end end end From dd86cfaff616f962c0378dc761a3958f4a3125b8 Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Mon, 17 Jun 2024 09:52:22 +0200 Subject: [PATCH 19/31] forgot to add updates to src/mcmc/abstractmcmc.jl in previous commits --- src/mcmc/abstractmcmc.jl | 38 ++++++++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/src/mcmc/abstractmcmc.jl b/src/mcmc/abstractmcmc.jl index 81cfee9a9f..4d30a74929 100644 --- a/src/mcmc/abstractmcmc.jl +++ b/src/mcmc/abstractmcmc.jl @@ -17,10 +17,26 @@ function transition_to_turing(f::LogDensityProblemsAD.ADGradientWrapper, transit return transition_to_turing(parent(f), transition) end +_getmodel(f::LogDensityProblemsAD.ADGradientWrapper) = _getmodel(parent(f)) +_getmodel(f::DynamicPPL.LogDensityFunction) = f.model + +# FIXME: We'll have to overload this for every AD backend since some of the AD backends +# will cache certain parts of a given model, e.g. the tape, which results in a discrepancy +# between the primal (forward) and dual (backward). 
+function _setmodel(f::LogDensityProblemsAD.ADGradientWrapper, model::DynamicPPL.Model) + return Accessors.@set f.ℓ = _setmodel(f.ℓ, model) +end +function _setmodel(f::DynamicPPL.LogDensityFunction, model::DynamicPPL.Model) + return Accessors.@set f.model = model +end + +_varinfo(f::LogDensityProblemsAD.ADGradientWrapper) = _varinfo(parent(f)) +_varinfo(f::DynamicPPL.LogDensityFunction) = f.varinfo + function varinfo(state::TuringState) - θ = getparams(state.logdensity.model, state.state) + θ = getparams(_getmodel(state.logdensity), state.state) # TODO: Do we need to link here first? - return DynamicPPL.unflatten(state.logdensity.varinfo, θ) + return DynamicPPL.unflatten(_varinfo(state.logdensity), θ) end # NOTE: Only thing that depends on the underlying sampler. @@ -48,14 +64,14 @@ end Recompute the log-probability of the `model` based on the given `state` and return the resulting state. """ function recompute_logprob!!( - rng::Random.AbstractRNG, + rng::Random.AbstractRNG, # TODO: Do we need the `rng` here? model::DynamicPPL.Model, sampler::DynamicPPL.Sampler{<:ExternalSampler}, state ) - # Re-using the log-density function from the `state` and updating only the `model` field. - f = state.logdensity - f = Accessors.@set f.model = model + # Re-using the log-density function from the `state` and updating only the `model` field, + # since the `model` might now contain different conditioning values. + f = _setmodel(state.logdensity, model) # Recompute the log-probability with the new `model`. state_inner = recompute_logprob!!( rng, @@ -82,6 +98,16 @@ function recompute_logprob!!( ) end +function recompute_logprob!!( + rng::Random.AbstractRNG, + model::AbstractMCMC.LogDensityModel, + sampler::AdvancedMH.MetropolisHastings, + state::AdvancedMH.Transition, +) + logdensity = model.logdensity + return Accessors.@set state.lp = LogDensityProblems.logdensity(logdensity, state.params) +end + # TODO: Do we also support `resume`, etc? function AbstractMCMC.step( rng::Random.AbstractRNG, From bdc61feb7c5ca26a2bc4f389e21df5b473168fee Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Mon, 17 Jun 2024 09:06:23 +0100 Subject: [PATCH 20/31] removed usage of `timeit_testset` macro --- test/experimental/gibbs.jl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl index a7973e15c6..a1ae2968c5 100644 --- a/test/experimental/gibbs.jl +++ b/test/experimental/gibbs.jl @@ -32,8 +32,8 @@ const DEMO_MODELS_WITHOUT_DOT_ASSUME = Union{ has_dot_assume(::DEMO_MODELS_WITHOUT_DOT_ASSUME) = false has_dot_assume(::Model) = true -@timeit_testset "Gibbs using `condition`" begin - @timeit_testset "Demo models" begin +@testset "Gibbs using `condition`" begin + @testset "Demo models" begin @testset "$(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS vns = DynamicPPL.TestUtils.varnames(model) # Run one sampler on variables starting with `s` and another on variables starting with `m`. @@ -145,7 +145,7 @@ has_dot_assume(::Model) = true end end - @timeit_testset "multiple varnames" begin + @testset "multiple varnames" begin rng = Random.default_rng() # With both `s` and `m` as random. 
@@ -179,7 +179,7 @@ has_dot_assume(::Model) = true end end - @timeit_testset "CSMC + ESS" begin + @testset "CSMC + ESS" begin rng = Random.default_rng() model = MoGtest_default alg = Turing.Experimental.Gibbs( @@ -201,7 +201,7 @@ has_dot_assume(::Model) = true check_MoGtest_default(chain, atol = 0.2) end - @timeit_testset "CSMC + ESS (usage of implicit varname)" begin + @testset "CSMC + ESS (usage of implicit varname)" begin rng = Random.default_rng() model = MoGtest_default_z_vector alg = Turing.Experimental.Gibbs( @@ -223,7 +223,7 @@ has_dot_assume(::Model) = true check_MoGtest_default_z_vector(chain, atol = 0.2) end - @timeit_testset "externsalsampler" begin + @testset "externsalsampler" begin @model function demo_gibbs_external() m1 ~ Normal() m2 ~ Normal() From d76243e05fabfaf0d5563e47b89480c619e9b217 Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Mon, 17 Jun 2024 10:36:22 +0100 Subject: [PATCH 21/31] added temporary fix for externalsampler that needs to be removed once DPPL has been updated --- test/experimental/gibbs.jl | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl index a1ae2968c5..2c62cdaa03 100644 --- a/test/experimental/gibbs.jl +++ b/test/experimental/gibbs.jl @@ -7,6 +7,30 @@ using DynamicPPL using Random using Test using Turing +using Turing.Inference: AdvancedHMC, AdvancedMH + +# FIXME: Remove once https://github.com/TuringLang/DynamicPPL.jl/pull/621 has gone through. +# HACK: Have to provide the `context` explicitly, otherwise `model.context` +# will be used, which itself can contain things like `ConditionContext`. +# This in turn means that if we later do something like +# +# Accessors.@set f.model = condition(model, x=new_value) +# +# will result in a `LogDensityFunction` which evaluates using _both_ +# the "new" `ConditionContext` with `x=new_value` and the original +# `ConditionContext` containing the original value for `x`. +# By specifying that we only want to respect the _leaf_ context, we will +# only run into this issue if we do something like the above with a +# leaf context (though it's really unclear if this would be desireable +# to support). +# TODO: Make the `LogDensityFunction` take `nothing` in place of the context +# by default, in which case we simply defer the choice of context to `model.context`. +function DynamicPPL.LogDensityFunction(model::DynamicPPL.Model) + return DynamicPPL.LogDensityFunction(model, DynamicPPL.VarInfo(model)) +end +function DynamicPPL.LogDensityFunction(model::DynamicPPL.Model, varinfo::DynamicPPL.AbstractVarInfo) + return DynamicPPL.LogDensityFunction(model, varinfo, DynamicPPL.leafcontext(model.context)) +end function check_transition_varnames( transition::Turing.Inference.Transition, From 14f5c8981d20b84e3eb97800177cd695c397d516 Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Mon, 17 Jun 2024 10:36:42 +0100 Subject: [PATCH 22/31] minor reorg of two testsets --- test/experimental/gibbs.jl | 50 +++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl index 2c62cdaa03..316b4ef401 100644 --- a/test/experimental/gibbs.jl +++ b/test/experimental/gibbs.jl @@ -172,34 +172,40 @@ has_dot_assume(::Model) = true @testset "multiple varnames" begin rng = Random.default_rng() - # With both `s` and `m` as random. 
- model = gdemo(1.5, 2.0) - vns = (@varname(s), @varname(m)) - alg = Turing.Experimental.Gibbs(vns => MH()) + @testset "with both `s` and `m` as random" begin + model = gdemo(1.5, 2.0) + vns = (@varname(s), @varname(m)) + alg = Turing.Experimental.Gibbs(vns => MH()) - # `step` - transition, state = AbstractMCMC.step(rng, model, DynamicPPL.Sampler(alg)) - check_transition_varnames(transition, vns) - for _ = 1:5 - transition, state = AbstractMCMC.step(rng, model, DynamicPPL.Sampler(alg), state) + # `step` + transition, state = AbstractMCMC.step(rng, model, DynamicPPL.Sampler(alg)) check_transition_varnames(transition, vns) - end + for _ in 1:5 + transition, state = AbstractMCMC.step( + rng, model, DynamicPPL.Sampler(alg), state + ) + check_transition_varnames(transition, vns) + end - # `sample` - chain = sample(model, alg, 10_000; progress=false) - check_numerical(chain, [:s, :m], [49 / 24, 7 / 6], atol = 0.4) + # `sample` + chain = sample(model, alg, 10_000; progress=false) + check_numerical(chain, [:s, :m], [49 / 24, 7 / 6]; atol=0.4) + end - # Without `m` as random. - model = gdemo(1.5, 2.0) | (m = 7 / 6,) - vns = (@varname(s),) - alg = Turing.Experimental.Gibbs(vns => MH()) + @testset "without `m` as random" begin + model = gdemo(1.5, 2.0) | (m=7 / 6,) + vns = (@varname(s),) + alg = Turing.Experimental.Gibbs(vns => MH()) - # `step` - transition, state = AbstractMCMC.step(rng, model, DynamicPPL.Sampler(alg)) - check_transition_varnames(transition, vns) - for _ = 1:5 - transition, state = AbstractMCMC.step(rng, model, DynamicPPL.Sampler(alg), state) + # `step` + transition, state = AbstractMCMC.step(rng, model, DynamicPPL.Sampler(alg)) check_transition_varnames(transition, vns) + for _ in 1:5 + transition, state = AbstractMCMC.step( + rng, model, DynamicPPL.Sampler(alg), state + ) + check_transition_varnames(transition, vns) + end end end From 5893d54df2ed385d4164c7d9cb60b39f10348a33 Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Tue, 18 Jun 2024 08:23:27 +0100 Subject: [PATCH 23/31] set random seeds more aggressively in an attempt to make tests more reproducible --- test/experimental/gibbs.jl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl index 316b4ef401..591df196d0 100644 --- a/test/experimental/gibbs.jl +++ b/test/experimental/gibbs.jl @@ -127,6 +127,7 @@ has_dot_assume(::Model) = true vns_s => sampler_inner, vns_m => sampler_inner, ) + Random.seed!(42) chain = sample( model, sampler, @@ -141,6 +142,7 @@ has_dot_assume(::Model) = true # "Ground truth" samples. # TODO: Replace with closed-form sampling once that is implemented in DynamicPPL. + Random.seed!(42) chain_true = sample( model, NUTS(), @@ -188,6 +190,7 @@ has_dot_assume(::Model) = true end # `sample` + Random.seed!(42) chain = sample(model, alg, 10_000; progress=false) check_numerical(chain, [:s, :m], [49 / 24, 7 / 6]; atol=0.4) end @@ -227,6 +230,7 @@ has_dot_assume(::Model) = true end # Sample! + Random.seed!(42) chain = sample(MoGtest_default, alg, 1000; progress=false) check_MoGtest_default(chain, atol = 0.2) end @@ -249,6 +253,7 @@ has_dot_assume(::Model) = true end # Sample! 
+ Random.seed!(42) chain = sample(model, alg, 1000; progress=false) check_MoGtest_default_z_vector(chain, atol = 0.2) end @@ -274,6 +279,7 @@ has_dot_assume(::Model) = true @varname(m1) => sampler_inner, @varname(m2) => sampler_inner, ) + Random.seed!(42) chain = sample(model, sampler, 1000; discard_initial=1000, thinning=10, n_adapts=0) check_numerical(chain, [:m1, :m2], [-0.2, 0.6], atol=0.1) end From 4f30ea50f19b9a98b9c47d00bf3ae934a6d409a0 Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Tue, 18 Jun 2024 21:53:01 +0100 Subject: [PATCH 24/31] removed hack, awaiting PR to DynamicPPL --- test/experimental/gibbs.jl | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl index 591df196d0..c3e4bae9a2 100644 --- a/test/experimental/gibbs.jl +++ b/test/experimental/gibbs.jl @@ -9,29 +9,6 @@ using Test using Turing using Turing.Inference: AdvancedHMC, AdvancedMH -# FIXME: Remove once https://github.com/TuringLang/DynamicPPL.jl/pull/621 has gone through. -# HACK: Have to provide the `context` explicitly, otherwise `model.context` -# will be used, which itself can contain things like `ConditionContext`. -# This in turn means that if we later do something like -# -# Accessors.@set f.model = condition(model, x=new_value) -# -# will result in a `LogDensityFunction` which evaluates using _both_ -# the "new" `ConditionContext` with `x=new_value` and the original -# `ConditionContext` containing the original value for `x`. -# By specifying that we only want to respect the _leaf_ context, we will -# only run into this issue if we do something like the above with a -# leaf context (though it's really unclear if this would be desireable -# to support). -# TODO: Make the `LogDensityFunction` take `nothing` in place of the context -# by default, in which case we simply defer the choice of context to `model.context`. -function DynamicPPL.LogDensityFunction(model::DynamicPPL.Model) - return DynamicPPL.LogDensityFunction(model, DynamicPPL.VarInfo(model)) -end -function DynamicPPL.LogDensityFunction(model::DynamicPPL.Model, varinfo::DynamicPPL.AbstractVarInfo) - return DynamicPPL.LogDensityFunction(model, varinfo, DynamicPPL.leafcontext(model.context)) -end - function check_transition_varnames( transition::Turing.Inference.Transition, parent_varnames From 89bc2e1f0cc805ea4b5ce9681061d06fa86ecbd7 Mon Sep 17 00:00:00 2001 From: Tor Erlend Fjelde Date: Wed, 26 Jun 2024 21:47:58 +0100 Subject: [PATCH 25/31] renamed `_getmodel` to `getmodel`, `_setmodel` to `setmodel`, and `_varinfo` to `varinfo_from_logdensityfn` --- src/mcmc/abstractmcmc.jl | 57 ++++++++++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 20 deletions(-) diff --git a/src/mcmc/abstractmcmc.jl b/src/mcmc/abstractmcmc.jl index 4d30a74929..9d2ebb9ea8 100644 --- a/src/mcmc/abstractmcmc.jl +++ b/src/mcmc/abstractmcmc.jl @@ -17,24 +17,41 @@ function transition_to_turing(f::LogDensityProblemsAD.ADGradientWrapper, transit return transition_to_turing(parent(f), transition) end -_getmodel(f::LogDensityProblemsAD.ADGradientWrapper) = _getmodel(parent(f)) -_getmodel(f::DynamicPPL.LogDensityFunction) = f.model +""" + getmodel(f) + +Return the `DynamicPPL.Model` wrapped in the given log-density function `f`. 
+""" +getmodel(f::LogDensityProblemsAD.ADGradientWrapper) = getmodel(parent(f)) +getmodel(f::DynamicPPL.LogDensityFunction) = f.model # FIXME: We'll have to overload this for every AD backend since some of the AD backends # will cache certain parts of a given model, e.g. the tape, which results in a discrepancy # between the primal (forward) and dual (backward). -function _setmodel(f::LogDensityProblemsAD.ADGradientWrapper, model::DynamicPPL.Model) - return Accessors.@set f.ℓ = _setmodel(f.ℓ, model) +""" + setmodel(f, model) + +Set the `DynamicPPL.Model` in the given log-density function `f` to `model`. + +!!! warning + Note that if `f` is a `LogDensityProblemsAD.ADGradientWrapper` wrapping a + `DynamicPPL.LogDensityFunction`, performing an update of the `model` in `f` + might require recompilation of the gradient tape, depending on the AD backend. +""" +function setmodel(f::LogDensityProblemsAD.ADGradientWrapper, model::DynamicPPL.Model) + return Accessors.@set f.ℓ = setmodel(f.ℓ, model) end -function _setmodel(f::DynamicPPL.LogDensityFunction, model::DynamicPPL.Model) +function setmodel(f::DynamicPPL.LogDensityFunction, model::DynamicPPL.Model) return Accessors.@set f.model = model end -_varinfo(f::LogDensityProblemsAD.ADGradientWrapper) = _varinfo(parent(f)) +function varinfo_from_logdensityfn(f::LogDensityProblemsAD.ADGradientWrapper) + return varinfo_from_logdensityfn(parent(f)) +end _varinfo(f::DynamicPPL.LogDensityFunction) = f.varinfo function varinfo(state::TuringState) - θ = getparams(_getmodel(state.logdensity), state.state) + θ = getparams(getmodel(state.logdensity), state.state) # TODO: Do we need to link here first? return DynamicPPL.unflatten(_varinfo(state.logdensity), θ) end @@ -67,17 +84,14 @@ function recompute_logprob!!( rng::Random.AbstractRNG, # TODO: Do we need the `rng` here? model::DynamicPPL.Model, sampler::DynamicPPL.Sampler{<:ExternalSampler}, - state + state, ) # Re-using the log-density function from the `state` and updating only the `model` field, # since the `model` might now contain different conditioning values. - f = _setmodel(state.logdensity, model) + f = setmodel(state.logdensity, model) # Recompute the log-probability with the new `model`. state_inner = recompute_logprob!!( - rng, - AbstractMCMC.LogDensityModel(f), - sampler.alg.sampler, - state.state + rng, AbstractMCMC.LogDensityModel(f), sampler.alg.sampler, state.state ) return state_to_turing(f, state_inner) end @@ -86,15 +100,13 @@ function recompute_logprob!!( rng::Random.AbstractRNG, model::AbstractMCMC.LogDensityModel, sampler::AdvancedHMC.AbstractHMCSampler, - state::AdvancedHMC.HMCState + state::AdvancedHMC.HMCState, ) # Construct hamiltionian. hamiltonian = AdvancedHMC.Hamiltonian(state.metric, model) # Re-compute the log-probability and gradient. return Accessors.@set state.transition.z = AdvancedHMC.phasepoint( - hamiltonian, - state.transition.z.θ, - state.transition.z.r, + hamiltonian, state.transition.z.θ, state.transition.z.r ) end @@ -115,7 +127,7 @@ function AbstractMCMC.step( sampler_wrapper::Sampler{<:ExternalSampler}; initial_state=nothing, initial_params=nothing, - kwargs... + kwargs..., ) alg = sampler_wrapper.alg sampler = alg.sampler @@ -145,7 +157,12 @@ function AbstractMCMC.step( ) else transition_inner, state_inner = AbstractMCMC.step( - rng, AbstractMCMC.LogDensityModel(f), sampler, initial_state; initial_params, kwargs... 
From 3d3c9441a1b02647801cf102a184e34d69115bac Mon Sep 17 00:00:00 2001
From: Tor Erlend Fjelde 
Date: Wed, 26 Jun 2024 22:02:07 +0100
Subject: [PATCH 26/31] missed some instances during renaming

---
 src/mcmc/abstractmcmc.jl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/mcmc/abstractmcmc.jl b/src/mcmc/abstractmcmc.jl
index 9d2ebb9ea8..539074fbdf 100644
--- a/src/mcmc/abstractmcmc.jl
+++ b/src/mcmc/abstractmcmc.jl
@@ -48,12 +48,12 @@ end
 function varinfo_from_logdensityfn(f::LogDensityProblemsAD.ADGradientWrapper)
     return varinfo_from_logdensityfn(parent(f))
 end
-_varinfo(f::DynamicPPL.LogDensityFunction) = f.varinfo
+varinfo_from_logdensityfn(f::DynamicPPL.LogDensityFunction) = f.varinfo
 
 function varinfo(state::TuringState)
     θ = getparams(getmodel(state.logdensity), state.state)
     # TODO: Do we need to link here first?
-    return DynamicPPL.unflatten(_varinfo(state.logdensity), θ)
+    return DynamicPPL.unflatten(varinfo_from_logdensityfn(state.logdensity), θ)
 end
 
 # NOTE: Only thing that depends on the underlying sampler.

From e1f1a0ed2cef2b703d25fd4ebb75d449fc57ec0b Mon Sep 17 00:00:00 2001
From: Tor Erlend Fjelde 
Date: Wed, 10 Jul 2024 18:54:33 +0100
Subject: [PATCH 27/31] fixed missing merge in initial step for experimental `Gibbs`

---
 src/experimental/gibbs.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/experimental/gibbs.jl b/src/experimental/gibbs.jl
index f7d8fd848a..596e6e283b 100644
--- a/src/experimental/gibbs.jl
+++ b/src/experimental/gibbs.jl
@@ -308,7 +308,7 @@ function DynamicPPL.initialstep(
     varinfos = map(last, states_and_varinfos)
 
     # Update the base varinfo from the first varinfo and replace it.
-    varinfos_new = DynamicPPL.setindex!!(varinfos, vi_base, 1)
+    varinfos_new = DynamicPPL.setindex!!(varinfos, merge(vi_base, first(varinfos)), 1)
     # Merge the updated initial varinfo with the rest of the varinfos + update the logp.
     vi = DynamicPPL.setlogp!!(
         reduce(merge, varinfos_new),
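The one-line fix above matters because the first sampler's initial step may already have updated its subset varinfo; replacing the first entry with the untouched `vi_base` would silently discard that work. A hedged sketch of the `merge` semantics relied on here (illustrative model and values; entries of the right-hand varinfo take precedence on shared variables):

```julia
using Turing, DynamicPPL

@model function demo_merge()
    s ~ InverseGamma(2, 3)
    m ~ Normal(0, sqrt(s))
end

vi_base = DynamicPPL.VarInfo(demo_merge())
vi_s = DynamicPPL.subset(vi_base, [@varname(s)])
vi_s = DynamicPPL.unflatten(vi_s, [2.5])  # pretend the sampler for `s` just moved it to 2.5

vi_merged = merge(vi_base, vi_s)
vi_merged[@varname(s)]  # 2.5: the update survives, unlike with the plain `vi_base`
```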
From 7c4368e3c71586036bec64a05f70d7d343248c8f Mon Sep 17 00:00:00 2001
From: Tor Erlend Fjelde 
Date: Mon, 15 Jul 2024 08:32:35 +0100
Subject: [PATCH 28/31] Always reconstruct `ADGradientWrapper` using the
 `adtype` available in `ExternalSampler`

---
 src/mcmc/abstractmcmc.jl | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/src/mcmc/abstractmcmc.jl b/src/mcmc/abstractmcmc.jl
index 539074fbdf..65aaa177b7 100644
--- a/src/mcmc/abstractmcmc.jl
+++ b/src/mcmc/abstractmcmc.jl
@@ -25,11 +25,8 @@ Return the `DynamicPPL.Model` wrapped in the given log-density function `f`.
 getmodel(f::LogDensityProblemsAD.ADGradientWrapper) = getmodel(parent(f))
 getmodel(f::DynamicPPL.LogDensityFunction) = f.model
 
-# FIXME: We'll have to overload this for every AD backend since some of the AD backends
-# will cache certain parts of a given model, e.g. the tape, which results in a discrepancy
-# between the primal (forward) and dual (backward).
 """
-    setmodel(f, model)
+    setmodel(f, model[, adtype])
 
 Set the `DynamicPPL.Model` in the given log-density function `f` to `model`.
 
 !!! warning
     Note that if `f` is a `LogDensityProblemsAD.ADGradientWrapper` wrapping a
     `DynamicPPL.LogDensityFunction`, performing an update of the `model` in `f`
     might require recompilation of the gradient tape, depending on the AD backend.
 """
-function setmodel(f::LogDensityProblemsAD.ADGradientWrapper, model::DynamicPPL.Model)
-    return Accessors.@set f.ℓ = setmodel(f.ℓ, model)
+function setmodel(
+    f::LogDensityProblemsAD.ADGradientWrapper,
+    model::DynamicPPL.Model,
+    adtype::ADTypes.AbstractADType
+)
+    # TODO: Should we handle `SciMLBase.NoAD`?
+    # For an `ADGradientWrapper` we do the following:
+    # 1. Update the `Model` in the underlying `LogDensityFunction`.
+    # 2. Re-construct the `ADGradientWrapper` using `ADgradient` using the provided `adtype`
+    #    to ensure that the recompilation of gradient tapes, etc. also occur. For example,
+    #    ReverseDiff.jl in compiled mode will cache the compiled tape, which means that just
+    #    replacing the corresponding field with the new model won't be sufficient to obtain
+    #    the correct gradients.
+    return LogDensityProblemsAD.ADgradient(adtype, setmodel(parent(f), model))
 end
 function setmodel(f::DynamicPPL.LogDensityFunction, model::DynamicPPL.Model)
     return Accessors.@set f.model = model
 end
@@ -88,7 +97,7 @@ function recompute_logprob!!(
 )
     # Re-using the log-density function from the `state` and updating only the `model` field,
     # since the `model` might now contain different conditioning values.
-    f = setmodel(state.logdensity, model)
+    f = setmodel(state.logdensity, model, sampler.alg.adtype)
     # Recompute the log-probability with the new `model`.
     state_inner = recompute_logprob!!(
         rng, AbstractMCMC.LogDensityModel(f), sampler.alg.sampler, state.state

From 06357c6e8cf7c3bf3d29f7a3fc58ff919878b6e4 Mon Sep 17 00:00:00 2001
From: Tor Erlend Fjelde 
Date: Mon, 15 Jul 2024 09:11:59 +0100
Subject: [PATCH 29/31] Test Gibbs with different adtype in externalsampler to
 ensure that it works

---
 test/experimental/gibbs.jl | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/test/experimental/gibbs.jl b/test/experimental/gibbs.jl
index c3e4bae9a2..0f0740f14a 100644
--- a/test/experimental/gibbs.jl
+++ b/test/experimental/gibbs.jl
@@ -8,6 +8,8 @@ using Random
 using Test
 using Turing
 using Turing.Inference: AdvancedHMC, AdvancedMH
+using ForwardDiff: ForwardDiff
+using ReverseDiff: ReverseDiff
 
 function check_transition_varnames(
     transition::Turing.Inference.Transition,
@@ -249,7 +251,9 @@ has_dot_assume(::Model) = true
     model = demo_gibbs_external()
     samplers_inner = [
         externalsampler(AdvancedMH.RWMH(1)),
-        externalsampler(AdvancedHMC.HMC(1e-1, 32)),
+        externalsampler(AdvancedHMC.HMC(1e-1, 32), adtype=AutoForwardDiff()),
+        externalsampler(AdvancedHMC.HMC(1e-1, 32), adtype=AutoReverseDiff()),
+        externalsampler(AdvancedHMC.HMC(1e-1, 32), adtype=AutoReverseDiff(compile=true)),
     ]
     @testset "$(sampler_inner)" for sampler_inner in samplers_inner
         sampler = Turing.Experimental.Gibbs(
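As a self-contained sketch of the pattern these new test cases exercise (hypothetical model; `externalsampler`, `AutoForwardDiff`, and `AutoReverseDiff` are used here the same way as in the test suite):

```julia
using Turing
using Turing.Inference: AdvancedHMC
using DynamicPPL: @varname
using ReverseDiff: ReverseDiff

@model function demo_adtypes()
    s ~ InverseGamma(2, 3)
    m ~ Normal(0, sqrt(s))
    1.5 ~ Normal(m, sqrt(s))
end

# Mix AD backends across the component samplers of a single Gibbs sweep.
spl = Turing.Experimental.Gibbs(
    @varname(s) => externalsampler(AdvancedHMC.HMC(1e-1, 32), adtype=AutoReverseDiff(compile=true)),
    @varname(m) => externalsampler(AdvancedHMC.HMC(1e-1, 32), adtype=AutoForwardDiff()),
)
chain = sample(demo_adtypes(), spl, 1000; progress=false)
```

The compiled-ReverseDiff case is the one that motivated rebuilding the `ADGradientWrapper` in the patch above, since a cached tape must be recompiled whenever the conditioned model changes.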
From 02f9fada210a01fa16cd1f46cb3ab0c81e7dbaa3 Mon Sep 17 00:00:00 2001
From: Hong Ge <3279477+yebai@users.noreply.github.com>
Date: Mon, 15 Jul 2024 21:12:58 +0100
Subject: [PATCH 30/31] Update Project.toml

---
 Project.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Project.toml b/Project.toml
index 5bf3336f31..f92f66de7d 100644
--- a/Project.toml
+++ b/Project.toml
@@ -63,7 +63,7 @@ Distributions = "0.23.3, 0.24, 0.25"
 DistributionsAD = "0.6"
 DocStringExtensions = "0.8, 0.9"
 DynamicHMC = "3.4"
-DynamicPPL = "0.28"
+DynamicPPL = "0.28.1"
 Compat = "4.15.0"
 EllipticalSliceSampling = "0.5, 1, 2"
 ForwardDiff = "0.10.3"

From 30ab9e0ee3172da8c62e3745699e85e25f898d76 Mon Sep 17 00:00:00 2001
From: Hong Ge <3279477+yebai@users.noreply.github.com>
Date: Mon, 15 Jul 2024 21:13:29 +0100
Subject: [PATCH 31/31] Update Project.toml

---
 Project.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Project.toml b/Project.toml
index f92f66de7d..bc6ddaedb6 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,6 +1,6 @@
 name = "Turing"
 uuid = "fce5fe82-541a-59a6-adf8-730c64b5f9a0"
-version = "0.33"
+version = "0.33.2"
 
 [deps]
 ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"