diff --git a/GeneralisedFilters/src/models/hierarchical.jl b/GeneralisedFilters/src/models/hierarchical.jl
index ecdc8a6..d458489 100644
--- a/GeneralisedFilters/src/models/hierarchical.jl
+++ b/GeneralisedFilters/src/models/hierarchical.jl
@@ -29,24 +29,28 @@ function AbstractMCMC.sample(
 )
     outer_dyn, inner_model = model.outer_dyn, model.inner_model
 
-    zs = Vector{eltype(inner_model.dyn)}(undef, T)
-    xs = Vector{eltype(outer_dyn)}(undef, T)
+    zs = OffsetVector(Vector{eltype(inner_model.dyn)}(undef, T + 1), -1)
+    xs = OffsetVector(Vector{eltype(outer_dyn)}(undef, T + 1), -1)
     ys = Vector{eltype(inner_model.obs)}(undef, T)
 
     # Simulate outer dynamics
-    x0 = simulate(rng, outer_dyn; kwargs...)
-    z0 = simulate(rng, inner_model.dyn; new_outer=x0, kwargs...)
+    xs[0] = simulate(rng, outer_dyn; kwargs...)
+    zs[0] = simulate(rng, inner_model.dyn; new_outer=xs[0], kwargs...)
     for t in 1:T
-        prev_x = t == 1 ? x0 : xs[t - 1]
-        prev_z = t == 1 ? z0 : zs[t - 1]
-        xs[t] = simulate(rng, model.outer_dyn, t, prev_x; kwargs...)
+        xs[t] = simulate(rng, model.outer_dyn, t, xs[t - 1]; kwargs...)
         zs[t] = simulate(
-            rng, inner_model.dyn, t, prev_z; prev_outer=prev_x, new_outer=xs[t], kwargs...
+            rng,
+            inner_model.dyn,
+            t,
+            zs[t - 1];
+            prev_outer=xs[t - 1],
+            new_outer=xs[t],
+            kwargs...,
         )
         ys[t] = simulate(rng, inner_model.obs, t, zs[t]; new_outer=xs[t], kwargs...)
     end
 
-    return x0, z0, xs, zs, ys
+    return xs, zs, ys
 end
 
 ## Methods to make HierarchicalSSM compatible with the bootstrap filter
diff --git a/GeneralisedFilters/test/runtests.jl b/GeneralisedFilters/test/runtests.jl
index ea8dfa2..7d50230 100644
--- a/GeneralisedFilters/test/runtests.jl
+++ b/GeneralisedFilters/test/runtests.jl
@@ -21,7 +21,7 @@ include("resamplers.jl")
         for Dy in Dys
             rng = StableRNG(1234)
             model = GeneralisedFilters.GFTest.create_linear_gaussian_model(rng, Dx, Dy)
-            _, _, ys = sample(rng, model, 1)
+            _, ys = sample(rng, model, 1)
 
             filtered, ll = GeneralisedFilters.filter(rng, model, KalmanFilter(), ys)
 
@@ -61,7 +61,7 @@ end
         for Dy in Dys
             rng = StableRNG(1234)
             model = GeneralisedFilters.GFTest.create_linear_gaussian_model(rng, Dx, Dy)
-            _, _, ys = sample(rng, model, 2)
+            _, ys = sample(rng, model, 2)
 
             states, ll = GeneralisedFilters.smooth(rng, model, KalmanSmoother(), ys)
 
@@ -88,7 +88,7 @@ end
 
     rng = StableRNG(1234)
    model = GeneralisedFilters.GFTest.create_linear_gaussian_model(rng, 1, 1)
-    _, _, ys = sample(rng, model, 10)
+    _, ys = sample(rng, model, 10)
 
     bf = BF(2^12; threshold=0.8)
     bf_state, llbf = GeneralisedFilters.filter(rng, model, bf, ys)
@@ -128,7 +128,7 @@ end
 
     rng = StableRNG(1234)
     model = GeneralisedFilters.GFTest.create_linear_gaussian_model(rng, 1, 1)
-    _, _, ys = sample(rng, model, 10)
+    _, ys = sample(rng, model, 10)
 
     algo = PF(2^10, LinearGaussianProposal(); threshold=0.6)
     kf_states, kf_ll = GeneralisedFilters.filter(rng, model, KalmanFilter(), ys)
@@ -212,7 +212,7 @@ end
     full_model, hier_model = GeneralisedFilters.GFTest.create_dummy_linear_gaussian_model(
         rng, D_outer, D_inner, D_obs
     )
-    _, _, ys = sample(rng, full_model, T)
+    _, ys = sample(rng, full_model, T)
 
     # Ground truth Kalman filtering
     kf_states, kf_ll = GeneralisedFilters.filter(rng, full_model, KalmanFilter(), ys)
@@ -255,7 +255,7 @@ end
     full_model, hier_model = GeneralisedFilters.GFTest.create_dummy_linear_gaussian_model(
         rng, D_outer, D_inner, D_obs, ET
     )
-    _, _, ys = sample(rng, full_model, T)
+    _, ys = sample(rng, full_model, T)
 
     # Ground truth Kalman filtering
     kf_state, kf_ll = GeneralisedFilters.filter(full_model, KalmanFilter(), ys)
@@ -290,7 +290,7 @@
     full_model, hier_model = GeneralisedFilters.GFTest.create_dummy_linear_gaussian_model(
         rng, 1, 1, 1
     )
-    _, _, ys = sample(rng, full_model, T)
+    _, ys = sample(rng, full_model, T)
 
     # Manually create tree to force expansion on second step
     particle_type = GeneralisedFilters.RaoBlackwellisedParticle{
@@ -325,7 +325,7 @@ end
     full_model, hier_model = GeneralisedFilters.GFTest.create_dummy_linear_gaussian_model(
         rng, D_outer, D_inner, D_obs
     )
-    _, _, ys = sample(rng, full_model, T)
+    _, ys = sample(rng, full_model, T)
 
     # Ground truth Kalman filtering
     kf_states, kf_ll = GeneralisedFilters.filter(rng, full_model, KalmanFilter(), ys)
@@ -373,7 +373,7 @@ end
 
     rng = StableRNG(SEED)
     model = GeneralisedFilters.GFTest.create_linear_gaussian_model(rng, 1, 1)
-    _, _, ys = sample(rng, model, K)
+    _, ys = sample(rng, model, K)
 
     ref_traj = OffsetVector([rand(rng, 1) for _ in 0:K], -1)
 
@@ -413,7 +413,7 @@ end
 
     rng = StableRNG(SEED)
     model = GeneralisedFilters.GFTest.create_linear_gaussian_model(rng, Dx, Dy)
-    _, _, ys = sample(rng, model, K)
+    _, ys = sample(rng, model, K)
 
     # Kalman smoother
     state, ks_ll = GeneralisedFilters.smooth(
@@ -477,7 +477,7 @@ end
     full_model, hier_model = GeneralisedFilters.GFTest.create_dummy_linear_gaussian_model(
         rng, D_outer, D_inner, D_obs, T; static_arrays=true
     )
-    _, _, ys = sample(rng, full_model, K)
+    _, ys = sample(rng, full_model, K)
 
     # Kalman smoother
     state, _ = GeneralisedFilters.smooth(
@@ -562,7 +562,7 @@ end
     full_model, hier_model = GeneralisedFilters.GFTest.create_dummy_linear_gaussian_model(
         rng, D_outer, D_inner, D_obs, T
     )
-    _, _, ys = sample(rng, full_model, K)
+    _, ys = sample(rng, full_model, K)
 
     # Generate random reference trajectory
     ref_trajectory = [CuArray(rand(rng, T, D_outer, 1)) for _ in 0:K]
@@ -595,7 +595,7 @@ end
     full_model, hier_model = GeneralisedFilters.GFTest.create_dummy_linear_gaussian_model(
         rng, D_outer, D_inner, D_obs, T
     )
-    _, _, ys = sample(rng, full_model, K)
+    _, ys = sample(rng, full_model, K)
 
     # Manually create tree to force expansion on second step
     M = N_particles * 2 - 1
@@ -642,7 +642,7 @@ end
     full_model, hier_model = GeneralisedFilters.GFTest.create_dummy_linear_gaussian_model(
         rng, D_outer, D_inner, D_obs, T
     )
-    _, _, ys = sample(rng, full_model, K)
+    _, ys = sample(rng, full_model, K)
 
     # Kalman smoother
     state, _ = GeneralisedFilters.smooth(
diff --git a/SSMProblems/Project.toml b/SSMProblems/Project.toml
index e445d32..9919615 100644
--- a/SSMProblems/Project.toml
+++ b/SSMProblems/Project.toml
@@ -1,14 +1,12 @@
 name = "SSMProblems"
 uuid = "26aad666-b158-4e64-9d35-0e672562fa48"
-authors = [
-    "FredericWantiez ",
-    "THargreaves "
-]
-version = "0.5.2"
+authors = ["FredericWantiez ", "THargreaves "]
+version = "0.6.0"
 
 [deps]
 AbstractMCMC = "80f14c24-f653-4e6a-9b94-39d6b0f70001"
 Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
+OffsetArrays = "6fe1bfb0-de20-5000-8ca7-80f57d26f881"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 
 [extras]
diff --git a/SSMProblems/examples/kalman-filter/script.jl b/SSMProblems/examples/kalman-filter/script.jl
index 4cf7114..91c8535 100644
--- a/SSMProblems/examples/kalman-filter/script.jl
+++ b/SSMProblems/examples/kalman-filter/script.jl
@@ -157,7 +157,7 @@ model = StateSpaceModel(dyn, obs);
 # functions we defined above.
 
 rng = MersenneTwister(SEED);
-x0, xs, ys = sample(rng, model, T);
+xs, ys = sample(rng, model, T);
 
 # We can then run the Kalman filter and plot the filtering results against the ground truth.
 
@@ -165,8 +165,8 @@ x_filts, P_filts = AbstractMCMC.sample(model, KalmanFilter(), ys);
 
 # Plot trajectory for first dimension
 p = plot(; title="First Dimension Kalman Filter Estimates", xlabel="Step", ylabel="Value")
-plot!(p, 1:T, first.(xs); label="Truth")
-scatter!(p, 1:T, first.(ys); label="Observations")
+plot!(p, first.(xs); label="Truth")
+scatter!(p, first.(ys); label="Observations")
 plot!(
     p,
     1:T,
diff --git a/SSMProblems/src/utils/forward_simulation.jl b/SSMProblems/src/utils/forward_simulation.jl
index cdafc10..d6dde17 100644
--- a/SSMProblems/src/utils/forward_simulation.jl
+++ b/SSMProblems/src/utils/forward_simulation.jl
@@ -1,6 +1,9 @@
 """Forward simulation of state space models."""
 
+using OffsetArrays: OffsetVector
+
 import AbstractMCMC: sample
+
 export sample
 
 """
@@ -8,8 +11,9 @@ export sample
 
 Simulate a trajectory of length `T` from the state space model.
 
-Returns a tuple `(x0, xs, ys)` where `x0` is the initial state, `xs` is a vector of latent states,
-and `ys` is a vector of observations.
+Returns a tuple `(xs, ys)` where `xs` is an (offset) vector of latent states, and `ys` is a
+vector of observations. The latent states are indexed from `0` to `T`, where `xs[0]` is the
+initial state.
 """
 function sample(
     rng::AbstractRNG, model::StateSpaceModel{<:Real,LD,OP}, T::Integer; kwargs...
@@ -17,16 +21,16 @@ function sample(
     T_dyn = eltype(LD)
     T_obs = eltype(OP)
 
-    xs = Vector{T_dyn}(undef, T)
+    xs = OffsetVector(Vector{T_dyn}(undef, T + 1), -1)
     ys = Vector{T_obs}(undef, T)
 
-    x0 = simulate(rng, model.dyn; kwargs...)
+    xs[0] = simulate(rng, model.dyn; kwargs...)
     for t in 1:T
-        xs[t] = simulate(rng, model.dyn, t, t == 1 ? x0 : xs[t - 1]; kwargs...)
+        xs[t] = simulate(rng, model.dyn, t, xs[t - 1]; kwargs...)
         ys[t] = simulate(rng, model.obs, t, xs[t]; kwargs...)
     end
 
-    return x0, xs, ys
+    return xs, ys
 end
 
 """
@@ -36,4 +40,4 @@ Simulate a trajectory using the default random number generator.
 """
 function sample(model::AbstractStateSpaceModel, T::Integer; kwargs...)
     return sample(default_rng(), model, T; kwargs...)
-end
\ No newline at end of file
+end
diff --git a/research/maximum_likelihood/script.jl b/research/maximum_likelihood/script.jl
index 41404a6..a76f420 100644
--- a/research/maximum_likelihood/script.jl
+++ b/research/maximum_likelihood/script.jl
@@ -25,7 +25,7 @@ end
 # data generation process
 rng = MersenneTwister(1234)
 true_model = toy_model(1.0)
-_, _, ys = sample(rng, true_model, 1000)
+_, ys = sample(rng, true_model, 1000)
 
 # evaluate and return the log evidence
 function logℓ(θ, data)
diff --git a/research/variational_filter/script.jl b/research/variational_filter/script.jl
index b53c134..93ec50f 100644
--- a/research/variational_filter/script.jl
+++ b/research/variational_filter/script.jl
@@ -37,7 +37,7 @@ end
 
 rng = MersenneTwister(1234)
 true_model = toy_model(Float32, 10, 10)
-_, _, ys = sample(rng, true_model, 100)
+_, ys = sample(rng, true_model, 100)
 
 
 # ## Deep Gaussian Proposal
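For context, a minimal usage sketch of the new `sample` return convention (illustrative only, not part of the patch). It assumes the helpers exercised in the tests above (`GFTest.create_linear_gaussian_model`, `KalmanFilter`, `StableRNG`) are available in the test environment.

using GeneralisedFilters, SSMProblems
using StableRNGs

rng = StableRNG(1234)
model = GeneralisedFilters.GFTest.create_linear_gaussian_model(rng, 2, 2)

# `sample` now returns (xs, ys): `xs` is an OffsetVector indexed 0:T, with
# xs[0] holding the initial state; `ys` remains an ordinary Vector over 1:T.
T = 10
xs, ys = sample(rng, model, T)
@assert firstindex(xs) == 0 && lastindex(xs) == T
@assert length(ys) == T

# Code that previously destructured `x0, xs, ys` now reads the initial state
# as xs[0] and the transitioned states as xs[1:T].
x0 = xs[0]
filtered, ll = GeneralisedFilters.filter(rng, model, KalmanFilter(), ys)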