diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml index 5e865d93a3..d0e00b45f8 100644 --- a/.JuliaFormatter.toml +++ b/.JuliaFormatter.toml @@ -10,7 +10,7 @@ ignore = [ "src/mcmc/abstractmcmc.jl", "test/experimental/gibbs.jl", "test/test_utils/numerical_tests.jl", - # https://github.com/TuringLang/Turing.jl/pull/2218/files + # https://github.com/TuringLang/Turing.jl/pull/2218/files "src/mcmc/Inference.jl", "test/mcmc/Inference.jl", # https://github.com/TuringLang/Turing.jl/pull/1887 # Enzyme PR diff --git a/.github/workflows/Tests.yml b/.github/workflows/Tests.yml index 4f8ac4dcc5..8de296e5ee 100644 --- a/.github/workflows/Tests.yml +++ b/.github/workflows/Tests.yml @@ -67,21 +67,43 @@ jobs: echo "Julia version: ${{ matrix.version }}" echo "Number of threads: ${{ matrix.num_threads }}" echo "Test arguments: ${{ matrix.test-args }}" + - name: (De)activate coverage analysis + run: echo "COVERAGE=${{ matrix.version == '1' && matrix.os == 'ubuntu-latest' && matrix.num_threads == 2 }}" >> "$GITHUB_ENV" + shell: bash - uses: actions/checkout@v4 - uses: julia-actions/setup-julia@v2 with: version: '${{ matrix.version }}' arch: ${{ matrix.arch }} - - uses: actions/cache@v4 - env: - cache-name: cache-artifacts - with: - path: ~/.julia/artifacts - key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} - restore-keys: | - ${{ runner.os }}-test-${{ env.cache-name }}- - ${{ runner.os }}-test- - ${{ runner.os }}- - - uses: julia-actions/julia-buildpkg@latest + - uses: julia-actions/cache@v1 + - uses: julia-actions/julia-buildpkg@v1 + # TODO: Use julia-actions/julia-runtest when test_args are supported + # Custom calls of Pkg.test tend to miss features such as e.g. adjustments for CompatHelper PRs + # Ref https://github.com/julia-actions/julia-runtest/pull/73 - name: Call Pkg.test - run: julia --color=yes --depwarn=yes --check-bounds=yes --threads=${{ matrix.num_threads }} --project=@. -e 'import Pkg; Pkg.test(; test_args=ARGS)' -- ${{ matrix.test-args }} + run: julia --color=yes --inline=yes --depwarn=yes --check-bounds=yes --threads=${{ matrix.num_threads }} --project=@. -e 'import Pkg; Pkg.test(; coverage=parse(Bool, ENV["COVERAGE"]), test_args=ARGS)' -- ${{ matrix.test-args }} + - uses: julia-actions/julia-processcoverage@v1 + if: ${{ env.COVERAGE }} + - uses: codecov/codecov-action@v4 + if: ${{ env.COVERAGE }} + with: + fail_ci_if_error: true + token: ${{ secrets.CODECOV_TOKEN }} + file: lcov.info + - uses: coverallsapp/github-action@v2 + if: ${{ env.COVERAGE }} + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + path-to-lcov: lcov.info + flag-name: run-${{ join(matrix.*, '-') }} + parallel: true + + finish: + needs: test + if: ${{ always() }} + runs-on: ubuntu-latest + steps: + - name: Coveralls Finished + uses: coverallsapp/github-action@v2 + with: + parallel-finished: true diff --git a/README.md b/README.md index 4ea12d52e9..d4096aa5c8 100644 --- a/README.md +++ b/README.md @@ -15,13 +15,6 @@ https://turinglang.org/docs/ See [releases](https://github.com/TuringLang/Turing.jl/releases). -## Want to contribute? - -Turing was originally created and is now managed by Hong Ge. Current and past Turing team members include [Hong Ge](http://mlg.eng.cam.ac.uk/hong/), [Kai Xu](http://mlg.eng.cam.ac.uk/?portfolio=kai-xu), [Martin Trapp](http://martint.blog), [Mohamed Tarek](https://github.com/mohamed82008), [Cameron Pfiffer](https://business.uoregon.edu/faculty/cameron-pfiffer), [Tor Fjelde](http://retiredparkingguard.com/about.html). -You can see the complete list on Github: https://github.com/TuringLang/Turing.jl/graphs/contributors. - -Turing is an open source project so if you feel you have some relevant skills and are interested in contributing, please get in touch. See the [Contributing](https://turinglang.org/dev/docs/contributing/guide) page for details on the process. You can contribute by opening issues on Github, implementing things yourself, and making a pull request. We would also appreciate example models written using Turing. - ## Issues and Discussions Issues related to bugs and feature requests are welcome on the [issues page](https://github.com/TuringLang/Turing.jl/issues), while discussions and questions about statistical applications and theory should place on the [Discussions page](https://github.com/TuringLang/Turing.jl/discussions) or [our channel](https://julialang.slack.com/messages/turing/) (`#turing`) in the Julia Slack chat. If you do not have an invitation to Julia's Slack, you can get one by going [here](https://julialang.org/slack/). diff --git a/src/mcmc/Inference.jl b/src/mcmc/Inference.jl index 2d8f7aaed2..5f05121e74 100644 --- a/src/mcmc/Inference.jl +++ b/src/mcmc/Inference.jl @@ -240,6 +240,15 @@ DynamicPPL.getlogp(t::Transition) = t.lp # Metadata of VarInfo object metadata(vi::AbstractVarInfo) = (lp = getlogp(vi),) +# TODO: Implement additional checks for certain samplers, e.g. +# HMC not supporting discrete parameters. +function _check_model(model::DynamicPPL.Model) + return DynamicPPL.check_model(model; error_on_failure=true) +end +function _check_model(model::DynamicPPL.Model, alg::InferenceAlgorithm) + return _check_model(model) +end + ######################################### # Default definitions for the interface # ######################################### @@ -258,8 +267,10 @@ function AbstractMCMC.sample( model::AbstractModel, alg::InferenceAlgorithm, N::Integer; + check_model::Bool=true, kwargs... ) + check_model && _check_model(model, alg) return AbstractMCMC.sample(rng, model, Sampler(alg, model), N; kwargs...) end @@ -282,8 +293,10 @@ function AbstractMCMC.sample( ensemble::AbstractMCMC.AbstractMCMCEnsemble, N::Integer, n_chains::Integer; + check_model::Bool=true, kwargs... ) + check_model && _check_model(model, alg) return AbstractMCMC.sample(rng, model, Sampler(alg, model), ensemble, N, n_chains; kwargs...) end diff --git a/test/mcmc/Inference.jl b/test/mcmc/Inference.jl index f1ad4a621d..64a5e95df0 100644 --- a/test/mcmc/Inference.jl +++ b/test/mcmc/Inference.jl @@ -559,6 +559,28 @@ using Turing @test all(xs[:, 1] .=== [1, missing, 3]) @test all(xs[:, 2] .=== [missing, 2, 4]) end + + @testset "check model" begin + @model function demo_repeated_varname() + x ~ Normal(0, 1) + x ~ Normal(x, 1) + end + + @test_throws ErrorException sample( + demo_repeated_varname(), NUTS(), 1000; check_model=true + ) + # Make sure that disabling the check also works. + @test (sample( + demo_repeated_varname(), Prior(), 10; check_model=false + ); true) + + @model function demo_incorrect_missing(y) + y[1:1] ~ MvNormal(zeros(1), 1) + end + @test_throws ErrorException sample( + demo_incorrect_missing([missing]), NUTS(), 1000; check_model=true + ) + end end end diff --git a/test/mcmc/gibbs.jl b/test/mcmc/gibbs.jl index 5159e022b9..f30dc0f777 100644 --- a/test/mcmc/gibbs.jl +++ b/test/mcmc/gibbs.jl @@ -50,7 +50,9 @@ using Turing.RandomMeasures: ChineseRestaurantProcess, DirichletProcess Random.seed!(100) alg = Gibbs(CSMC(15, :s), HMC(0.2, 4, :m; adtype=adbackend)) chain = sample(gdemo(1.5, 2.0), alg, 10_000) - check_numerical(chain, [:s, :m], [49 / 24, 7 / 6]; atol=0.15) + check_numerical(chain, [:m], [7 / 6]; atol=0.15) + # Be more relaxed with the tolerance of the variance. + check_numerical(chain, [:s], [49 / 24]; atol=0.35) Random.seed!(100) diff --git a/test/mcmc/hmc.jl b/test/mcmc/hmc.jl index c802f8d9e7..968f24d7b7 100644 --- a/test/mcmc/hmc.jl +++ b/test/mcmc/hmc.jl @@ -319,7 +319,7 @@ using Turing # The discrepancies in the chains are in the tails, so we can't just compare the mean, etc. # KS will compare the empirical CDFs, which seems like a reasonable thing to do here. - @test pvalue(ApproximateTwoSampleKSTest(vec(results), vec(results_prior))) > 0.01 + @test pvalue(ApproximateTwoSampleKSTest(vec(results), vec(results_prior))) > 0.001 end end diff --git a/test/mcmc/is.jl b/test/mcmc/is.jl index bd3186cd93..47b20cc736 100644 --- a/test/mcmc/is.jl +++ b/test/mcmc/is.jl @@ -46,7 +46,7 @@ using Turing ref = reference(n) Random.seed!(seed) - chain = sample(model, alg, n) + chain = sample(model, alg, n; check_model=false) sampled = get(chain, [:a, :b, :lp]) @test vec(sampled.a) == ref.as diff --git a/test/mcmc/mh.jl b/test/mcmc/mh.jl index 0e3cc91f6f..a01d3dc253 100644 --- a/test/mcmc/mh.jl +++ b/test/mcmc/mh.jl @@ -44,21 +44,26 @@ GKernel(var) = (x) -> Normal(x, sqrt.(var)) # c6 = sample(gdemo_default, s6, N) end @testset "mh inference" begin + # Set the initial parameters, because if we get unlucky with the initial state, + # these chains are too short to converge to reasonable numbers. + discard_initial = 1000 + initial_params = [1.0, 1.0] + Random.seed!(125) alg = MH() - chain = sample(gdemo_default, alg, 10_000) + chain = sample(gdemo_default, alg, 10_000; discard_initial, initial_params) check_gdemo(chain; atol=0.1) Random.seed!(125) # MH with Gaussian proposal alg = MH((:s, InverseGamma(2, 3)), (:m, GKernel(1.0))) - chain = sample(gdemo_default, alg, 10_000) + chain = sample(gdemo_default, alg, 10_000; discard_initial, initial_params) check_gdemo(chain; atol=0.1) Random.seed!(125) # MH within Gibbs alg = Gibbs(MH(:m), MH(:s)) - chain = sample(gdemo_default, alg, 10_000) + chain = sample(gdemo_default, alg, 10_000; discard_initial, initial_params) check_gdemo(chain; atol=0.1) Random.seed!(125) @@ -66,8 +71,14 @@ GKernel(var) = (x) -> Normal(x, sqrt.(var)) gibbs = Gibbs( CSMC(15, :z1, :z2, :z3, :z4), MH((:mu1, GKernel(1)), (:mu2, GKernel(1))) ) - chain = sample(MoGtest_default, gibbs, 500) - check_MoGtest_default(chain; atol=0.15) + chain = sample( + MoGtest_default, + gibbs, + 500; + discard_initial=100, + initial_params=[1.0, 1.0, 0.0, 0.0, 1.0, 4.0], + ) + check_MoGtest_default(chain; atol=0.2) end # Test MH shape passing.