CompatHelper: bump compat for Bijectors to 0.13, (keep existing compat) #2018

Merged
Project.toml (2 changes: 1 addition & 1 deletion)
```diff
@@ -43,7 +43,7 @@ AdvancedMH = "0.6.8, 0.7"
 AdvancedPS = "0.4"
 AdvancedVI = "0.2"
 BangBang = "0.3"
-Bijectors = "0.12"
+Bijectors = "0.13.2"
 DataStructures = "0.18"
 Distributions = "0.23.3, 0.24, 0.25"
 DistributionsAD = "0.6"
```
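For context, Julia's `[compat]` entries use caret semantics by default: the new entry `"0.13.2"` admits any `0.13.x` release at or above the `0.13.2` patch, and excludes both the old `0.12` series and the future `0.14` series. Below is a minimal sketch, not part of this PR, that checks this with `Pkg.Versions.semver_spec` (assuming the `Pkg.Versions` internals available in recent Julia versions; this is the same parser Pkg applies to `[compat]` entries):

```julia
# Sketch only: verify what the new compat bound `Bijectors = "0.13.2"` admits.
using Pkg

spec = Pkg.Versions.semver_spec("0.13.2")

@assert v"0.13.2" in spec    # lower bound is inclusive
@assert v"0.13.9" in spec    # later patch releases still resolve
@assert !(v"0.14.0" in spec) # next minor series is excluded (caret semantics)
@assert !(v"0.12.5" in spec) # the old 0.12 series no longer resolves
```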
test/contrib/inference/abstractmcmc.jl (10 changes: 6 additions & 4 deletions)
```diff
@@ -41,7 +41,7 @@ function initialize_mh(model)
 end
 
 @testset "External samplers" begin
-    @testset "AdvancedHMC.jl" begin
+    @turing_testset "AdvancedHMC.jl" begin
         for model in DynamicPPL.TestUtils.DEMO_MODELS
             # Need some functionality to initialize the sampler.
             # TODO: Remove this once the constructors in the respective packages become "lazy".
@@ -52,12 +52,13 @@ end
                 5_000;
                 nadapts=1_000,
                 discard_initial=1_000,
-                rtol=0.2
+                rtol=0.2,
+                sampler_name="AdvancedHMC"
             )
         end
     end
 
-    @testset "AdvancedMH.jl" begin
+    @turing_testset "AdvancedMH.jl" begin
         for model in DynamicPPL.TestUtils.DEMO_MODELS
             # Need some functionality to initialize the sampler.
             # TODO: Remove this once the constructors in the respective packages become "lazy".
@@ -68,7 +69,8 @@ end
                 10_000;
                 discard_initial=1_000,
                 thinning=10,
-                rtol=0.2
+                rtol=0.2,
+                sampler_name="AdvancedMH"
             )
         end
     end
```
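The switch from `@testset` to `@turing_testset` hooks these tests into Turing's grouped test runner, and the added `sampler_name` keyword labels the shared test helper's output per external sampler. The real macro lives in Turing's test utilities; the sketch below is only an assumption about the general pattern (gate a standard `@testset` on a `GROUP`-style environment variable so CI can shard the suite), and the names `@group_testset` and `GROUP` are hypothetical:

```julia
using Test

# Hypothetical reconstruction of a group-gated testset macro. Turing's real
# `@turing_testset` differs in detail; this only illustrates the mechanism.
const GROUP = get(ENV, "GROUP", "All")

macro group_testset(name, body)
    if GROUP == "All" || GROUP == "Tests"
        # Group selected: expand to an ordinary @testset.
        return esc(:(@testset $name $body))
    else
        # Group not selected: skip the testset entirely.
        return :(nothing)
    end
end

@group_testset "AdvancedHMC.jl" begin
    @test 1 + 1 == 2
end
```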
test/essential/ad.jl (6 changes: 0 additions & 6 deletions)
```diff
@@ -84,8 +84,6 @@
 @model function dir()
     theta ~ Dirichlet(1 ./ fill(4, 4))
 end
-Turing.setadbackend(:tracker)
-sample(dir(), HMC(0.01, 1), 1000);
 Turing.setadbackend(:zygote)
 sample(dir(), HMC(0.01, 1), 1000)
 Turing.setadbackend(:reversediff)
@@ -99,8 +97,6 @@
 @model function wishart()
     theta ~ Wishart(4, Matrix{Float64}(I, 4, 4))
 end
-Turing.setadbackend(:tracker)
-sample(wishart(), HMC(0.01, 1), 1000);
 Turing.setadbackend(:reversediff)
 sample(wishart(), HMC(0.01, 1), 1000);
 Turing.setadbackend(:zygote)
@@ -109,8 +105,6 @@
 @model function invwishart()
     theta ~ InverseWishart(4, Matrix{Float64}(I, 4, 4))
 end
-Turing.setadbackend(:tracker)
-sample(invwishart(), HMC(0.01, 1), 1000);
 Turing.setadbackend(:reversediff)
 sample(invwishart(), HMC(0.01, 1), 1000);
 Turing.setadbackend(:zygote)
```
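All three deletions follow the same pattern: the `:tracker` AD backend is dropped from the AD round-trip tests, presumably because the new Bijectors release no longer supports Tracker (the diff itself does not state the reason). What survives is the same sample-under-each-backend exercise; a condensed sketch of the post-PR shape of one of these tests (the real file runs each call as a separate statement; the loop here is just for brevity):

```julia
using LinearAlgebra, Turing

@model function wishart()
    theta ~ Wishart(4, Matrix{Float64}(I, 4, 4))
end

# :tracker was removed from this list by the PR.
for backend in (:reversediff, :zygote)
    Turing.setadbackend(backend)
    sample(wishart(), HMC(0.01, 1), 1000)
end
```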
test/modes/OptimInterface.jl (41 changes: 3 additions & 38 deletions)
```diff
@@ -1,38 +1,3 @@
-# TODO: Remove these once the equivalent is present in `DynamicPPL.TestUtils.
-function likelihood_optima(::DynamicPPL.TestUtils.UnivariateAssumeDemoModels)
-    return (s=1/16, m=7/4)
-end
-function posterior_optima(::DynamicPPL.TestUtils.UnivariateAssumeDemoModels)
-    # TODO: Figure out exact for `s`.
-    return (s=0.907407, m=7/6)
-end
-
-function likelihood_optima(model::DynamicPPL.TestUtils.MultivariateAssumeDemoModels)
-    # Get some containers to fill.
-    vals = Random.rand(model)
-
-    # NOTE: These are "as close to zero as we can get".
-    vals.s[1] = 1e-32
-    vals.s[2] = 1e-32
-
-    vals.m[1] = 1.5
-    vals.m[2] = 2.0
-
-    return vals
-end
-function posterior_optima(model::DynamicPPL.TestUtils.MultivariateAssumeDemoModels)
-    # Get some containers to fill.
-    vals = Random.rand(model)
-
-    # TODO: Figure out exact for `s[1]`.
-    vals.s[1] = 0.890625
-    vals.s[2] = 1
-    vals.m[1] = 3/4
-    vals.m[2] = 1
-
-    return vals
-end
-
 # Used for testing how well it works with nested contexts.
 struct OverrideContext{C,T1,T2} <: DynamicPPL.AbstractContext
     context::C
@@ -57,7 +22,7 @@ function DynamicPPL.tilde_observe(context::OverrideContext, right, left, vi)
     return context.loglikelihood_weight, vi
 end
 
-@testset "OptimInterface.jl" begin
+@numerical_testset "OptimInterface.jl" begin
     @testset "MLE" begin
         Random.seed!(222)
         true_value = [0.0625, 1.75]
@@ -157,7 +122,7 @@ end
 # FIXME: Some models doesn't work for Tracker and ReverseDiff.
 if Turing.Essential.ADBACKEND[] === :forwarddiff
     @testset "MAP for $(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS
-        result_true = posterior_optima(model)
+        result_true = DynamicPPL.TestUtils.posterior_optima(model)
 
         @testset "$(nameof(typeof(optimizer)))" for optimizer in [LBFGS(), NelderMead()]
             result = optimize(model, MAP(), optimizer)
@@ -188,7 +153,7 @@ end
         DynamicPPL.TestUtils.demo_dot_assume_matrix_dot_observe_matrix,
     ]
     @testset "MLE for $(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS
-        result_true = likelihood_optima(model)
+        result_true = DynamicPPL.TestUtils.likelihood_optima(model)
 
         # `NelderMead` seems to struggle with convergence here, so we exclude it.
         @testset "$(nameof(typeof(optimizer)))" for optimizer in [LBFGS(),]
```
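The 35 deleted lines are local `likelihood_optima`/`posterior_optima` helpers that computed reference optima for the demo models; per the deleted `TODO`, equivalents now live upstream in `DynamicPPL.TestUtils`, and the call sites are updated accordingly. A hedged usage sketch of the new call path, assuming the upstream functions mirror the call signatures visible in the diff:

```julia
# Sketch: reference optima now come from DynamicPPL.TestUtils rather than
# helpers duplicated in this test file.
using DynamicPPL, Optim, Turing

model = DynamicPPL.TestUtils.DEMO_MODELS[1]

result_true = DynamicPPL.TestUtils.posterior_optima(model)   # MAP reference
mle_true    = DynamicPPL.TestUtils.likelihood_optima(model)  # MLE reference

# Compare against Turing's mode estimation, as the tests do:
result = optimize(model, MAP(), LBFGS())
```

Moving the reference values upstream keeps a single source of truth next to `DEMO_MODELS` itself, so every downstream package tests against the same optima.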