From d3c02bd6bc5bed59073acdeaa8a79b3aaabcc832 Mon Sep 17 00:00:00 2001 From: Ray Kim Date: Tue, 4 Jun 2024 00:23:29 +0100 Subject: [PATCH 1/2] fix rename `adbackend` to `adtype` --- README.md | 4 ++-- docs/src/elbo/repgradelbo.md | 6 +++--- docs/src/examples.md | 2 +- src/AdvancedVI.jl | 4 ++-- src/objectives/elbo/repgradelbo.jl | 10 +++++----- src/optimize.jl | 6 +++--- test/inference/repgradelbo_distributionsad.jl | 8 ++++---- test/inference/repgradelbo_locationscale.jl | 8 ++++---- test/inference/repgradelbo_locationscale_bijectors.jl | 8 ++++---- 9 files changed, 28 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index 591ce729..45cc197c 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ function LogDensityProblems.capabilities(::Type{<:NormalLogNormal}) end ``` -Since the support of `x` is constrained to be positive, and VI is best done in the unconstrained Euclidean space, we need to use a *bijector* to transform `x` into unconstrained Euclidean space. We will use the [`Bijectors.jl`](https://github.com/TuringLang/Bijectors.jl) package for this purpose. +Since the support of `x` is constrained to be positive and VI is best done in the unconstrained Euclidean space, we need to use a *bijector* to transform `x` into unconstrained Euclidean space. We will use the [`Bijectors.jl`](https://github.com/TuringLang/Bijectors.jl) package for this purpose. This corresponds to the automatic differentiation variational inference (ADVI) formulation[^KTRGB2017]. ```julia using Bijectors @@ -99,7 +99,7 @@ q, stats, _ = AdvancedVI.optimize( elbo, q_transformed, max_iter; - adbackend = ADTypes.AutoForwardDiff(), + adtype = ADTypes.AutoForwardDiff(), optimizer = Optimisers.Adam(1e-3) ) diff --git a/docs/src/elbo/repgradelbo.md b/docs/src/elbo/repgradelbo.md index 6c2d103d..24e99169 100644 --- a/docs/src/elbo/repgradelbo.md +++ b/docs/src/elbo/repgradelbo.md @@ -189,7 +189,7 @@ _, stats_cfe, _ = AdvancedVI.optimize( q0_trans, max_iter; show_progress = false, - adbackend = AutoForwardDiff(), + adtype = AutoForwardDiff(), optimizer = Optimisers.Adam(1e-3) ); @@ -199,7 +199,7 @@ _, stats_stl, _ = AdvancedVI.optimize( q0_trans, max_iter; show_progress = false, - adbackend = AutoForwardDiff(), + adtype = AutoForwardDiff(), optimizer = Optimisers.Adam(1e-3) ); @@ -264,7 +264,7 @@ _, stats_qmc, _ = AdvancedVI.optimize( q0_trans, max_iter; show_progress = false, - adbackend = AutoForwardDiff(), + adtype = AutoForwardDiff(), optimizer = Optimisers.Adam(1e-3) ); diff --git a/docs/src/examples.md b/docs/src/examples.md index da2b5ae9..dbf1de45 100644 --- a/docs/src/examples.md +++ b/docs/src/examples.md @@ -102,7 +102,7 @@ q_trans, stats, _ = AdvancedVI.optimize( q0_trans, n_max_iter; show_progress = false, - adbackend = AutoForwardDiff(), + adtype = AutoForwardDiff(), optimizer = Optimisers.Adam(1e-3) ); nothing diff --git a/src/AdvancedVI.jl b/src/AdvancedVI.jl index eeedd714..9fc986d3 100644 --- a/src/AdvancedVI.jl +++ b/src/AdvancedVI.jl @@ -93,14 +93,14 @@ export estimate_objective """ - estimate_gradient!(rng, obj, adbackend, out, prob, λ, restructure, obj_state) + estimate_gradient!(rng, obj, adtype, out, prob, λ, restructure, obj_state) Estimate (possibly stochastic) gradients of the variational objective `obj` targeting `prob` with respect to the variational parameters `λ` # Arguments - `rng::Random.AbstractRNG`: Random number generator. - `obj::AbstractVariationalObjective`: Variational objective. -- `adbackend::ADTypes.AbstractADType`: Automatic differentiation backend. 
+- `adtype::ADTypes.AbstractADType`: Automatic differentiation backend. - `out::DiffResults.MutableDiffResult`: Buffer containing the objective value and gradient estimates. - `prob`: The target log-joint likelihood implementing the `LogDensityProblem` interface. - `λ`: Variational parameters to evaluate the gradient on. diff --git a/src/objectives/elbo/repgradelbo.jl b/src/objectives/elbo/repgradelbo.jl index 13eacf18..2d95d076 100644 --- a/src/objectives/elbo/repgradelbo.jl +++ b/src/objectives/elbo/repgradelbo.jl @@ -95,10 +95,10 @@ estimate_objective(obj::RepGradELBO, q, prob; n_samples::Int = obj.n_samples) = estimate_objective(Random.default_rng(), obj, q, prob; n_samples) function estimate_gradient!( - rng ::Random.AbstractRNG, - obj ::RepGradELBO, - adbackend::ADTypes.AbstractADType, - out ::DiffResults.MutableDiffResult, + rng ::Random.AbstractRNG, + obj ::RepGradELBO, + adtype::ADTypes.AbstractADType, + out ::DiffResults.MutableDiffResult, prob, λ, restructure, @@ -112,7 +112,7 @@ function estimate_gradient!( elbo = energy + entropy -elbo end - value_and_gradient!(adbackend, f, λ, out) + value_and_gradient!(adtype, f, λ, out) nelbo = DiffResults.value(out) stat = (elbo=-nelbo,) diff --git a/src/optimize.jl b/src/optimize.jl index a1ded51d..8f49d801 100644 --- a/src/optimize.jl +++ b/src/optimize.jl @@ -16,7 +16,7 @@ The variational approximation can be constructed by passing the variational para - `objargs...`: Arguments to be passed to `objective`. # Keyword Arguments -- `adbackend::ADtypes.AbstractADType`: Automatic differentiation backend. +- `adtype::ADtypes.AbstractADType`: Automatic differentiation backend. - `optimizer::Optimisers.AbstractRule`: Optimizer used for inference. (Default: `Adam`.) - `rng::AbstractRNG`: Random number generator. (Default: `Random.default_rng()`.) - `show_progress::Bool`: Whether to show the progress bar. (Default: `true`.) @@ -54,7 +54,7 @@ function optimize( params_init, max_iter ::Int, objargs...; - adbackend ::ADTypes.AbstractADType, + adtype ::ADTypes.AbstractADType, optimizer ::Optimisers.AbstractRule = Optimisers.Adam(), show_progress::Bool = true, state_init ::NamedTuple = NamedTuple(), @@ -77,7 +77,7 @@ function optimize( stat = (iteration=t,) grad_buf, obj_st, stat′ = estimate_gradient!( - rng, objective, adbackend, grad_buf, problem, + rng, objective, adtype, grad_buf, problem, λ, restructure, obj_st, objargs... 
) stat = merge(stat, stat′) diff --git a/test/inference/repgradelbo_distributionsad.jl b/test/inference/repgradelbo_distributionsad.jl index 53105a4a..f4b94235 100644 --- a/test/inference/repgradelbo_distributionsad.jl +++ b/test/inference/repgradelbo_distributionsad.jl @@ -14,7 +14,7 @@ using Test :RepGradELBOClosedFormEntropy => RepGradELBO(n_montecarlo), :RepGradELBOStickingTheLanding => RepGradELBO(n_montecarlo, entropy = StickingTheLandingEntropy()), ), - (adbackname, adbackend) ∈ Dict( + (adbackname, adtype) ∈ Dict( :ForwarDiff => AutoForwardDiff(), #:ReverseDiff => AutoReverseDiff(), :Zygote => AutoZygote(), @@ -39,7 +39,7 @@ using Test rng, model, objective, q0, T; optimizer = Optimisers.Adam(realtype(η)), show_progress = PROGRESS, - adbackend = adbackend, + adtype = adtype, ) μ = mean(q) @@ -57,7 +57,7 @@ using Test rng, model, objective, q0, T; optimizer = Optimisers.Adam(realtype(η)), show_progress = PROGRESS, - adbackend = adbackend, + adtype = adtype, ) μ = mean(q) L = sqrt(cov(q)) @@ -67,7 +67,7 @@ using Test rng_repl, model, objective, q0, T; optimizer = Optimisers.Adam(realtype(η)), show_progress = PROGRESS, - adbackend = adbackend, + adtype = adtype, ) μ_repl = mean(q) L_repl = sqrt(cov(q)) diff --git a/test/inference/repgradelbo_locationscale.jl b/test/inference/repgradelbo_locationscale.jl index 1a200474..6dd83990 100644 --- a/test/inference/repgradelbo_locationscale.jl +++ b/test/inference/repgradelbo_locationscale.jl @@ -15,7 +15,7 @@ using Test :RepGradELBOClosedFormEntropy => RepGradELBO(n_montecarlo), :RepGradELBOStickingTheLanding => RepGradELBO(n_montecarlo, entropy = StickingTheLandingEntropy()), ), - (adbackname, adbackend) in Dict( + (adbackname, adtype) in Dict( :ForwarDiff => AutoForwardDiff(), :ReverseDiff => AutoReverseDiff(), :Zygote => AutoZygote(), @@ -43,7 +43,7 @@ using Test rng, model, objective, q0, T; optimizer = Optimisers.Adam(realtype(η)), show_progress = PROGRESS, - adbackend = adbackend, + adtype = adtype, ) μ = q.location @@ -61,7 +61,7 @@ using Test rng, model, objective, q0, T; optimizer = Optimisers.Adam(realtype(η)), show_progress = PROGRESS, - adbackend = adbackend, + adtype = adtype, ) μ = q.location L = q.scale @@ -71,7 +71,7 @@ using Test rng_repl, model, objective, q0, T; optimizer = Optimisers.Adam(realtype(η)), show_progress = PROGRESS, - adbackend = adbackend, + adtype = adtype, ) μ_repl = q.location L_repl = q.scale diff --git a/test/inference/repgradelbo_locationscale_bijectors.jl b/test/inference/repgradelbo_locationscale_bijectors.jl index 41a4d740..036f1b82 100644 --- a/test/inference/repgradelbo_locationscale_bijectors.jl +++ b/test/inference/repgradelbo_locationscale_bijectors.jl @@ -14,7 +14,7 @@ using Test :RepGradELBOClosedFormEntropy => RepGradELBO(n_montecarlo), :RepGradELBOStickingTheLanding => RepGradELBO(n_montecarlo, entropy = StickingTheLandingEntropy()), ), - (adbackname, adbackend) in Dict( + (adbackname, adtype) in Dict( :ForwarDiff => AutoForwardDiff(), :ReverseDiff => AutoReverseDiff(), #:Zygote => AutoZygote(), @@ -48,7 +48,7 @@ using Test rng, model, objective, q0_z, T; optimizer = Optimisers.Adam(realtype(η)), show_progress = PROGRESS, - adbackend = adbackend, + adtype = adtype, ) μ = q.dist.location @@ -66,7 +66,7 @@ using Test rng, model, objective, q0_z, T; optimizer = Optimisers.Adam(realtype(η)), show_progress = PROGRESS, - adbackend = adbackend, + adtype = adtype, ) μ = q.dist.location L = q.dist.scale @@ -76,7 +76,7 @@ using Test rng_repl, model, objective, q0_z, T; optimizer = 
Optimisers.Adam(realtype(η)), show_progress = PROGRESS, - adbackend = adbackend, + adtype = adtype, ) μ_repl = q.dist.location L_repl = q.dist.scale From 06814458a668324ad77bc489b420d1c423fc9ca8 Mon Sep 17 00:00:00 2001 From: Ray Kim Date: Tue, 4 Jun 2024 00:30:12 +0100 Subject: [PATCH 2/2] fix interface tests rename `adbackend` to `adtype` --- test/interface/optimize.jl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test/interface/optimize.jl b/test/interface/optimize.jl index 6e69616b..9666893b 100644 --- a/test/interface/optimize.jl +++ b/test/interface/optimize.jl @@ -14,7 +14,7 @@ using Test q0 = TuringDiagMvNormal(zeros(Float64, n_dims), ones(Float64, n_dims)) obj = RepGradELBO(10) - adbackend = AutoForwardDiff() + adtype = AutoForwardDiff() optimizer = Optimisers.Adam(1e-2) rng = StableRNG(seed) @@ -22,7 +22,7 @@ using Test rng, model, obj, q0, T; optimizer, show_progress = false, - adbackend, + adtype, ) λ_ref, _ = Optimisers.destructure(q_ref) @@ -31,7 +31,7 @@ using Test model, obj, q0, T; optimizer, show_progress = false, - adbackend, + adtype, ) λ₀, re = Optimisers.destructure(q0) @@ -39,7 +39,7 @@ using Test model, obj, re, λ₀, T; optimizer, show_progress = false, - adbackend, + adtype, ) end @@ -51,7 +51,7 @@ using Test rng, model, obj, re, λ₀, T; optimizer, show_progress = false, - adbackend, + adtype, ) @test λ == λ_ref @test stats == stats_ref @@ -67,7 +67,7 @@ using Test _, stats, _ = optimize( rng, model, obj, q0, T; show_progress = false, - adbackend, + adtype, callback ) @test [stat.test_value for stat ∈ stats] == test_values @@ -83,7 +83,7 @@ using Test rng, model, obj, q0, T_first; optimizer, show_progress = false, - adbackend + adtype ) q, stats, _ = optimize( @@ -91,7 +91,7 @@ using Test optimizer, show_progress = false, state_init = state, - adbackend + adtype ) @test q == q_ref end
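
For downstream users, the only call-site change introduced by these two commits is the keyword name passed to `AdvancedVI.optimize`. A minimal sketch of the updated call, assuming `model`, `elbo`, `q_transformed`, and `max_iter` are set up as in the README example shown in the first hunk above (this is an illustrative sketch of the renamed keyword, not a complete runnable script):

```julia
using ADTypes, Optimisers
using AdvancedVI

# `model`, `elbo`, `q_transformed`, and `max_iter` are assumed to be defined
# as in the README example patched above.
q, stats, _ = AdvancedVI.optimize(
    model,
    elbo,
    q_transformed,
    max_iter;
    adtype    = ADTypes.AutoForwardDiff(),  # was: adbackend = ADTypes.AutoForwardDiff()
    optimizer = Optimisers.Adam(1e-3),
)
```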