Commit

Merge branch 'master' of github.com:biaslab/ReactiveMP.jl
bvdmitri committed Dec 3, 2021
2 parents 05e5747 + 637dc80 commit 07391a6
Showing 6 changed files with 30 additions and 178 deletions.
3 changes: 1 addition & 2 deletions docs/make.jl
@@ -31,8 +31,7 @@ makedocs(
"Overview" => "lib/node.md",
"Flow" => "lib/nodes/flow.md"
],
"Math utils" => "lib/math.md",
"Helper utils" => "lib/helpers.md"
"Math utils" => "lib/math.md"
],
"Contributing" => "extra/contributing.md",
],
21 changes: 0 additions & 21 deletions docs/src/lib/helpers.md

This file was deleted.

23 changes: 12 additions & 11 deletions src/distributions/sample_list.jl
@@ -81,7 +81,8 @@ sample_list_show(io::IO, ::Type{Multivariate}, sl::SampleList) = print(io, "Samp
sample_list_show(io::IO, ::Type{Matrixvariate}, sl::SampleList) = print(io, "SampleList(Matrixvariate", ndims(sl), ", ", length(sl), ")")

function SampleList(samples::S) where { S <: AbstractVector }
return SampleList(samples, OneDivNVector(deep_eltype(S), length(samples)))
N = length(samples)
return SampleList(samples, fill(one(deep_eltype(S)) / N, N))
end

function SampleList(samples::S, weights::W, meta::M = nothing) where { S, W, M }
@@ -152,8 +153,8 @@ function sample_list_zero_element(sl::SampleList)
return sample_list_zero_element(variate_form(sl), T, sl)
end

sample_list_zero_element(::Type{ Univariate }, ::Type{T}, sl::SampleList) where T = zero(T)
sample_list_zero_element(::Type{ Multivariate }, ::Type{T}, sl::SampleList) where T = zeros(T, ndims(sl))
sample_list_zero_element(::Type{ Univariate }, ::Type{T}, sl::SampleList) where T = zero(T)
sample_list_zero_element(::Type{ Multivariate }, ::Type{T}, sl::SampleList) where T = zeros(T, ndims(sl))
sample_list_zero_element(::Type{ Matrixvariate }, ::Type{T}, sl::SampleList) where T = zeros(T, ndims(sl))

# Generic mean_cov
@@ -317,7 +318,7 @@ function approximate_prod_with_sample_list(rng::AbstractRNG, ::BootstrapImportan

# Resulting samples and weights will go here
rcontainer = similar(y)
# vec here just to convert `OneDivNVector` into an array just in case
# vec here just to convert unusual containers into an array just in case
# does nothing if `get_weights` returns an array
rsamples, rweights = get_samples(rcontainer), vec(get_weights(rcontainer))

@@ -327,7 +328,7 @@ function approximate_prod_with_sample_list(rng::AbstractRNG, ::BootstrapImportan
H_x = zero(eltype(rweights_raw))

# Compute sample weights
@turbo for i in 1:nsamples
@inbounds for i in 1:nsamples
log_sample_x = logpdf(xlogpdf, samples[i]) # evaluate samples in logm4, i.e. logm4(s)
raw_weight = exp(log_sample_x) # m4(s)
raw_weight_prod = raw_weight * weights[i] # update the weights of posterior w_unnormalized = m4(s)*w_prev
@@ -374,7 +375,7 @@ function approximate_prod_with_sample_list(rng::AbstractRNG, ::BootstrapImportan
y_unnormalised_weights = get_unnormalised_weights(y)
r_unnormalised_weights = similar(y_unnormalised_weights)
r_unnormalised_weights_sum = zero(eltype(r_unnormalised_weights))
@turbo for i in 1:nsamples
@inbounds for i in 1:nsamples
r_unnormalised_weights_prod = y_unnormalised_weights[i] * rweights_raw[i]
r_unnormalised_weights[i] = r_unnormalised_weights_prod
r_unnormalised_weights_sum += r_unnormalised_weights_prod
@@ -543,7 +544,7 @@ function sample_list_vague(::Type{ Univariate }, nsamples::Int)
targetdist = vague(Uniform)
preallocated = preallocate_samples(Float64, (), nsamples)
rand!(targetdist, preallocated)
return SampleList(Val(()), preallocated, OneDivNVector(Float64, nsamples), nothing)
return SampleList(Val(()), preallocated, fill(one(Float64) / nsamples, nsamples), nothing)
end

## Multivariate
@@ -562,7 +563,7 @@ function sample_list_covm!(Σ, μ, ::Type{ Multivariate }, sl::SampleList)
tmp = similar(μ)
k = length(tmp)

@turbo for i in 1:n
@inbounds for i in 1:n
for j in 1:k
tmp[j] = samples[(i - 1) * k + j] - μ[j]
end
@@ -608,7 +609,7 @@ function sample_list_vague(::Type{ Multivariate }, dims::Int, nsamples::Int)
targetdist = vague(Uniform)
preallocated = preallocate_samples(Float64, (dims, ), nsamples)
rand!(targetdist, preallocated)
return SampleList(Val((dims, )), preallocated, OneDivNVector(Float64, nsamples), nothing)
return SampleList(Val((dims, )), preallocated, fill(one(Float64) / nsamples, nsamples), nothing)
end

## Matrixvariate
@@ -627,7 +628,7 @@ function sample_list_covm!(Σ, μ, ::Type{ Matrixvariate }, sl::SampleList)
k = length(μ)
rμ = reshape(μ, k)
tmp = similar(rμ)
@turbo for i in 1:n
@inbounds for i in 1:n
for j in 1:k
tmp[j] = samples[(i - 1) * k + j] - μ[j]
end
@@ -673,7 +674,7 @@ function sample_list_vague(::Type{ Matrixvariate }, dims::Tuple{Int, Int}, nsamp
targetdist = vague(Uniform)
preallocated = preallocate_samples(Float64, dims, nsamples)
rand!(targetdist, preallocated)
return SampleList(Val(dims), preallocated, OneDivNVector(Float64, nsamples), nothing)
return SampleList(Val(dims), preallocated, fill(one(Float64) / nsamples, nsamples), nothing)
end

## Array operations, broadcasting and mapping
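For context, here is a minimal usage sketch of the `SampleList` constructor shown above, which now materializes uniform weights with `fill(one(T) / N, N)`. This sketch is not part of the commit; it assumes `SampleList` is exported and that the `ReactiveMP.get_weights` accessor and the `mean` method behave as the surrounding code suggests.

```julia
using ReactiveMP, Random, Statistics

rng = MersenneTwister(42)

# Build a SampleList from raw univariate samples; per the constructor above,
# every sample receives the same weight 1/N.
samples = randn(rng, 1_000)
sl      = SampleList(samples)

length(sl)                            # 1000 stored samples
weights = ReactiveMP.get_weights(sl)  # uniform weight vector, i.e. fill(1/N, N)
sum(weights) ≈ 1.0                    # the weights are normalized
isapprox(mean(sl), 0.0; atol = 0.1)   # weighted sample mean ≈ true mean of N(0, 1)
```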
86 changes: 0 additions & 86 deletions src/helpers.jl
@@ -37,96 +37,10 @@ Base.getindex(iter::SkipIndexIterator, i::CartesianIndex{1}) = Base.getindex(ite

Rocket.similar_typeof(::SkipIndexIterator, ::Type{L}) where L = Vector{L}

cast_to_message_subscribable(some::T) where T = cast_to_message_subscribable(as_subscribable(T), some)

cast_to_message_subscribable(::InvalidSubscribableTrait, some) = of(as_message(some))
cast_to_message_subscribable(::SimpleSubscribableTrait, some) = some |> map(Message, as_message)
cast_to_message_subscribable(::ScheduledSubscribableTrait, some) = some |> map(Message, as_message)

reduce_with_sum(array) = reduce(+, array)

##

"""
OneDivNVector(N::Int)
OneDivNVector(::Type{T}, N::Int) where T
Allocation-free version of `fill(one(T) / N, N)` vector.
# Arguments
- `::Type{T}`: type of elements, optional, Float64 by default, should be a subtype of `Number`
- `N::Int`: number of elements in a container, should be greater than zero
# Examples
```jldoctest
julia> iter = ReactiveMP.OneDivNVector(3)
3-element ReactiveMP.OneDivNVector{3, Float64}:
0.3333333333333333
0.3333333333333333
0.3333333333333333
julia> length(iter)
3
julia> eltype(iter)
Float64
julia> collect(iter)
3-element Vector{Float64}:
0.3333333333333333
0.3333333333333333
0.3333333333333333
julia> iter = ReactiveMP.OneDivNVector(Float32, 3)
3-element ReactiveMP.OneDivNVector{3, Float32}:
0.33333334
0.33333334
0.33333334
julia> collect(iter)
3-element Vector{Float32}:
0.33333334
0.33333334
0.33333334
```
See also: [`SampleList`](@ref)
"""
struct OneDivNVector{N, T} <: AbstractVector{T} end

Base.show(io::IO, ::OneDivNVector{N, T}) where { N, T } = print(io, "OneDivNVector($T, $N)")

function OneDivNVector(N::Int)
return OneDivNVector(Float64, N)
end

function OneDivNVector(::Type{T}, N::Int) where T
@assert N > 0 "OneDivNVector creation error: N should be greater than zero"
@assert T <: Number "OneDivNVector creation error: T should be a subtype of `Number`"
return OneDivNVector{N, T}()
end

Base.IteratorSize(::Type{ <: OneDivNVector }) = Base.HasLength()
Base.IteratorEltype(::Type{ <: OneDivNVector }) = Base.HasEltype()

Base.sum(::OneDivNVector{ N, T }) where { N, T } = one(T)
Base.eltype(::Type{ <: OneDivNVector{N, T} }) where { N, T } = T
Base.length(::OneDivNVector{N}) where N = N
Base.size(::OneDivNVector{N}) where N = (N, )

Base.iterate(::OneDivNVector{N, T}) where { N, T } = (one(T) / N, 1)
Base.iterate(::OneDivNVector{N, T}, state) where { N, T } = state >= N ? nothing : (one(T) / N, state + 1)

Base.getindex(v::OneDivNVector{N, T}, index::Int) where { N, T } = 1 <= index <= N ? (one(T) / N) : throw(BoundsError(v, index))

Base.similar(v::OneDivNVector) = v
Base.similar(v::OneDivNVector{N}, ::Type{ T }) where { N, T } = OneDivNVector(T, N)

Base.vec(::OneDivNVector{N, T}) where { N, T } = fill(one(T) / N, N)

##

import Base: +, -, *, /, convert, float, isfinite, isinf, zero, eltype

struct InfCountingReal{ T <: Real }
34 changes: 17 additions & 17 deletions test/distributions/test_sample_list.jl
@@ -172,18 +172,18 @@ import ReactiveMP: approximate_prod_with_sample_list
uni_samples2 = rand(rng, uni_distribution2, 20_000)
uni_sample_list2 = SampleList(uni_samples2)

m = rand(rng, 3)
r = rand(rng, 3)
Σ = diageye(3) + 2r*r' # positive definite matrix
mv_distribution = MvNormal(r, Σ)
Σ = I + 2r*r'
mv_distribution = MvNormal(m, Σ)
mv_samples = [ rand(rng, mv_distribution) for _ in 1:20_000 ]
mv_sample_list = SampleList(mv_samples)

r1 = rand(rng, 3)
W1 = rand(3, 4)
W1 = rand(rng, 3, 4)
r2 = rand(rng, 3)
W2 = diageye(3) + 2r2*r2' # positive definite matrix
W2 = I + 2r2*r2'
r3 = rand(rng, 4)
W3 = diageye(4) + 2r3*r3' # positive definite matrix
W3 = I + 2r3*r3'
mxv_distribution = MatrixNormal(W1, W2, W3)
mxv_samples = [ rand(rng, mxv_distribution) for _ in 1:20_000 ]
mxv_sample_list = SampleList(mxv_samples)
@@ -231,7 +231,7 @@ import ReactiveMP: approximate_prod_with_sample_list
@test isapprox(meanlogmean(uni_sample_list), meanlogmean(uni_distribution); atol = 0.25)

r4 = rand(rng, 5)
W4 = diageye(5) + 2r4*r4' # positive definite matrix
W4 = I + 2r4*r4'

mxv_distribution = Wishart(5, W4)
mxv_samples = [ rand(rng, mxv_distribution) for _ in 1:20_000 ]
@@ -240,8 +240,8 @@ import ReactiveMP: approximate_prod_with_sample_list
# Checking i = 1:2 that cache is not corrupted
for i in 1:2
@test isapprox(mean(mxv_sample_list), mean(mxv_distribution), atol = 1.0)
@test isapprox(var(mxv_sample_list), var(mxv_distribution), atol = 3.0)
@test isapprox(cov(mxv_sample_list), cov(mxv_distribution), atol = 10.0)
@test isapprox(var(mxv_sample_list), var(mxv_distribution), atol = 1.0)
@test isapprox(cov(mxv_sample_list), cov(mxv_distribution), atol = 5.0)
end
end

@@ -317,13 +317,13 @@ import ReactiveMP: approximate_prod_with_sample_list

uni_distribution = Uniform(-10rand(rng), 10rand(rng))

μ = rand(rng, 3)
r1 = rand(rng, 3)
Σ = I + 2r1*r1'
μ = rand(rng, 3)
L1 = rand(rng, 3, 3)
Σ = L1' * L1
mv_distribution = MvNormal(μ, Σ)

r2 = rand(rng, 3)
W = I + 2r2*r2'
L2 = rand(rng, 3, 3)
W = L2' * L2
mvx_distribution = Wishart(3, W)

# Entity to entity
@@ -380,15 +380,15 @@ import ReactiveMP: approximate_prod_with_sample_list

rng = StableRNG(1234)

posdefm(rng, s) = begin r = rand(rng, s); I + 2r*r' end
posdefm(rng, s) = begin L = rand(rng, s, s); L'*L end

sizes = [ 2_500, 5_000, 10_000 ]
inputs = [
(x = NormalMeanPrecision(3.0, 7.0), y = NormalMeanVariance(-4.0, 6.0), mean_tol = [ 1e-1, 1e-1, 1e-1 ], cov_tol = [ 1e-1, 1e-1, 1e-1 ], entropy_tol = [ 1e-1, 1e-1, 1e-1 ]),
(x = NormalMeanVariance(3.0, 7.0), y = NormalWeightedMeanPrecision(4.0, 6.0), mean_tol = [ 1e-1, 1e-1, 1e-1 ], cov_tol = [ 1e-1, 1e-1, 1e-1 ], entropy_tol = [ 1e-1, 1e-1, 1e-1 ]),
(x = GammaShapeRate(3.0, 7.0), y = GammaShapeScale(4.0, 6.0), mean_tol = [ 1e-1, 1e-1, 1e-1 ], cov_tol = [ 1e-1, 1e-1, 1e-1 ], entropy_tol = [ 3e-1, 3e-1, 3e-1 ]),
(x = MvNormalMeanCovariance(10rand(rng, 4), posdefm(rng, 4)), y = MvNormalMeanPrecision(10rand(rng, 4), posdefm(rng, 4)), mean_tol = [ 6e-1, 6e-1, 6e-1 ], cov_tol = [ 6e1, 6e1, 6e1 ], entropy_tol = [ 4e-1, 4e-1, 4e-1 ]),
(x = Wishart(10.0, posdefm(rng, 3)), y = Wishart(5.0, posdefm(rng, 3)), mean_tol = [ 7e-1, 7e-1, 7e-1 ], cov_tol = [ 3e2, 3e2, 3e2 ], entropy_tol = [ 2e-1, 2e-1, 2e-1 ]),
(x = MvNormalMeanCovariance(10rand(rng, 4), posdefm(rng, 4)), y = MvNormalMeanPrecision(10rand(rng, 4), posdefm(rng, 4)), mean_tol = [ 2e-1, 2e-1, 2e-1 ], cov_tol = [ 6e-1, 6e-1, 6e-1 ], entropy_tol = [ 4e-1, 4e-1, 4e-1 ]),
(x = Wishart(10.0, posdefm(rng, 3)), y = Wishart(5.0, posdefm(rng, 3)), mean_tol = [ 7e-1, 7e-1, 7e-1 ], cov_tol = [ 5e-1, 5e-1, 5e-1 ], entropy_tol = [ 2e-1, 2e-1, 2e-1 ]),
]

for (i, N) in enumerate(sizes)
41 changes: 0 additions & 41 deletions test/test_helpers.jl
@@ -7,47 +7,6 @@ import ReactiveMP: OneDivNVector
import ReactiveMP: deep_eltype

@testset "Helpers" begin

@testset "OneDivNVector" begin

for type in [ Float64, Float32, BigFloat ], len in [ 3, 5, 10 ]
iter = OneDivNVector(type, len)

@test eltype(iter) === type
@test length(iter) === len
@test size(iter) === (len, )
@test collect(iter) == fill(one(type) / len, len)
@test vec(iter) == fill(one(type) / len, len)
@test sizeof(iter) === 0

sim = similar(iter)

@test sim === iter
@test eltype(sim) === type
@test length(sim) === len
@test size(sim) === (len, )
@test collect(sim) == fill(one(type) / len, len)
@test vec(sim) == fill(one(type) / len, len)
@test sizeof(sim) === 0

sim = similar(iter, Float16)
@test eltype(sim) === Float16
@test length(sim) === len
@test size(sim) === (len, )
@test collect(sim) == fill(one(Float16) / len, len)
@test vec(sim) == fill(one(Float16) / len, len)
@test sizeof(sim) === 0
end

@test eltype(OneDivNVector(3)) === Float64
@test eltype(OneDivNVector(5)) === Float64

@test_throws AssertionError OneDivNVector(-2)
@test_throws AssertionError OneDivNVector(-10)
@test_throws AssertionError OneDivNVector(Vector, 2)
@test_throws AssertionError OneDivNVector(Matrix, 10)

end

@testset "deep_eltype" begin


2 comments on commit 07391a6

@bvdmitri
Member Author


@JuliaRegistrator


Registration pull request created: JuliaRegistries/General/49832

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the GitHub interface, or via:

git tag -a v1.2.1 -m "<description of version>" 07391a67acd46a2ec591045f1971ad01f180ac69
git push origin v1.2.1
