Fixed bug in docs as reported in #329 (#330)
* Fixed bug in docs as reported in #329

* Removed usage of the removed `stack` in favour of `Stacked` in the docs example, as reported in #329
torfjelde authored Sep 23, 2024
1 parent 70fb426 commit 8463fd6
Showing 1 changed file with 6 additions and 6 deletions.
12 changes: 6 additions & 6 deletions docs/src/examples.md
@@ -101,7 +101,7 @@ Similarly to the multivariate ADVI example, we could use `Stacked` to get a _bounded_
 ```@repl normalizing-flows
 d = MvNormal(zeros(2), ones(2));
 ibs = inverse.(bijector.((InverseGamma(2, 3), Beta())));
-sb = stack(ibs...) # == Stacked(ibs) == Stacked(ibs, [i:i for i = 1:length(ibs)])
+sb = Stacked(ibs) # == Stacked(ibs, [i:i for i = 1:length(ibs)])
 b = sb ∘ PlanarLayer(2)
 td = transformed(d, b);
 y = rand(rng, td)
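An aside on the hunk above: the `stack` being removed was a Bijectors helper that no longer exists, and `Stacked` is the supported constructor for applying different bijectors to different dimensions of the input. A minimal standalone sketch, assuming a recent Bijectors.jl in which transforms are callable, and reusing the two distributions from the example:

```julia
using Bijectors: Stacked, bijector, inverse
using Distributions: Beta, InverseGamma

# Inverse bijectors mapping ℝ onto the supports of the two distributions.
ibs = inverse.(bijector.((InverseGamma(2, 3), Beta())))

sb = Stacked(ibs)   # same as Stacked(ibs, [1:1, 2:2]): ibs[1] acts on dim 1, ibs[2] on dim 2
y = sb(randn(2))    # y[1] > 0 (InverseGamma support), 0 < y[2] < 1 (Beta support)
```

Passing explicit ranges, as in the comment in the hunk, is only needed when a bijector should act on more than one dimension.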
@@ -128,7 +128,7 @@ struct NLLObjective{R,D,T}
     data::T
 end
 
-function (obj::NLLObjective)(θs...)
+function (obj::NLLObjective)(θs)
     transformed_dist = transformed(obj.basedist, obj.reconstruct(θs))
     return -sum(Base.Fix1(logpdf, transformed_dist), eachcol(obj.data))
 end
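Background on why the splat is dropped here: `Zygote.gradient(f, args...)` differentiates with respect to each positional argument and returns a matching tuple, so keeping all parameters in a single container means the gradient comes back as a 1-tuple that can be destructured, as the next hunk does. A small self-contained illustration (the `loss`, `w`, and `b` names are hypothetical, not from the docs example):

```julia
using Zygote

# Parameters live in one container instead of being splatted into the objective.
loss(θ) = sum(abs2, θ.w) + sum(abs2, θ.b)
θ = (w = [1.0, 2.0], b = [3.0])

# `gradient` returns one entry per positional argument of `loss`,
# hence the 1-tuple destructuring.
(∇,) = Zygote.gradient(loss, θ)
∇.w == [2.0, 4.0]  # d/dw of sum(w.^2) is 2w
∇.b == [6.0]
```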
@@ -140,19 +140,19 @@ xs = randn(2, 1000);
 f = NLLObjective(reconstruct, MvNormal(2, 1), xs);
 
 # Initial loss.
-@info "Initial loss: $(f(θs...))"
+@info "Initial loss: $(f(θs))"
 
 # Train using gradient descent.
 ε = 1e-3;
 for i in 1:100
-    ∇s = Zygote.gradient(f, θs...)
-    θs = map(θs, ∇s) do θ, ∇
+    (∇s,) = Zygote.gradient(f, θs)
+    θs = fmap(θs, ∇s) do θ, ∇
         θ - ε .* ∇
     end
 end
 
 # Final loss
-@info "Finall loss: $(f(θs...))"
+@info "Final loss: $(f(θs))"
 
 # Very simple check to see if we learned something useful.
 samples = rand(transformed(f.basedist, f.reconstruct(θs)), 1000);
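The switch from `map` to `fmap` in the loop above is needed because the parameters are now one nested container rather than a tuple of arrays. Assuming the `fmap` in scope is `Functors.fmap`, which walks two structurally identical trees and applies the function at each pair of leaves, a small isolated sketch of the update step (the parameter and gradient trees are hypothetical):

```julia
using Functors: fmap

θs = (w = [1.0, 2.0], b = [0.5])   # parameter tree
∇s = (w = [0.2, 0.2], b = [0.1])   # gradient tree with the same structure
ε = 1e-3

# One gradient-descent step, applied leaf-by-leaf across both trees.
θs = fmap(θs, ∇s) do θ, ∇
    θ - ε .* ∇
end
```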
