Commit 01e3111

docs: remove callback from solve as the output becomes long
sathvikbhagavan committed Mar 13, 2024
1 parent b13f7c0 commit 01e3111
Showing 18 changed files with 110 additions and 50 deletions.
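The change is mechanical across the touched tutorials: the `callback` keyword is dropped from `Optimization.solve` calls so the rendered documentation is not flooded with per-iteration loss prints. For reference, a minimal sketch of an alternative that keeps progress reporting while bounding output — the throttling interval and all names here are illustrative assumptions, not part of this commit:

```julia
# Sketch: print the loss only every `print_every` iterations instead of
# removing the callback entirely. Interval and variable names are assumptions.
print_every = 100
iter = Ref(0)                      # mutable counter captured by the closure
quiet_callback = function (p, l)
    iter[] += 1
    iter[] % print_every == 0 && println("Iteration $(iter[]): loss = $l")
    return false                   # returning false tells the optimizer to continue
end
# res = Optimization.solve(prob, opt; callback = quiet_callback, maxiters = 2000)
```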
2 changes: 1 addition & 1 deletion docs/src/examples/3rd.md
@@ -47,7 +47,7 @@ callback = function (p, l)
return false
end
-res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); callback = callback, maxiters = 2000)
+res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 2000)
phi = discretization.phi
```

2 changes: 1 addition & 1 deletion docs/src/examples/heterogeneous.md
@@ -45,5 +45,5 @@ callback = function (p, l)
return false
end
-res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 100)
+res = Optimization.solve(prob, BFGS(); maxiters = 100)
```
4 changes: 1 addition & 3 deletions docs/src/examples/ks.md
@@ -72,7 +72,7 @@ callback = function (p, l)
end
opt = OptimizationOptimJL.BFGS()
-res = Optimization.solve(prob, opt; callback = callback, maxiters = 2000)
+res = Optimization.solve(prob, opt; maxiters = 2000)
phi = discretization.phi
```

@@ -93,5 +93,3 @@ p2 = plot(xs, u_real, title = "analytic")
p3 = plot(xs, diff_u, title = "error")
plot(p1, p2, p3)
```
-
-![plotks](https://user-images.githubusercontent.com/12683885/91025889-a6253200-e602-11ea-8f61-8e6e2488e025.png)
15 changes: 12 additions & 3 deletions docs/src/examples/linear_parabolic.md
@@ -23,7 +23,7 @@ w(t, 1) = \frac{e^{\lambda_1} cos(\frac{x}{a})-e^{\lambda_2}cos(\frac{x}{a})}{\l

with a physics-informed neural network.

-```@example
+```@example linear_parabolic
using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimisers, OptimizationOptimJL, LineSearches
using Plots
using ModelingToolkit: Interval, infimum, supremum
@@ -92,7 +92,7 @@ callback = function (p, l)
return false
end
-res = Optimization.solve(prob, OptimizationOptimisers.Adam(1e-2); callback = callback, maxiters = 10000)
+res = Optimization.solve(prob, OptimizationOptimisers.Adam(1e-2); maxiters = 10000)
phi = discretization.phi
@@ -105,10 +105,19 @@ analytic_sol_func(t, x) = [u_analytic(t, x), w_analytic(t, x)]
u_real = [[analytic_sol_func(t, x)[i] for t in ts for x in xs] for i in 1:2]
u_predict = [[phi[i]([t, x], minimizers_[i])[1] for t in ts for x in xs] for i in 1:2]
diff_u = [abs.(u_real[i] .- u_predict[i]) for i in 1:2]
+ps = []
for i in 1:2
p1 = plot(ts, xs, u_real[i], linetype = :contourf, title = "u$i, analytic")
p2 = plot(ts, xs, u_predict[i], linetype = :contourf, title = "predict")
p3 = plot(ts, xs, diff_u[i], linetype = :contourf, title = "error")
-plot(p1, p2, p3)
+push!(ps, plot(p1, p2, p3))
end
```
+
+```@example linear_parabolic
+ps[1]
+```
+
+```@example linear_parabolic
+ps[2]
+```
17 changes: 13 additions & 4 deletions docs/src/examples/nonlinear_elliptic.md
@@ -26,10 +26,10 @@ where k is a root of the algebraic (transcendental) equation f(k) = g(k).

This is done using a derivative neural network approximation.

-```@example
+```@example nonlinear_elliptic
using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL, Roots
using Plots
-import ModelingToolkit: Interval, infimum, supremum
+using ModelingToolkit: Interval, infimum, supremum
@parameters x, y
Dx = Differential(x)
@@ -103,7 +103,7 @@ callback = function (p, l)
return false
end
-res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 100)
+res = Optimization.solve(prob, BFGS(); maxiters = 100)
phi = discretization.phi
@@ -116,10 +116,19 @@ analytic_sol_func(x, y) = [u_analytic(x, y), w_analytic(x, y)]
u_real = [[analytic_sol_func(x, y)[i] for x in xs for y in ys] for i in 1:2]
u_predict = [[phi[i]([x, y], minimizers_[i])[1] for x in xs for y in ys] for i in 1:2]
diff_u = [abs.(u_real[i] .- u_predict[i]) for i in 1:2]
+ps = []
for i in 1:2
p1 = plot(xs, ys, u_real[i], linetype = :contourf, title = "u$i, analytic")
p2 = plot(xs, ys, u_predict[i], linetype = :contourf, title = "predict")
p3 = plot(xs, ys, diff_u[i], linetype = :contourf, title = "error")
-plot(p1, p2, p3)
+push!(ps, plot(p1, p2, p3))
end
```
+
+```@example nonlinear_elliptic
+ps[1]
+```
+
+```@example nonlinear_elliptic
+ps[2]
+```
18 changes: 14 additions & 4 deletions docs/src/examples/nonlinear_hyperbolic.md
@@ -32,11 +32,11 @@ where k is a root of the algebraic (transcendental) equation f(k) = g(k), j0 and

We solve this with NeuralPDE:

-```@example
+```@example nonlinear_hyperbolic
using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL, Roots, LineSearches
using SpecialFunctions
using Plots
-import ModelingToolkit: Interval, infimum, supremum
+using ModelingToolkit: Interval, infimum, supremum
@parameters t, x
@variables u(..), w(..)
@@ -99,7 +99,7 @@ callback = function (p, l)
return false
end
-res = Optimization.solve(prob, BFGS(linesearch = BackTracking()); callback = callback, maxiters = 200)
+res = Optimization.solve(prob, BFGS(linesearch = BackTracking()); maxiters = 200)
phi = discretization.phi
@@ -112,10 +112,20 @@ analytic_sol_func(t, x) = [u_analytic(t, x), w_analytic(t, x)]
u_real = [[analytic_sol_func(t, x)[i] for t in ts for x in xs] for i in 1:2]
u_predict = [[phi[i]([t, x], minimizers_[i])[1] for t in ts for x in xs] for i in 1:2]
diff_u = [abs.(u_real[i] .- u_predict[i]) for i in 1:2]
+ps = []
for i in 1:2
p1 = plot(ts, xs, u_real[i], linetype = :contourf, title = "u$i, analytic")
p2 = plot(ts, xs, u_predict[i], linetype = :contourf, title = "predict")
p3 = plot(ts, xs, diff_u[i], linetype = :contourf, title = "error")
-plot(p1, p2, p3)
+push!(ps, plot(p1, p2, p3))
end
```
+
+
+```@example nonlinear_hyperbolic
+ps[1]
+```
+
+```@example nonlinear_hyperbolic
+ps[2]
+```
16 changes: 4 additions & 12 deletions docs/src/examples/wave.md
@@ -17,7 +17,7 @@ Further, the solution of this equation with the given boundary conditions is pre

```@example wave
using NeuralPDE, Lux, Optimization, OptimizationOptimJL
-import ModelingToolkit: Interval
+using ModelingToolkit: Interval
@parameters t, x
@variables u(..)
Expand Down Expand Up @@ -99,7 +99,7 @@ with grid discretization `dx = 0.05` and physics-informed neural networks. Here,
```@example wave2
using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL
using Plots, Printf
-import ModelingToolkit: Interval, infimum, supremum
+using ModelingToolkit: Interval, infimum, supremum
@parameters t, x
@variables u(..) Dxu(..) Dtu(..) O1(..) O2(..)
@@ -162,9 +162,9 @@ callback = function (p, l)
return false
end
-res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 2000)
+res = Optimization.solve(prob, BFGS(); maxiters = 2000)
prob = remake(prob, u0 = res.u)
-res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 2000)
+res = Optimization.solve(prob, BFGS(); maxiters = 2000)
phi = discretization.phi[1]
@@ -212,11 +212,3 @@ p2 = plot(ts, xs, u_predict, linetype = :contourf, title = "predict");
p3 = plot(ts, xs, diff_u, linetype = :contourf, title = "error");
plot(p1, p2, p3)
```
-
-We can see the results here:
-
-![Damped_wave_sol_adaptive_u](https://user-images.githubusercontent.com/12683885/149665332-d4daf7d0-682e-4933-a2b4-34f403881afb.png)
-
-Plotted as a line, one can see the analytical solution and the prediction here:
-
-![1Dwave_damped_adaptive](https://user-images.githubusercontent.com/12683885/149665327-69d04c01-2240-45ea-981e-a7b9412a3b58.gif)
2 changes: 1 addition & 1 deletion docs/src/tutorials/constraints.md
@@ -84,7 +84,7 @@ cb_ = function (p, l)
return false
end
-res = Optimization.solve(prob, BFGS(), callback = cb_, maxiters = 600)
+res = Optimization.solve(prob, BFGS(), maxiters = 600)
```

And some analysis:
2 changes: 1 addition & 1 deletion docs/src/tutorials/dae.md
@@ -17,7 +17,7 @@ Let's solve a simple DAE system:
using NeuralPDE
using Random
using OrdinaryDiffEq, Statistics
-import Lux, OptimizationOptimisers
+using Lux, OptimizationOptimisers
example = (du, u, p, t) -> [cos(2pi * t) - du[1], u[2] + cos(2pi * t) - du[2]]
u₀ = [1.0, -1.0]
36 changes: 32 additions & 4 deletions docs/src/tutorials/derivative_neural_network.md
@@ -111,9 +111,9 @@ callback = function (p, l)
return false
end
-res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); callback = callback, maxiters = 2000)
+res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 2000)
prob = remake(prob, u0 = res.u)
-res = Optimization.solve(prob, LBFGS(linesearch = BackTracking()); callback = callback, maxiters = 200)
+res = Optimization.solve(prob, LBFGS(linesearch = BackTracking()); maxiters = 200)
phi = discretization.phi
```
@@ -142,12 +142,40 @@ end
u_real = [[analytic_sol_func_all(t, x)[i] for t in ts for x in xs] for i in 1:7]
u_predict = [[phi[i]([t, x], minimizers_[i])[1] for t in ts for x in xs] for i in 1:7]
diff_u = [abs.(u_real[i] .- u_predict[i]) for i in 1:7]
+ps = []
titles = ["u1", "u2", "u3", "Dtu1", "Dtu2", "Dxu1", "Dxu2"]
for i in 1:7
p1 = plot(ts, xs, u_real[i], linetype = :contourf, title = "$(titles[i]), analytic")
p2 = plot(ts, xs, u_predict[i], linetype = :contourf, title = "predict")
p3 = plot(ts, xs, diff_u[i], linetype = :contourf, title = "error")
-plot(p1, p2, p3)
+push!(ps, plot(p1, p2, p3))
end
```
+
+```@example derivativenn
+ps[1]
+```
+
+```@example derivativenn
+ps[2]
+```
+
+```@example derivativenn
+ps[3]
+```
+
+```@example derivativenn
+ps[4]
+```
+
+```@example derivativenn
+ps[5]
+```
+
+```@example derivativenn
+ps[6]
+```
+
+```@example derivativenn
+ps[7]
+```
4 changes: 2 additions & 2 deletions docs/src/tutorials/dgm.md
@@ -100,9 +100,9 @@ callback = function (p, l)
return false
end
-res = Optimization.solve(prob, Adam(0.1); callback = callback, maxiters = 100)
+res = Optimization.solve(prob, Adam(0.1); maxiters = 100)
prob = remake(prob, u0 = res.u)
-res = Optimization.solve(prob, Adam(0.01); callback = callback, maxiters = 500)
+res = Optimization.solve(prob, Adam(0.01); maxiters = 500)
phi = discretization.phi
u_predict= [first(phi([t, x], res.minimizer)) for t in ts, x in xs]
2 changes: 1 addition & 1 deletion docs/src/tutorials/gpu.md
@@ -104,7 +104,7 @@ callback = function (p, l)
return false
end
-res = Optimization.solve(prob, OptimizationOptimisers.Adam(1e-2); callback = callback, maxiters = 2500)
+res = Optimization.solve(prob, OptimizationOptimisers.Adam(1e-2); maxiters = 2500)
```

We then use the `remake` function to rebuild the PDE problem to start a new optimization at the optimized parameters, and continue with a lower learning rate:
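A minimal sketch of that collapsed continuation, following the same `remake` pattern shown in the wave-equation diff above; the lower learning rate and iteration count are assumptions, not taken from this diff:

```julia
# Sketch: rebuild the problem at the optimized parameters and continue
# training with a smaller step size. Hyperparameters are assumptions.
prob = remake(prob, u0 = res.u)
res = Optimization.solve(prob, OptimizationOptimisers.Adam(1e-3); maxiters = 2500)
```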
2 changes: 1 addition & 1 deletion docs/src/tutorials/integro_diff.md
@@ -67,7 +67,7 @@ callback = function (p, l)
println("Current loss is: $l")
return false
end
-res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 100)
+res = Optimization.solve(prob, BFGS(); maxiters = 100)
```

Plotting the final solution and analytical solution
2 changes: 1 addition & 1 deletion docs/src/tutorials/low_level.md
@@ -67,7 +67,7 @@ end
f_ = OptimizationFunction(loss_function, Optimization.AutoZygote())
prob = Optimization.OptimizationProblem(f_, sym_prob.flat_init_params)
-res = Optimization.solve(prob, BFGS(linesearch = BackTracking()); callback = callback, maxiters = 3000)
+res = Optimization.solve(prob, BFGS(linesearch = BackTracking()); maxiters = 3000)
```

And some analysis:
10 changes: 5 additions & 5 deletions docs/src/tutorials/neural_adapter.md
@@ -50,7 +50,7 @@ callback = function (p, l)
return false
end
-res = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); callback, maxiters = 10000)
+res = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000)
phi = discretization.phi
inner_ = 8
@@ -72,7 +72,7 @@ end
strategy = NeuralPDE.QuadratureTraining()
prob_ = NeuralPDE.neural_adapter(loss, init_params2, pde_system, strategy)
-res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); callback, maxiters = 10000)
+res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 10000)
phi_ = PhysicsInformedNN(chain2, strategy; init_params = res_.u).phi
@@ -180,7 +180,7 @@ for i in 1:count_decomp
prob = NeuralPDE.discretize(pde_system_, discretization)
symprob = NeuralPDE.symbolic_discretize(pde_system_, discretization)
-res_ = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000, callback)
+res_ = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000)
phi = discretization.phi
push!(reses, res_)
push!(phis, phi)
@@ -244,10 +244,10 @@ end
prob_ = NeuralPDE.neural_adapter(losses, init_params2, pde_system_map,
NeuralPDE.QuadratureTraining())
-res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); callback, maxiters = 5000)
+res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 5000)
prob_ = NeuralPDE.neural_adapter(losses, res_.u, pde_system_map,
NeuralPDE.QuadratureTraining())
-res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); callback, maxiters = 5000)
+res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 5000)
phi_ = PhysicsInformedNN(chain2, strategy; init_params = res_.u).phi
2 changes: 1 addition & 1 deletion docs/src/tutorials/param_estim.md
@@ -103,7 +103,7 @@ callback = function (p, l)
println("Current loss is: $l")
return false
end
-res = Optimization.solve(prob, BFGS(linesearch = BackTracking()); callback = callback, maxiters = 1000)
+res = Optimization.solve(prob, BFGS(linesearch = BackTracking()); maxiters = 1000)
p_ = res.u[(end - 2):end] # p_ = [9.93, 28.002, 2.667]
```

5 changes: 3 additions & 2 deletions docs/src/tutorials/pdesystem.md
@@ -66,7 +66,7 @@ end
# Optimizer
opt = OptimizationOptimJL.LBFGS(linesearch = BackTracking())
-res = solve(prob, opt, callback = callback, maxiters = 1000)
+res = solve(prob, opt, maxiters = 1000)
phi = discretization.phi
dx = 0.05
@@ -145,7 +145,8 @@ callback = function (p, l)
return false
end
-res = Optimization.solve(prob, opt, callback = callback, maxiters = 1000)
+# We could pass the callback function to solve; it is omitted here as the output would be very long.
+res = Optimization.solve(prob, opt, maxiters = 1000)
phi = discretization.phi
```
