From 6cb01341bf6599b5aea1af0f1c2f40da46deac8d Mon Sep 17 00:00:00 2001
From: Sathvik Bhagavan
Date: Wed, 13 Mar 2024 04:42:48 +0000
Subject: [PATCH] docs: remove callback from solve as the output becomes long

---
 docs/src/examples/3rd.md                   |  2 +-
 docs/src/examples/heterogeneous.md         |  2 +-
 docs/src/examples/ks.md                    |  4 +--
 docs/src/examples/linear_parabolic.md      | 15 ++++++--
 docs/src/examples/nonlinear_elliptic.md    | 17 ++++++---
 docs/src/examples/nonlinear_hyperbolic.md  | 18 +++++++---
 docs/src/examples/wave.md                  | 16 +++------
 docs/src/tutorials/constraints.md          |  2 +-
 docs/src/tutorials/dae.md                  |  2 +-
 .../tutorials/derivative_neural_network.md | 36 ++++++++++++++++---
 docs/src/tutorials/gpu.md                  |  2 +-
 docs/src/tutorials/integro_diff.md         |  2 +-
 docs/src/tutorials/low_level.md            |  2 +-
 docs/src/tutorials/neural_adapter.md       | 10 +++---
 docs/src/tutorials/param_estim.md          |  2 +-
 docs/src/tutorials/pdesystem.md            |  5 +--
 docs/src/tutorials/systems.md              | 19 ++++++++--
 17 files changed, 108 insertions(+), 48 deletions(-)

diff --git a/docs/src/examples/3rd.md b/docs/src/examples/3rd.md
index 829cd3f7e2..e64358e177 100644
--- a/docs/src/examples/3rd.md
+++ b/docs/src/examples/3rd.md
@@ -47,7 +47,7 @@ callback = function (p, l)
     return false
 end
 
-res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); callback = callback, maxiters = 2000)
+res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 2000)
 phi = discretization.phi
 ```
 
diff --git a/docs/src/examples/heterogeneous.md b/docs/src/examples/heterogeneous.md
index ad4b72b2ec..286e085be3 100644
--- a/docs/src/examples/heterogeneous.md
+++ b/docs/src/examples/heterogeneous.md
@@ -45,5 +45,5 @@ callback = function (p, l)
     return false
 end
 
-res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 100)
+res = Optimization.solve(prob, BFGS(); maxiters = 100)
 ```
diff --git a/docs/src/examples/ks.md b/docs/src/examples/ks.md
index bb2b1064f5..55f75f825d 100644
--- a/docs/src/examples/ks.md
+++ b/docs/src/examples/ks.md
@@ -72,7 +72,7 @@ callback = function (p, l)
 end
 
 opt = OptimizationOptimJL.BFGS()
-res = Optimization.solve(prob, opt; callback = callback, maxiters = 2000)
+res = Optimization.solve(prob, opt; maxiters = 2000)
 phi = discretization.phi
 ```
 
@@ -93,5 +93,3 @@ p2 = plot(xs, u_real, title = "analytic")
 p3 = plot(xs, diff_u, title = "error")
 plot(p1, p2, p3)
 ```
-
-![plotks](https://user-images.githubusercontent.com/12683885/91025889-a6253200-e602-11ea-8f61-8e6e2488e025.png)
diff --git a/docs/src/examples/linear_parabolic.md b/docs/src/examples/linear_parabolic.md
index 2e7936f5c0..60494a5c9a 100644
--- a/docs/src/examples/linear_parabolic.md
+++ b/docs/src/examples/linear_parabolic.md
@@ -23,7 +23,7 @@ w(t, 1) = \frac{e^{\lambda_1} cos(\frac{x}{a})-e^{\lambda_2}cos(\frac{x}{a})}{\l
 
 with a physics-informed neural network.
 
-```@example
+```@example linear_parabolic
 using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimisers, OptimizationOptimJL, LineSearches
 using Plots
 using ModelingToolkit: Interval, infimum, supremum
@@ -92,7 +92,7 @@ callback = function (p, l)
     return false
 end
 
-res = Optimization.solve(prob, OptimizationOptimisers.Adam(1e-2); callback = callback, maxiters = 10000)
+res = Optimization.solve(prob, OptimizationOptimisers.Adam(1e-2); maxiters = 10000)
 
 phi = discretization.phi
 
@@ -105,10 +105,19 @@ analytic_sol_func(t, x) = [u_analytic(t, x), w_analytic(t, x)]
 u_real = [[analytic_sol_func(t, x)[i] for t in ts for x in xs] for i in 1:2]
 u_predict = [[phi[i]([t, x], minimizers_[i])[1] for t in ts for x in xs] for i in 1:2]
 diff_u = [abs.(u_real[i] .- u_predict[i]) for i in 1:2]
+ps = []
 for i in 1:2
     p1 = plot(ts, xs, u_real[i], linetype = :contourf, title = "u$i, analytic")
     p2 = plot(ts, xs, u_predict[i], linetype = :contourf, title = "predict")
     p3 = plot(ts, xs, diff_u[i], linetype = :contourf, title = "error")
-    plot(p1, p2, p3)
+    push!(ps, plot(p1, p2, p3))
 end
 ```
+
+```@example linear_parabolic
+ps[1]
+```
+
+```@example linear_parabolic
+ps[2]
+```
diff --git a/docs/src/examples/nonlinear_elliptic.md b/docs/src/examples/nonlinear_elliptic.md
index 8ac44080cd..155330b2bc 100644
--- a/docs/src/examples/nonlinear_elliptic.md
+++ b/docs/src/examples/nonlinear_elliptic.md
@@ -26,10 +26,10 @@ where k is a root of the algebraic (transcendental) equation f(k) = g(k).
 
 This is done using a derivative neural network approximation.
 
-```@example
+```@example nonlinear_elliptic
 using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL, Roots
 using Plots
-import ModelingToolkit: Interval, infimum, supremum
+using ModelingToolkit: Interval, infimum, supremum
 
 @parameters x, y
 Dx = Differential(x)
@@ -103,7 +103,7 @@ callback = function (p, l)
     return false
 end
 
-res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 100)
+res = Optimization.solve(prob, BFGS(); maxiters = 100)
 
 phi = discretization.phi
 
@@ -116,10 +116,19 @@ analytic_sol_func(x, y) = [u_analytic(x, y), w_analytic(x, y)]
 u_real = [[analytic_sol_func(x, y)[i] for x in xs for y in ys] for i in 1:2]
 u_predict = [[phi[i]([x, y], minimizers_[i])[1] for x in xs for y in ys] for i in 1:2]
 diff_u = [abs.(u_real[i] .- u_predict[i]) for i in 1:2]
+ps = []
 for i in 1:2
     p1 = plot(xs, ys, u_real[i], linetype = :contourf, title = "u$i, analytic")
     p2 = plot(xs, ys, u_predict[i], linetype = :contourf, title = "predict")
     p3 = plot(xs, ys, diff_u[i], linetype = :contourf, title = "error")
-    plot(p1, p2, p3)
+    push!(ps, plot(p1, p2, p3))
 end
 ```
+
+```@example nonlinear_elliptic
+ps[1]
+```
+
+```@example nonlinear_elliptic
+ps[2]
+```
diff --git a/docs/src/examples/nonlinear_hyperbolic.md b/docs/src/examples/nonlinear_hyperbolic.md
index 6666912dd4..60203b1610 100644
--- a/docs/src/examples/nonlinear_hyperbolic.md
+++ b/docs/src/examples/nonlinear_hyperbolic.md
@@ -32,11 +32,11 @@ where k is a root of the algebraic (transcendental) equation f(k) = g(k), j0 and
 
 We solve this with Neural:
 
-```@example
+```@example nonlinear_hyperbolic
 using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL, Roots, LineSearches
 using SpecialFunctions
 using Plots
-import ModelingToolkit: Interval, infimum, supremum
+using ModelingToolkit: Interval, infimum, supremum
 
 @parameters t, x
 @variables u(..), w(..)
@@ -99,7 +99,7 @@ callback = function (p, l)
     return false
 end
 
-res = Optimization.solve(prob, BFGS(linesearch = BackTracking()); callback = callback, maxiters = 200)
+res = Optimization.solve(prob, BFGS(linesearch = BackTracking()); maxiters = 200)
 
 phi = discretization.phi
 
@@ -112,10 +112,20 @@ analytic_sol_func(t, x) = [u_analytic(t, x), w_analytic(t, x)]
 u_real = [[analytic_sol_func(t, x)[i] for t in ts for x in xs] for i in 1:2]
 u_predict = [[phi[i]([t, x], minimizers_[i])[1] for t in ts for x in xs] for i in 1:2]
 diff_u = [abs.(u_real[i] .- u_predict[i]) for i in 1:2]
+ps = []
 for i in 1:2
     p1 = plot(ts, xs, u_real[i], linetype = :contourf, title = "u$i, analytic")
     p2 = plot(ts, xs, u_predict[i], linetype = :contourf, title = "predict")
     p3 = plot(ts, xs, diff_u[i], linetype = :contourf, title = "error")
-    plot(p1, p2, p3)
+    push!(ps, plot(p1, p2, p3))
 end
 ```
+
+
+```@example nonlinear_hyperbolic
+ps[1]
+```
+
+```@example nonlinear_hyperbolic
+ps[2]
+```
diff --git a/docs/src/examples/wave.md b/docs/src/examples/wave.md
index d65a7b7225..954b15cc9a 100644
--- a/docs/src/examples/wave.md
+++ b/docs/src/examples/wave.md
@@ -17,7 +17,7 @@ Further, the solution of this equation with the given boundary conditions is pre
 
 ```@example wave
 using NeuralPDE, Lux, Optimization, OptimizationOptimJL
-import ModelingToolkit: Interval
+using ModelingToolkit: Interval
 
 @parameters t, x
 @variables u(..)
@@ -99,7 +99,7 @@ with grid discretization `dx = 0.05` and physics-informed neural networks. Here,
 ```@example wave2
 using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL
 using Plots, Printf
-import ModelingToolkit: Interval, infimum, supremum
+using ModelingToolkit: Interval, infimum, supremum
 
 @parameters t, x
 @variables u(..) Dxu(..) Dtu(..) O1(..) O2(..)
@@ -162,9 +162,9 @@ callback = function (p, l)
     return false
 end
 
-res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 2000)
+res = Optimization.solve(prob, BFGS(); maxiters = 2000)
 prob = remake(prob, u0 = res.u)
-res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 2000)
+res = Optimization.solve(prob, BFGS(); maxiters = 2000)
 
 phi = discretization.phi[1]
 
@@ -212,11 +212,3 @@ p2 = plot(ts, xs, u_predict, linetype = :contourf, title = "predict");
 p3 = plot(ts, xs, diff_u, linetype = :contourf, title = "error");
 plot(p1, p2, p3)
 ```
-
-We can see the results here:
-
-![Damped_wave_sol_adaptive_u](https://user-images.githubusercontent.com/12683885/149665332-d4daf7d0-682e-4933-a2b4-34f403881afb.png)
-
-Plotted as a line, one can see the analytical solution and the prediction here:
-
-![1Dwave_damped_adaptive](https://user-images.githubusercontent.com/12683885/149665327-69d04c01-2240-45ea-981e-a7b9412a3b58.gif)
diff --git a/docs/src/tutorials/constraints.md b/docs/src/tutorials/constraints.md
index 5d96bff3f1..491267fac2 100644
--- a/docs/src/tutorials/constraints.md
+++ b/docs/src/tutorials/constraints.md
@@ -84,7 +84,7 @@ cb_ = function (p, l)
     return false
 end
 
-res = Optimization.solve(prob, BFGS(), callback = cb_, maxiters = 600)
+res = Optimization.solve(prob, BFGS(), maxiters = 600)
 ```
 
 And some analysis:
diff --git a/docs/src/tutorials/dae.md b/docs/src/tutorials/dae.md
index da17f4f46d..de83542c63 100644
--- a/docs/src/tutorials/dae.md
+++ b/docs/src/tutorials/dae.md
@@ -17,7 +17,7 @@ Let's solve a simple DAE system:
 using NeuralPDE
 using Random
 using OrdinaryDiffEq, Statistics
-import Lux, OptimizationOptimisers
+using Lux, OptimizationOptimisers
 
 example = (du, u, p, t) -> [cos(2pi * t) - du[1], u[2] + cos(2pi * t) - du[2]]
 u₀ = [1.0, -1.0]
diff --git a/docs/src/tutorials/derivative_neural_network.md b/docs/src/tutorials/derivative_neural_network.md
index c803cdbf8e..82ad99b986 100644
--- a/docs/src/tutorials/derivative_neural_network.md
+++ b/docs/src/tutorials/derivative_neural_network.md
@@ -111,9 +111,9 @@ callback = function (p, l)
     return false
 end
 
-res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); callback = callback, maxiters = 2000)
+res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 2000)
 prob = remake(prob, u0 = res.u)
-res = Optimization.solve(prob, LBFGS(linesearch = BackTracking()); callback = callback, maxiters = 200)
+res = Optimization.solve(prob, LBFGS(linesearch = BackTracking()); maxiters = 200)
 
 phi = discretization.phi
 ```
@@ -142,12 +142,40 @@ end
 u_real = [[analytic_sol_func_all(t, x)[i] for t in ts for x in xs] for i in 1:7]
 u_predict = [[phi[i]([t, x], minimizers_[i])[1] for t in ts for x in xs] for i in 1:7]
 diff_u = [abs.(u_real[i] .- u_predict[i]) for i in 1:7]
-
+ps = []
 titles = ["u1", "u2", "u3", "Dtu1", "Dtu2", "Dxu1", "Dxu2"]
 for i in 1:7
     p1 = plot(ts, xs, u_real[i], linetype = :contourf, title = "$(titles[i]), analytic")
     p2 = plot(ts, xs, u_predict[i], linetype = :contourf, title = "predict")
     p3 = plot(ts, xs, diff_u[i], linetype = :contourf, title = "error")
-    plot(p1, p2, p3)
+    push!(ps, plot(p1, p2, p3))
 end
 ```
+
+```@example derivativenn
+ps[1]
+```
+
+```@example derivativenn
+ps[2]
+```
+
+```@example derivativenn
+ps[3]
+```
+
+```@example derivativenn
+ps[4]
+```
+
+```@example derivativenn
+ps[5]
+```
+
+```@example derivativenn
+ps[6]
+```
+
+```@example derivativenn
+ps[7]
+```
diff --git a/docs/src/tutorials/gpu.md b/docs/src/tutorials/gpu.md
index 4f47df8475..feea05c9e1 100644
--- a/docs/src/tutorials/gpu.md
+++ b/docs/src/tutorials/gpu.md
@@ -104,7 +104,7 @@ callback = function (p, l)
     return false
 end
 
-res = Optimization.solve(prob, OptimizationOptimisers.Adam(1e-2); callback = callback, maxiters = 2500)
+res = Optimization.solve(prob, OptimizationOptimisers.Adam(1e-2); maxiters = 2500)
 ```
 
 We then use the `remake` function to rebuild the PDE problem to start a new optimization at the optimized parameters, and continue with a lower learning rate:
diff --git a/docs/src/tutorials/integro_diff.md b/docs/src/tutorials/integro_diff.md
index 88e7683171..e977b87af4 100644
--- a/docs/src/tutorials/integro_diff.md
+++ b/docs/src/tutorials/integro_diff.md
@@ -67,7 +67,7 @@ callback = function (p, l)
     println("Current loss is: $l")
     return false
 end
-res = Optimization.solve(prob, BFGS(); callback = callback, maxiters = 100)
+res = Optimization.solve(prob, BFGS(); maxiters = 100)
 ```
 
 Plotting the final solution and analytical solution
diff --git a/docs/src/tutorials/low_level.md b/docs/src/tutorials/low_level.md
index 63fa0d476c..33b605e77d 100644
--- a/docs/src/tutorials/low_level.md
+++ b/docs/src/tutorials/low_level.md
@@ -67,7 +67,7 @@ end
 f_ = OptimizationFunction(loss_function, Optimization.AutoZygote())
 prob = Optimization.OptimizationProblem(f_, sym_prob.flat_init_params)
 
-res = Optimization.solve(prob, BFGS(linesearch = BackTracking()); callback = callback, maxiters = 3000)
+res = Optimization.solve(prob, BFGS(linesearch = BackTracking()); maxiters = 3000)
 ```
 
 And some analysis:
diff --git a/docs/src/tutorials/neural_adapter.md b/docs/src/tutorials/neural_adapter.md
index 6f50157448..a56e30a269 100644
--- a/docs/src/tutorials/neural_adapter.md
+++ b/docs/src/tutorials/neural_adapter.md
@@ -50,7 +50,7 @@ callback = function (p, l)
     return false
 end
 
-res = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); callback, maxiters = 10000)
+res = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000)
 phi = discretization.phi
 
 inner_ = 8
@@ -72,7 +72,7 @@ end
 
 strategy = NeuralPDE.QuadratureTraining()
 prob_ = NeuralPDE.neural_adapter(loss, init_params2, pde_system, strategy)
-res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); callback, maxiters = 10000)
+res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 10000)
 
 phi_ = PhysicsInformedNN(chain2, strategy; init_params = res_.u).phi
 
@@ -180,7 +180,7 @@ for i in 1:count_decomp
 
     prob = NeuralPDE.discretize(pde_system_, discretization)
     symprob = NeuralPDE.symbolic_discretize(pde_system_, discretization)
-    res_ = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000, callback)
+    res_ = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000)
     phi = discretization.phi
     push!(reses, res_)
     push!(phis, phi)
@@ -244,10 +244,10 @@ end
 
 prob_ = NeuralPDE.neural_adapter(losses, init_params2, pde_system_map,
     NeuralPDE.QuadratureTraining())
-res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); callback, maxiters = 5000)
+res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 5000)
 prob_ = NeuralPDE.neural_adapter(losses, res_.u, pde_system_map,
     NeuralPDE.QuadratureTraining())
-res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); callback, maxiters = 5000)
+res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 5000)
 
 phi_ = PhysicsInformedNN(chain2, strategy; init_params = res_.u).phi
 
diff --git a/docs/src/tutorials/param_estim.md b/docs/src/tutorials/param_estim.md
index 79aac22b31..e696c76702 100644
--- a/docs/src/tutorials/param_estim.md
+++ b/docs/src/tutorials/param_estim.md
@@ -103,7 +103,7 @@ callback = function (p, l)
     println("Current loss is: $l")
     return false
 end
-res = Optimization.solve(prob, BFGS(linesearch = BackTracking()); callback = callback, maxiters = 1000)
+res = Optimization.solve(prob, BFGS(linesearch = BackTracking()); maxiters = 1000)
 p_ = res.u[(end - 2):end] # p_ = [9.93, 28.002, 2.667]
 ```
 
diff --git a/docs/src/tutorials/pdesystem.md b/docs/src/tutorials/pdesystem.md
index 4076bce5cf..d1c438b30c 100644
--- a/docs/src/tutorials/pdesystem.md
+++ b/docs/src/tutorials/pdesystem.md
@@ -66,7 +66,7 @@ end
 
 # Optimizer
 opt = OptimizationOptimJL.LBFGS(linesearch = BackTracking())
-res = solve(prob, opt, callback = callback, maxiters = 1000)
+res = solve(prob, opt, maxiters = 1000)
 phi = discretization.phi
 
 dx = 0.05
@@ -145,7 +145,8 @@ callback = function (p, l)
     return false
 end
 
-res = Optimization.solve(prob, opt, callback = callback, maxiters = 1000)
+# We could pass the callback function to `solve` here as well; we skip it, as the printed output would be very long.
+res = Optimization.solve(prob, opt, maxiters = 1000)
 
 phi = discretization.phi
 ```
diff --git a/docs/src/tutorials/systems.md b/docs/src/tutorials/systems.md
index d4b277b1f3..80e3fbb62a 100644
--- a/docs/src/tutorials/systems.md
+++ b/docs/src/tutorials/systems.md
@@ -84,7 +84,7 @@ callback = function (p, l)
     return false
 end
 
-res = solve(prob, LBFGS(linesearch = BackTracking()); callback = callback, maxiters = 1000)
+res = solve(prob, LBFGS(linesearch = BackTracking()); maxiters = 1000)
 phi = discretization.phi
 ```
 
@@ -151,7 +151,7 @@ end
 
 f_ = OptimizationFunction(loss_function, Optimization.AutoZygote())
 prob = Optimization.OptimizationProblem(f_, sym_prob.flat_init_params)
-res = Optimization.solve(prob, OptimizationOptimJL.LBFGS(linesearch = BackTracking()); callback = callback, maxiters = 1000)
+res = Optimization.solve(prob, OptimizationOptimJL.LBFGS(linesearch = BackTracking()); maxiters = 1000)
 ```
 
 ## Solution Representation
@@ -172,14 +172,27 @@ end
 u_real = [[analytic_sol_func(t, x)[i] for t in ts for x in xs] for i in 1:3]
 u_predict = [[phi[i]([t, x], minimizers_[i])[1] for t in ts for x in xs] for i in 1:3]
 diff_u = [abs.(u_real[i] .- u_predict[i]) for i in 1:3]
+ps = []
 for i in 1:3
     p1 = plot(ts, xs, u_real[i], linetype = :contourf, title = "u$i, analytic")
     p2 = plot(ts, xs, u_predict[i], linetype = :contourf, title = "predict")
     p3 = plot(ts, xs, diff_u[i], linetype = :contourf, title = "error")
-    plot(p1, p2, p3)
+    push!(ps, plot(p1, p2, p3))
 end
 ```
+```@example system
+ps[1]
+```
+
+```@example system
+ps[2]
+```
+
+```@example system
+ps[3]
+```
+
 
 Notice here that the solution is represented in the `OptimizationSolution` with `u` as
 the parameters for the trained neural network. But, for the case where the neural network
 is from Lux.jl, it's given as a `ComponentArray` where `res.u.depvar.x` corresponds to the result