From 5fdb971a903ced9b00a5e900ae587f5e098ac707 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sun, 18 Feb 2024 07:23:58 -0500 Subject: [PATCH 01/13] Version bumps --- docs/Project.toml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/Project.toml b/docs/Project.toml index a6a50951800..8b2024e4a41 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -53,12 +53,12 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" AdvancedHMC = "0.5" BenchmarkTools = "1" CSV = "0.10" -CUDA = "4" +CUDA = "5" ComponentArrays = "0.14, 0.15" DataDrivenDiffEq = "1.2" DataDrivenSparse = "0.1" DataFrames = "1" -DiffEqFlux = "2" +DiffEqFlux = "3" DiffEqGPU = "2" DifferentialEquations = "7" Distributions = "0.25" @@ -77,13 +77,13 @@ MethodOfLines = "0.9" ModelingToolkit = "8" MultiDocumenter = "0.7" NeuralPDE = "5" -NonlinearSolve = "1" +NonlinearSolve = "3" Optimization = "3" -OptimizationMOI = "0.1" -OptimizationNLopt = "0.1" -OptimizationOptimJL = "0.1" -OptimizationOptimisers = "0.1" -OptimizationPolyalgorithms = "0.1" +OptimizationMOI = "0.3" +OptimizationNLopt = "0.2" +OptimizationOptimJL = "0.2" +OptimizationOptimisers = "0.2" +OptimizationPolyalgorithms = "0.2" OrdinaryDiffEq = "6" Plots = "1" SciMLExpectations = "2" From 2b336229ebe4530ed1ffd9e12681ab102dbc3c77 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sun, 18 Feb 2024 07:36:15 -0500 Subject: [PATCH 02/13] require Integrals v4 --- docs/Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Project.toml b/docs/Project.toml index d18587eec48..08bb8d52e3b 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -67,7 +67,7 @@ DomainSets = "0.6, 0.7" Flux = "0.13, 0.14" ForwardDiff = "0.10" IncompleteLU = "0.2" -Integrals = "3, 4" +Integrals = "4" LineSearches = "7" LinearSolve = "2" Lux = "0.5" From 607b3b921d7eb2d285573c44d79f3debfe5a1f88 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Fri, 1 Mar 2024 15:23:03 -0600 Subject: [PATCH 03/13] Update bayesian_neural_ode.md --- docs/src/showcase/bayesian_neural_ode.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/showcase/bayesian_neural_ode.md b/docs/src/showcase/bayesian_neural_ode.md index abd4be4cf25..5426c343ce9 100644 --- a/docs/src/showcase/bayesian_neural_ode.md +++ b/docs/src/showcase/bayesian_neural_ode.md @@ -52,7 +52,7 @@ complicated architecture can take a huge computational time without increasing p ```@example bnode dudt2 = Flux.Chain(x -> x .^ 3, Flux.Dense(2, 50, tanh), - Flux.Dense(50, 2)) |> f64 + Flux.Dense(50, 2)) |> Flux.f64 prob_neuralode = NeuralODE(dudt2, tspan, Tsit5(), saveat = tsteps) rng = Random.default_rng() p = Float64.(prob_neuralode.p) From 4bb302751da646bb8b3fbc283e800eda9490b0c4 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 4 Mar 2024 10:00:31 -0500 Subject: [PATCH 04/13] Update Bayesian to Lux --- docs/src/showcase/bayesian_neural_ode.md | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/docs/src/showcase/bayesian_neural_ode.md b/docs/src/showcase/bayesian_neural_ode.md index 5426c343ce9..e9d0ff037ac 100644 --- a/docs/src/showcase/bayesian_neural_ode.md +++ b/docs/src/showcase/bayesian_neural_ode.md @@ -7,7 +7,7 @@ the Neural ODE estimation and forecasting. In this tutorial, a working example o Bayesian Neural ODE: NUTS sampler is shown. !!! 
note - + For more details, have a look at this paper: https://arxiv.org/abs/2012.07244 ## Step 1: Import Libraries @@ -16,7 +16,10 @@ For this example, we will need the following libraries: ```@example bnode # SciML Libraries -using DiffEqFlux, Flux, DifferentialEquations +using DiffEqFlux, DifferentialEquations + +# ML Tools +using Lux, Zygote # External Tools using Random, Plots, AdvancedHMC, MCMCChains, StatsPlots, ComponentArrays @@ -50,12 +53,13 @@ better at prediction/forecasting than a 50 unit architecture. On the other hand, complicated architecture can take a huge computational time without increasing performance. ```@example bnode -dudt2 = Flux.Chain(x -> x .^ 3, - Flux.Dense(2, 50, tanh), - Flux.Dense(50, 2)) |> Flux.f64 +dudt2 = Lux.Chain(x -> x .^ 3, + Lux.Dense(2, 50, tanh), + Lux.Dense(50, 2)) prob_neuralode = NeuralODE(dudt2, tspan, Tsit5(), saveat = tsteps) rng = Random.default_rng() -p = Float64.(prob_neuralode.p) +p, st = Lux.setup(rng, dudt2) +p = ComponentArray{Float64}(p) ``` Note that the `f64` is required to put the Flux neural network into Float64 precision. @@ -64,7 +68,7 @@ Note that the `f64` is required to put the Flux neural network into Float64 prec ```@example bnode function predict_neuralode(p) - Array(prob_neuralode(u0, p)) + Array(prob_neuralode(u0, p, st)[1]) end function loss_neuralode(p) pred = predict_neuralode(p) @@ -84,7 +88,7 @@ The user can make several modifications to Step 4. The user can try different ac ```@example bnode l(θ) = -sum(abs2, ode_data .- predict_neuralode(θ)) - sum(θ .* θ) function dldθ(θ) - x, lambda = Flux.Zygote.pullback(l, θ) + x, lambda = Zygote.pullback(l, θ) grad = first(lambda(1)) return x, grad end From 0d7c5c8b4ba3f46fc4b0f144db654a277fc5d52e Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 4 Mar 2024 13:24:56 -0500 Subject: [PATCH 05/13] Convert --- docs/src/showcase/bayesian_neural_ode.md | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/docs/src/showcase/bayesian_neural_ode.md b/docs/src/showcase/bayesian_neural_ode.md index e9d0ff037ac..3f7539f2641 100644 --- a/docs/src/showcase/bayesian_neural_ode.md +++ b/docs/src/showcase/bayesian_neural_ode.md @@ -16,10 +16,7 @@ For this example, we will need the following libraries: ```@example bnode # SciML Libraries -using DiffEqFlux, DifferentialEquations - -# ML Tools -using Lux, Zygote +using DiffEqFlux, Flux, DifferentialEquations # External Tools using Random, Plots, AdvancedHMC, MCMCChains, StatsPlots, ComponentArrays @@ -53,17 +50,15 @@ better at prediction/forecasting than a 50 unit architecture. On the other hand, complicated architecture can take a huge computational time without increasing performance. ```@example bnode -dudt2 = Lux.Chain(x -> x .^ 3, - Lux.Dense(2, 50, tanh), - Lux.Dense(50, 2)) +dudt2 = Flux.Chain(x -> x .^ 3, + Flux.Dense(2, 50, tanh), + Flux.Dense(50, 2)) |> f64 prob_neuralode = NeuralODE(dudt2, tspan, Tsit5(), saveat = tsteps) rng = Random.default_rng() -p, st = Lux.setup(rng, dudt2) +p = Float64.(prob_neuralode.p) p = ComponentArray{Float64}(p) ``` -Note that the `f64` is required to put the Flux neural network into Float64 precision. - ## Step 3: Define the loss function for the Neural ODE. ```@example bnode @@ -88,7 +83,7 @@ The user can make several modifications to Step 4. 
The user can try different ac ```@example bnode l(θ) = -sum(abs2, ode_data .- predict_neuralode(θ)) - sum(θ .* θ) function dldθ(θ) - x, lambda = Zygote.pullback(l, θ) + x, lambda = Flux.Zygote.pullback(l, θ) grad = first(lambda(1)) return x, grad end From 5dcc75a6e590c3baec78fd0f2497c5f4a9c6291f Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 4 Mar 2024 13:26:10 -0500 Subject: [PATCH 06/13] Fix? --- docs/src/showcase/bayesian_neural_ode.md | 325 ++++++++++++----------- 1 file changed, 166 insertions(+), 159 deletions(-) diff --git a/docs/src/showcase/bayesian_neural_ode.md b/docs/src/showcase/bayesian_neural_ode.md index 3f7539f2641..8b233da5af1 100644 --- a/docs/src/showcase/bayesian_neural_ode.md +++ b/docs/src/showcase/bayesian_neural_ode.md @@ -1,159 +1,166 @@ -# [Uncertainty Quantified Deep Bayesian Model Discovery](@id bnode) - -In this tutorial, we show how SciML can combine the differential equation solvers seamlessly -with Bayesian estimation libraries like AdvancedHMC.jl and Turing.jl. This enables -converting Neural ODEs to Bayesian Neural ODEs, which enables us to estimate the error in -the Neural ODE estimation and forecasting. In this tutorial, a working example of the -Bayesian Neural ODE: NUTS sampler is shown. - -!!! note - - For more details, have a look at this paper: https://arxiv.org/abs/2012.07244 - -## Step 1: Import Libraries - -For this example, we will need the following libraries: - -```@example bnode -# SciML Libraries -using DiffEqFlux, Flux, DifferentialEquations - -# External Tools -using Random, Plots, AdvancedHMC, MCMCChains, StatsPlots, ComponentArrays -``` - -## Setup: Get the data from the Spiral ODE example - -We will also need data to fit against. As a demonstration, we will generate our data -using a simple cubic ODE `u' = A*u^3` as follows: - -```@example bnode -u0 = [2.0; 0.0] -datasize = 40 -tspan = (0.0, 1) -tsteps = range(tspan[1], tspan[2], length = datasize) -function trueODEfunc(du, u, p, t) - true_A = [-0.1 2.0; -2.0 -0.1] - du .= ((u .^ 3)'true_A)' -end -prob_trueode = ODEProblem(trueODEfunc, u0, tspan) -ode_data = Array(solve(prob_trueode, Tsit5(), saveat = tsteps)) -``` - -We will want to train a neural network to capture the dynamics that fit `ode_data`. - -## Step 2: Define the Neural ODE architecture. - -Note that this step potentially offers a lot of flexibility in the number of layers/ number -of units in each layer. It may not necessarily be true that a 100 units architecture is -better at prediction/forecasting than a 50 unit architecture. On the other hand, a -complicated architecture can take a huge computational time without increasing performance. - -```@example bnode -dudt2 = Flux.Chain(x -> x .^ 3, - Flux.Dense(2, 50, tanh), - Flux.Dense(50, 2)) |> f64 -prob_neuralode = NeuralODE(dudt2, tspan, Tsit5(), saveat = tsteps) -rng = Random.default_rng() -p = Float64.(prob_neuralode.p) -p = ComponentArray{Float64}(p) -``` - -## Step 3: Define the loss function for the Neural ODE. - -```@example bnode -function predict_neuralode(p) - Array(prob_neuralode(u0, p, st)[1]) -end -function loss_neuralode(p) - pred = predict_neuralode(p) - loss = sum(abs2, ode_data .- pred) - return loss, pred -end -``` - -## Step 4: Now we start integrating the Bayesian estimation workflow as prescribed by the AdvancedHMC interface with the NeuralODE defined above - -The AdvancedHMC interface requires us to specify: (a) the Hamiltonian log density and its gradient , (b) the sampler and (c) the step size adaptor function. 
- -For the Hamiltonian log density, we use the loss function. The θ*θ term denotes the use of Gaussian priors. - -The user can make several modifications to Step 4. The user can try different acceptance ratios, warmup samples and posterior samples. One can also use the Variational Inference (ADVI) framework, which doesn't work quite as well as NUTS. The SGLD (Stochastic Gradient Langevin Descent) sampler is seen to have a better performance than NUTS. Have a look at https://sebastiancallh.github.io/post/langevin/ for a brief introduction to SGLD. - -```@example bnode -l(θ) = -sum(abs2, ode_data .- predict_neuralode(θ)) - sum(θ .* θ) -function dldθ(θ) - x, lambda = Flux.Zygote.pullback(l, θ) - grad = first(lambda(1)) - return x, grad -end - -metric = DiagEuclideanMetric(length(p)) -h = Hamiltonian(metric, l, dldθ) -``` - -We use the NUTS sampler with an acceptance ratio of δ= 0.45 in this example. In addition, we use Nesterov Dual Averaging for the Step Size adaptation. - -We sample using 500 warmup samples and 500 posterior samples. - -```@example bnode -integrator = Leapfrog(find_good_stepsize(h, p)) -kernel = HMCKernel(Trajectory{MultinomialTS}(integrator, GeneralisedNoUTurn())) -adaptor = StanHMCAdaptor(MassMatrixAdaptor(metric), StepSizeAdaptor(0.45, integrator)) -samples, stats = sample(h, kernel, p, 500, adaptor, 500; progress = true) -``` - -## Step 5: Plot diagnostics - -Now let's make sure the fit is good. This can be done by looking at the chain mixing plot -and the autocorrelation plot. First, let's create the chain mixing plot using the plot -recipes from ???? - -```@example bnode -samples = hcat(samples...) -samples_reduced = samples[1:5, :] -samples_reshape = reshape(samples_reduced, (500, 5, 1)) -Chain_Spiral = Chains(samples_reshape) -plot(Chain_Spiral) -``` - -Now we check the autocorrelation plot: - -```@example bnode -autocorplot(Chain_Spiral) -``` - -As another diagnostic, let's check the result on retrodicted data. To do this, we generate -solutions of the Neural ODE on samples of the neural network parameters, and check the -results of the predictions against the data. Let's start by looking at the time series: - -```@example bnode -pl = scatter(tsteps, ode_data[1, :], color = :red, label = "Data: Var1", xlabel = "t", - title = "Spiral Neural ODE") -scatter!(tsteps, ode_data[2, :], color = :blue, label = "Data: Var2") -for k in 1:300 - resol = predict_neuralode(samples[:, 100:end][:, rand(1:400)]) - plot!(tsteps, resol[1, :], alpha = 0.04, color = :red, label = "") - plot!(tsteps, resol[2, :], alpha = 0.04, color = :blue, label = "") -end - -losses = map(x -> loss_neuralode(x)[1], eachcol(samples)) -idx = findmin(losses)[2] -prediction = predict_neuralode(samples[:, idx]) -plot!(tsteps, prediction[1, :], color = :black, w = 2, label = "") -plot!(tsteps, prediction[2, :], color = :black, w = 2, label = "Best fit prediction", - ylims = (-2.5, 3.5)) -``` - -That showed the time series form. 
We can similarly do a phase-space plot: - -```@example bnode -pl = scatter(ode_data[1, :], ode_data[2, :], color = :red, label = "Data", xlabel = "Var1", - ylabel = "Var2", title = "Spiral Neural ODE") -for k in 1:300 - resol = predict_neuralode(samples[:, 100:end][:, rand(1:400)]) - plot!(resol[1, :], resol[2, :], alpha = 0.04, color = :red, label = "") -end -plot!(prediction[1, :], prediction[2, :], color = :black, w = 2, - label = "Best fit prediction", ylims = (-2.5, 3)) -``` +# [Uncertainty Quantified Deep Bayesian Model Discovery](@id bnode) + +In this tutorial, we show how SciML can combine the differential equation solvers seamlessly +with Bayesian estimation libraries like AdvancedHMC.jl and Turing.jl. This enables +converting Neural ODEs to Bayesian Neural ODEs, which enables us to estimate the error in +the Neural ODE estimation and forecasting. In this tutorial, a working example of the +Bayesian Neural ODE: NUTS sampler is shown. + +!!! note + + For more details, have a look at this paper: https://arxiv.org/abs/2012.07244 + +## Step 1: Import Libraries + +For this example, we will need the following libraries: + +```@example bnode +# SciML Libraries +using DiffEqFlux, DifferentialEquations + +# ML Tools +using Lux, Zygote + +# External Tools +using Random, Plots, AdvancedHMC, MCMCChains, StatsPlots, ComponentArrays +``` + +## Setup: Get the data from the Spiral ODE example + +We will also need data to fit against. As a demonstration, we will generate our data +using a simple cubic ODE `u' = A*u^3` as follows: + +```@example bnode +u0 = [2.0; 0.0] +datasize = 40 +tspan = (0.0, 1) +tsteps = range(tspan[1], tspan[2], length = datasize) +function trueODEfunc(du, u, p, t) + true_A = [-0.1 2.0; -2.0 -0.1] + du .= ((u .^ 3)'true_A)' +end +prob_trueode = ODEProblem(trueODEfunc, u0, tspan) +ode_data = Array(solve(prob_trueode, Tsit5(), saveat = tsteps)) +``` + +We will want to train a neural network to capture the dynamics that fit `ode_data`. + +## Step 2: Define the Neural ODE architecture. + +Note that this step potentially offers a lot of flexibility in the number of layers/ number +of units in each layer. It may not necessarily be true that a 100 units architecture is +better at prediction/forecasting than a 50 unit architecture. On the other hand, a +complicated architecture can take a huge computational time without increasing performance. + +```@example bnode +dudt2 = Lux.Chain(x -> x .^ 3, + Lux.Dense(2, 50, tanh), + Lux.Dense(50, 2)) +prob_neuralode = NeuralODE(dudt2, tspan, Tsit5(), saveat = tsteps) +rng = Random.default_rng() +p, st = Lux.setup(rng, dudt2) +p = ComponentArray{Float64}(p) +const _p = p +``` + +Note that the `f64` is required to put the Flux neural network into Float64 precision. + +## Step 3: Define the loss function for the Neural ODE. + +```@example bnode +function predict_neuralode(p) + p isa ComponentArray ? p : convert(typeof(_p),p) + Array(prob_neuralode(u0, p, st)[1]) +end +function loss_neuralode(p) + pred = predict_neuralode(p) + loss = sum(abs2, ode_data .- pred) + return loss, pred +end +``` + +## Step 4: Now we start integrating the Bayesian estimation workflow as prescribed by the AdvancedHMC interface with the NeuralODE defined above + +The AdvancedHMC interface requires us to specify: (a) the Hamiltonian log density and its gradient , (b) the sampler and (c) the step size adaptor function. + +For the Hamiltonian log density, we use the loss function. The θ*θ term denotes the use of Gaussian priors. 
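Spelled out, the unnormalized log density sampled below combines the sum-of-squares data loss with an isotropic Gaussian prior. Up to additive constants, and assuming unit variances for both the likelihood and the prior (an assumption made here only to match the `l(θ)` defined in the code below), it is:

```math
\log p(\theta \mid \mathcal{D}) \propto -\sum_{i} \lVert u_i - \hat{u}_i(\theta) \rVert^2 \; - \; \theta^{\top} \theta
```

where ``\hat{u}_i(\theta)`` is the Neural ODE prediction at time ``t_i``.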
+ +The user can make several modifications to Step 4. The user can try different acceptance ratios, warmup samples and posterior samples. One can also use the Variational Inference (ADVI) framework, which doesn't work quite as well as NUTS. The SGLD (Stochastic Gradient Langevin Descent) sampler is seen to have a better performance than NUTS. Have a look at https://sebastiancallh.github.io/post/langevin/ for a brief introduction to SGLD. + +```@example bnode +l(θ) = -sum(abs2, ode_data .- predict_neuralode(θ)) - sum(θ .* θ) +function dldθ(θ) + x, lambda = Zygote.pullback(l, θ) + grad = first(lambda(1)) + return x, grad +end + +metric = DiagEuclideanMetric(length(p)) +h = Hamiltonian(metric, l, dldθ) +``` + +We use the NUTS sampler with an acceptance ratio of δ= 0.45 in this example. In addition, we use Nesterov Dual Averaging for the Step Size adaptation. + +We sample using 500 warmup samples and 500 posterior samples. + +```@example bnode +integrator = Leapfrog(find_good_stepsize(h, p)) +kernel = HMCKernel(Trajectory{MultinomialTS}(integrator, GeneralisedNoUTurn())) +adaptor = StanHMCAdaptor(MassMatrixAdaptor(metric), StepSizeAdaptor(0.45, integrator)) +samples, stats = sample(h, kernel, p, 500, adaptor, 500; progress = true) +``` + +## Step 5: Plot diagnostics + +Now let's make sure the fit is good. This can be done by looking at the chain mixing plot +and the autocorrelation plot. First, let's create the chain mixing plot using the plot +recipes from ???? + +```@example bnode +samples = hcat(samples...) +samples_reduced = samples[1:5, :] +samples_reshape = reshape(samples_reduced, (500, 5, 1)) +Chain_Spiral = Chains(samples_reshape) +plot(Chain_Spiral) +``` + +Now we check the autocorrelation plot: + +```@example bnode +autocorplot(Chain_Spiral) +``` + +As another diagnostic, let's check the result on retrodicted data. To do this, we generate +solutions of the Neural ODE on samples of the neural network parameters, and check the +results of the predictions against the data. Let's start by looking at the time series: + +```@example bnode +pl = scatter(tsteps, ode_data[1, :], color = :red, label = "Data: Var1", xlabel = "t", + title = "Spiral Neural ODE") +scatter!(tsteps, ode_data[2, :], color = :blue, label = "Data: Var2") +for k in 1:300 + resol = predict_neuralode(samples[:, 100:end][:, rand(1:400)]) + plot!(tsteps, resol[1, :], alpha = 0.04, color = :red, label = "") + plot!(tsteps, resol[2, :], alpha = 0.04, color = :blue, label = "") +end + +losses = map(x -> loss_neuralode(x)[1], eachcol(samples)) +idx = findmin(losses)[2] +prediction = predict_neuralode(samples[:, idx]) +plot!(tsteps, prediction[1, :], color = :black, w = 2, label = "") +plot!(tsteps, prediction[2, :], color = :black, w = 2, label = "Best fit prediction", + ylims = (-2.5, 3.5)) +``` + +That showed the time series form. 
We can similarly do a phase-space plot: + +```@example bnode +pl = scatter(ode_data[1, :], ode_data[2, :], color = :red, label = "Data", xlabel = "Var1", + ylabel = "Var2", title = "Spiral Neural ODE") +for k in 1:300 + resol = predict_neuralode(samples[:, 100:end][:, rand(1:400)]) + plot!(resol[1, :], resol[2, :], alpha = 0.04, color = :red, label = "") +end +plot!(prediction[1, :], prediction[2, :], color = :black, w = 2, + label = "Best fit prediction", ylims = (-2.5, 3)) +``` From 45cdceff08eceb284aaaa98978126cc9f00a9734 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Mon, 4 Mar 2024 17:02:35 -0500 Subject: [PATCH 07/13] fix the plots --- docs/src/showcase/bayesian_neural_ode.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/showcase/bayesian_neural_ode.md b/docs/src/showcase/bayesian_neural_ode.md index 8b233da5af1..673ebc3c435 100644 --- a/docs/src/showcase/bayesian_neural_ode.md +++ b/docs/src/showcase/bayesian_neural_ode.md @@ -69,7 +69,7 @@ Note that the `f64` is required to put the Flux neural network into Float64 prec ```@example bnode function predict_neuralode(p) - p isa ComponentArray ? p : convert(typeof(_p),p) + p = p isa ComponentArray ? p : convert(typeof(_p),p) Array(prob_neuralode(u0, p, st)[1]) end function loss_neuralode(p) From 606634d3bad459f240a7dd0f95e87660ecdbf8ec Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Tue, 5 Mar 2024 02:36:09 -0500 Subject: [PATCH 08/13] avoid unit error --- docs/src/showcase/ode_types.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/src/showcase/ode_types.md b/docs/src/showcase/ode_types.md index facf7d61358..c586b6411d2 100644 --- a/docs/src/showcase/ode_types.md +++ b/docs/src/showcase/ode_types.md @@ -33,13 +33,13 @@ time values in the same type as tspan, and the solution values in the same type initial condition. !!! note - + Support for this feature is restricted to the native algorithms of OrdinaryDiffEq.jl. The other solvers such as Sundials.jl, and ODEInterface.jl are incompatible with some number systems. !!! warn - + Adaptive timestepping requires that the time type is compatible with `sqrt` and `^` functions. Thus for example, `tspan` cannot be `Int` if adaptive timestepping is chosen. @@ -154,8 +154,8 @@ sqrt(t) Many operations work. 
These operations will check to make sure units are correct, and will throw an error for incorrect operations: -```@example odetypes -#t + sqrt(t) +```julia +t + sqrt(t) ``` ### Using Unitful with DifferentialEquations.jl From 7d4346a1c06922e9ed5fe41a0da6791ed04432ab Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Fri, 29 Mar 2024 15:33:29 -0400 Subject: [PATCH 09/13] Update Project.toml --- docs/Project.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/Project.toml b/docs/Project.toml index 08bb8d52e3b..1fd9eeb670a 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -73,8 +73,8 @@ LinearSolve = "2" Lux = "0.5" MCMCChains = "6" Measurements = "2" -MethodOfLines = "0.9, 0.10" -ModelingToolkit = "8" +MethodOfLines = "0.11" +ModelingToolkit = "9" MultiDocumenter = "0.7" NeuralPDE = "5" NonlinearSolve = "3" From 5f966f087b1d627e463763361cba4d04970bb735 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sat, 30 Mar 2024 01:36:55 -0400 Subject: [PATCH 10/13] Update Project.toml --- docs/Project.toml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/Project.toml b/docs/Project.toml index 1fd9eeb670a..ff2ac0a9787 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -50,12 +50,12 @@ Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [compat] -AdvancedHMC = "0.5, 0.6" +AdvancedHMC = "0.6" BenchmarkTools = "1" CSV = "0.10" CUDA = "5" -ComponentArrays = "0.14, 0.15" -DataDrivenDiffEq = "1.2" +ComponentArrays = "0.15" +DataDrivenDiffEq = "1.4" DataDrivenSparse = "0.1" DataFrames = "1" DiffEqFlux = "3" @@ -74,9 +74,9 @@ Lux = "0.5" MCMCChains = "6" Measurements = "2" MethodOfLines = "0.11" -ModelingToolkit = "9" +ModelingToolkit = "9.9" MultiDocumenter = "0.7" -NeuralPDE = "5" +NeuralPDE = "5.15" NonlinearSolve = "3" Optimization = "3" OptimizationMOI = "0.3" From 23cfe8d85bd4ea823904cb2ea40e470a35261164 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sat, 30 Mar 2024 01:39:19 -0400 Subject: [PATCH 11/13] Update Project.toml --- docs/Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Project.toml b/docs/Project.toml index ff2ac0a9787..861dac8bb32 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -79,7 +79,7 @@ MultiDocumenter = "0.7" NeuralPDE = "5.15" NonlinearSolve = "3" Optimization = "3" -OptimizationMOI = "0.3" +OptimizationMOI = "0.4" OptimizationNLopt = "0.2" OptimizationOptimJL = "0.2" OptimizationOptimisers = "0.2" From 74e710c24f91e126c3ff62a7df7aa92298fda6f6 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sat, 30 Mar 2024 02:08:52 -0400 Subject: [PATCH 12/13] Update first_simulation.md --- docs/src/getting_started/first_simulation.md | 33 +++++++------------- 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/docs/src/getting_started/first_simulation.md b/docs/src/getting_started/first_simulation.md index abe8acaeb2a..957d4672443 100644 --- a/docs/src/getting_started/first_simulation.md +++ b/docs/src/getting_started/first_simulation.md @@ -66,14 +66,11 @@ eqs = [D(x) ~ α * x - β * x * y z ~ x + y] # Bring these pieces together into an ODESystem with independent variable t -@named sys = ODESystem(eqs, t) - -# Symbolically Simplify the System -simpsys = structural_simplify(sys) +@mtkbuild sys = ODESystem(eqs, t) # Convert from a symbolic to a numerical problem to simulate tspan = (0.0, 10.0) -prob = ODEProblem(simpsys, [], tspan) +prob = ODEProblem(sys, [], tspan) # Solve the ODE 
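# (No algorithm is passed below, so DifferentialEquations.jl's default
# polyalgorithm selects an appropriate solver for the problem automatically.)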
sol = solve(prob) @@ -173,22 +170,15 @@ to represent an `ODESystem` with the following: ```@example first_sim # Bring these pieces together into an ODESystem with independent variable t -@named sys = ODESystem(eqs, t) +@mtkbuild sys = ODESystem(eqs, t) ``` -Next, we want to simplify this into a standard ODE system. Notice that in our equations -we have an algebraic equation `z ~ x + y`. This is not a differential equation but an -algebraic equation, and thus we call this set of equations a Differential-Algebraic Equation -(DAE). The symbolic system of ModelingToolkit can eliminate such equations to return simpler -forms to numerically approximate. Let's tell it to simplify the system using -`structural_simplify`: - -```@example first_sim -# Symbolically Simplify the System -simpsys = structural_simplify(sys) -``` +Notice that in our equations we have an algebraic equation `z ~ x + y`. This is not a +differential equation but an algebraic equation, and thus we call this set of equations a +Differential-Algebraic Equation (DAE). The symbolic system of ModelingToolkit can eliminate +such equations to return simpler forms to numerically approximate. -Notice that what is returned is another `ODESystem`, but now with the simplified set of +Notice that what is returned is an `ODESystem`, but now with the simplified set of equations. `z` has been turned into an “observable”, i.e. a state that is not computed but can be constructed on-demand. This is one of the ways that SciML reaches its speed: you can have 100,000 equations, but solve only 1,000 to then automatically reconstruct @@ -213,7 +203,7 @@ like: ```@example first_sim # Convert from a symbolic to a numerical problem to simulate tspan = (0.0, 10.0) -prob = ODEProblem(simpsys, [], tspan) +prob = ODEProblem(sys, [], tspan) ``` ### Step 4: Solve the ODE System @@ -282,9 +272,8 @@ D = Differential(t) eqs = [D(🐰) ~ α * 🐰 - β * 🐰 * 🐺, D(🐺) ~ -γ * 🐺 + δ * 🐰 * 🐺] -@named sys = ODESystem(eqs, t) -simpsys = structural_simplify(sys) -prob = ODEProblem(simpsys, [], (0.0, 10.0)) +@mtkbuild sys = ODESystem(eqs, t) +prob = ODEProblem(sys, [], (0.0, 10.0)) sol = solve(prob) ``` From 36eb2009a1501f981372684732d29ab9bd4202c5 Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Sat, 30 Mar 2024 02:12:22 -0400 Subject: [PATCH 13/13] Update find_root.md --- docs/src/getting_started/find_root.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/src/getting_started/find_root.md b/docs/src/getting_started/find_root.md index aa6d021f25f..ec532754e34 100644 --- a/docs/src/getting_started/find_root.md +++ b/docs/src/getting_started/find_root.md @@ -47,7 +47,7 @@ using ModelingToolkit, NonlinearSolve eqs = [0 ~ σ * (y - x), 0 ~ x * (ρ - z) - y, 0 ~ x * y - β * z] -@named ns = NonlinearSystem(eqs, [x, y, z], [σ, ρ, β]) +@mtkbuild ns = NonlinearSystem(eqs, [x, y, z], [σ, ρ, β]) # Convert the symbolic system into a numerical system prob = NonlinearProblem(ns, []) @@ -56,7 +56,7 @@ prob = NonlinearProblem(ns, []) sol = solve(prob, NewtonRaphson()) # Analyze the solution -@show sol.u, prob.f(sol.u, prob.p) +@show sol[[x,y,z]], sol.resid ``` ## Step-by-Step Solution @@ -122,7 +122,7 @@ Finally, we bring these pieces together, the equation along with its states and define our `NonlinearSystem`: ```@example first_rootfind -@named ns = NonlinearSystem(eqs, [x, y, z], [σ, ρ, β]) +@mtkbuild ns = NonlinearSystem(eqs, [x, y, z], [σ, ρ, β]) ``` ### Step 3: Convert the Symbolic Problem to a Numerical Problem @@ -178,5 +178,5 @@ We can check it as 
follows: ```@example first_rootfind # Analyze the solution -@show sol.u, sol.resid +@show sol[[x,y,z]], sol.resid ```
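For reference, stitching together the fragments this last patch touches, the updated rootfinding tutorial code would read roughly as follows. This is a sketch assuming the ModelingToolkit v9 / NonlinearSolve v3 pins from the earlier patches; the default guesses and parameter values attached to `@variables`/`@parameters` are assumptions, since the diff context does not show those declarations.

```julia
using ModelingToolkit, NonlinearSolve

# Unknowns with initial guesses and parameters with default values
# (assumed here; these declarations are not shown in the diff context)
@variables x=1.0 y=0.0 z=0.0
@parameters σ=10.0 ρ=26.0 β=8/3

# Define the nonlinear system and structurally simplify it in one step
eqs = [0 ~ σ * (y - x),
       0 ~ x * (ρ - z) - y,
       0 ~ x * y - β * z]
@mtkbuild ns = NonlinearSystem(eqs, [x, y, z], [σ, ρ, β])

# Convert the symbolic system into a numerical problem and solve
prob = NonlinearProblem(ns, [])
sol = solve(prob, NewtonRaphson())

# Symbolic indexing retrieves the solution for the original unknowns,
# even if structural simplification eliminated some of them internally
@show sol[[x, y, z]], sol.resid
```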