Skip to content

Commit

Permalink
update
Browse files Browse the repository at this point in the history
  • Loading branch information
KirillZubov committed Jun 17, 2024
1 parent 09f4891 commit 188ceec
Show file tree
Hide file tree
Showing 3 changed files with 46 additions and 62 deletions.
1 change: 0 additions & 1 deletion src/NeuralPDE.jl
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@ using UnPack: @unpack
import ChainRulesCore, Lux, ComponentArrays
using Lux: FromFluxAdaptor
using ChainRulesCore: @non_differentiable
#using NeuralOperators

RuntimeGeneratedFunctions.init(@__MODULE__)

Expand Down
56 changes: 37 additions & 19 deletions src/pino_ode_solve.jl
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,12 @@ end
function dfdx(phi::PINOPhi{C, T}, x::Tuple, θ, prob::ODEProblem) where {C <: DeepONet, T}
p, t = x
f = prob.f
branch_left, branch_right = f.(0, p, t .+ sqrt(eps(eltype(p)))), f.(0, p, t)
# if any(in(keys(bounds)), (:u0,))
# branch_left, branch_right = f.(0, zeros(size(p)), t .+ sqrt(eps(eltype(p)))),
# f.(0, zeros(size(p)), t)
# else
branch_left, branch_right = f.(0, p, t .+ sqrt(eps(eltype(p)))), f.(0, p, t)
# end
trunk_left, trunk_right = t .+ sqrt(eps(eltype(t))), t
x_left = (branch = branch_left, trunk = trunk_left)
x_right = (branch = branch_right, trunk = trunk_right)
Expand All @@ -91,35 +96,46 @@ function physics_loss(
phi::PINOPhi{C, T}, prob::ODEProblem, x, θ) where {C <: DeepONet, T}
p, t = x
f = prob.f
#TODO If du = f(u,p,t), where f = g(p,t)*u so it will wrong, f(0, p, t) = g(p,t)*0 = 0
#work correct only with function like du = f(p,t) + g(u)
# TODO If du = f(u,p,t) with f = g(p,t)*u, this will be wrong: f(0, p, t) = g(p,t)*0 = 0
# works correctly only for equations of the form du = f(p,t) + g(u)
# tuple = (branch = p, trunk = t)
# out = phi(tuple, θ)
# f.(out, p, t) ?
du = vec(dfdx(phi, x, θ, prob))

tuple = (branch = f.(0, p, t), trunk = t)
# if any(in(keys(bounds)), (:u0,))
# f_ = f.(0, zeros(size(p)), t)
# else
f_ = f.(0, p, t)
# end
#TODO if DeepONet else error
tuple = (branch = f_, trunk = t)
out = phi(tuple, θ)
f_ = vec(f.(out, p, t))
norm = prod(size(out))
sum(abs2, du .- f_) / norm
end

function inital_condition_loss(

Check warning on line 118 in src/pino_ode_solve.jl

View workflow job for this annotation

GitHub Actions / Spell Check with Typos

"inital" should be "initial".
phi::PINOPhi{C, T}, prob::ODEProblem, x, θ, bounds) where {C <: DeepONet, T}
phi::PINOPhi{C, T}, prob::ODEProblem, x, θ) where {C <: DeepONet, T}
p, t = x
f = prob.f
t0 = t[:, :, [1]]
f_0 = f.(0, p, t0)
tuple = (branch = f_0, trunk = t0)
out = phi(tuple, θ)
u = vec(out)
#TODO
if any(in(keys(bounds)), (:u0,))
u0_ = p
u0 = p
else
# if any(in(keys(bounds)), (:u0,))
# u0_ = collect(p)
# u0 = vec(u0_)
# #TODO f_0 = f.(0, p, t0) and p call as u0
# f_0 = f.(0, zeros(size(u0_)), t0)
# else
u0_ = fill(prob.u0, size(out))
u0 = vec(u0_)
end
norm = prod(size(u0_))
f_0 = f.(0, p, t0)
# end
tuple = (branch = f_0, trunk = t0)
out = phi(tuple, θ)
u = vec(out)

norm = prod(size(u0))
sum(abs2, u .- u0) / norm
end

Expand All @@ -137,7 +153,7 @@ end
function generate_loss(strategy::GridTraining, prob::ODEProblem, phi, bounds, tspan)
x = get_trainset(strategy, bounds, tspan)
function loss(θ, _)
inital_condition_loss(phi, prob, x, θ, bounds) + physics_loss(phi, prob, x, θ)
inital_condition_loss(phi, prob, x, θ) + physics_loss(phi, prob, x, θ)

Check warning on line 156 in src/pino_ode_solve.jl

View workflow job for this annotation

GitHub Actions / Spell Check with Typos

"inital" should be "initial".
end
end

Expand All @@ -155,8 +171,10 @@ function SciMLBase.__solve(prob::SciMLBase.AbstractODEProblem,
!(chain isa Lux.AbstractExplicitLayer) &&
error("Only Lux.AbstractExplicitLayer neural networks are supported")

#TODO implement for u0
if !any(in(keys(bounds)), (:p, :u0))
# if !any(in(keys(bounds)), (:p, :u0))
# error("bounds should contain p only")
# end
if !any(in(keys(bounds)), (:p,))
error("bounds should contain p only")
end
phi, init_params = generate_pino_phi_θ(chain, init_params)
Expand Down
51 changes: 9 additions & 42 deletions test/PINO_ode_tests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ using NeuralPDE
Lux.Dense(10, 10, Lux.tanh_fast),
Lux.Dense(10, 10, Lux.tanh_fast))

deeponet = NeuralPDE.DeepONet(branch, trunk; linear = nothing)
deeponet = DeepONet(branch, trunk; linear = nothing)
a = rand(1, 50, 40)
b = rand(1, 1, 40)
x = (branch = a, trunk = b)
Expand All @@ -30,10 +30,10 @@ using NeuralPDE
bounds = (p = [0.1f0, pi],)
db = (bounds.p[2] - bounds.p[1]) / 50
dt = (tspan[2] - tspan[1]) / 40
strategy = NeuralPDE.GridTraining([db, dt])
strategy = GridTraining([db, dt])
opt = OptimizationOptimisers.Adam(0.03)
alg = NeuralPDE.PINOODE(deeponet, opt, bounds; strategy = strategy)
sol = solve(prob, alg, verbose = false, maxiters = 2000)
alg = PINOODE(deeponet, opt, bounds; strategy = strategy)
sol = solve(prob, alg, verbose = true, maxiters = 2000)
ground_analytic = (u0, p, t) -> u0 + sin(p * t) / (p)
p_ = bounds.p[1]:strategy.dx[1]:bounds.p[2]
p = reshape(p_, 1, size(p_)[1], 1)
Expand Down Expand Up @@ -141,46 +141,12 @@ end
@test ground_solution≈sol.u rtol=0.005
end

#u0
@testset "Example du = cos(p * t)" begin
equation = (u, p, t) -> cos(p * t)
tspan = (0.0f0, 1.0f0)
u0 = 1.0f0
prob = ODEProblem(equation, u0, tspan)

branch = Lux.Chain(
Lux.Dense(1, 10, Lux.tanh_fast),
Lux.Dense(10, 10, Lux.tanh_fast),
Lux.Dense(10, 10))
trunk = Lux.Chain(
Lux.Dense(1, 10, Lux.tanh_fast),
Lux.Dense(10, 10, Lux.tanh_fast),
Lux.Dense(10, 10, Lux.tanh_fast))

deeponet = NeuralPDE.DeepONet(branch, trunk; linear = nothing)

bounds = (u0 = [0.5f0, 2.f0],)
db = (bounds.u0[2] - bounds.u0[1]) / 50
dt = (tspan[2] - tspan[1]) / 40
strategy = NeuralPDE.GridTraining([db, dt])
opt = OptimizationOptimisers.Adam(0.03)
alg = NeuralPDE.PINOODE(deeponet, opt, bounds; strategy = strategy)
sol = solve(prob, alg, verbose = true, maxiters = 2000)

ground_analytic = (u0, p, t) -> u0 + sin(p * t) / (p)
p_ = bounds.u0[1]:strategy.dx[1]:bounds.u0[2]
p = reshape(p_, 1, size(p_)[1], 1)
ground_solution = ground_analytic.(u0, p, sol.t.trunk)

@test ground_solution≈sol.u rtol=0.01
end

plot(sol.u[1, :, :], linetype = :contourf)
plot(ground_solution[1, :, :], linetype = :contourf)
plot!(ground_solution[1, :, :], linetype = :contourf)

function plot_()
# Animate
anim = @animate for (i) in 1:51
anim = @animate for (i) in 1:41
plot(ground_solution[1, i, :], label = "Ground")
# plot(equation_[1, i, :], label = "equation")
plot!(sol.u[1, i, :], label = "Predicted")
Expand All @@ -192,7 +158,8 @@ plot_()

#vector outputs and multiple parameters
@testset "Example du = cos(p * t)" begin
equation = (u, p, t) -> cos(p1 * t) + p2
# equation = (u, p, t) -> cos(p1 * t) + p2
equation = (u, p, t) -> cos(p[1] * t) + p[2]
tspan = (0.0f0, 1.0f0)
u0 = 1.0f0
prob = ODEProblem(equation, u0, tspan)
Expand All @@ -208,7 +175,7 @@ plot_()

deeponet = NeuralPDE.DeepONet(branch, trunk; linear = nothing)

bounds = (p1 = [0.1f0, pi], p2 = [0.1f0, 2.f0], u0 = [0.0f0, 2.0f0])
bounds = (p1 = [0.1f0, pi], p2 = [0.1f0, 2.0f0])
db = (bounds.u0[2] - bounds.u0[1]) / 50
dt = (tspan[2] - tspan[1]) / 40
strategy = NeuralPDE.GridTraining([db, dt])
Expand Down

0 comments on commit 188ceec

Please sign in to comment.