diff --git a/.gitignore b/.gitignore
index 31d573e..4a919cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,4 @@
 /docs/Manifest.toml
 /docs/build/
 Manifest.toml
+.vscode/settings.json
diff --git a/src/NeuralLyapunov.jl b/src/NeuralLyapunov.jl
index e874d01..78171b2 100644
--- a/src/NeuralLyapunov.jl
+++ b/src/NeuralLyapunov.jl
@@ -7,16 +7,19 @@ using ModelingToolkit
 import SciMLBase

 include("conditions_specification.jl")
+include("structure_specification.jl")
+include("minimization_conditions.jl")
+include("decrease_conditions.jl")
+include("decrease_conditions_RoA_aware.jl")
 include("NeuralLyapunovPDESystem.jl")
 include("local_Lyapunov.jl")

 export NeuralLyapunovPDESystem, NumericalNeuralLyapunovFunctions
 export local_Lyapunov
-export NeuralLyapunovSpecification, NeuralLyapunovStructure,
-    UnstructuredNeuralLyapunov, NonnegativeNeuralLyapunov,
-    PositiveSemiDefiniteStructure,
-    LyapunovMinimizationCondition, StrictlyPositiveDefinite,
-    PositiveSemiDefinite, DontCheckNonnegativity, LyapunovDecreaseCondition,
-    AsymptoticDecrease, ExponentialDecrease, DontCheckDecrease
+export NeuralLyapunovSpecification, NeuralLyapunovStructure, UnstructuredNeuralLyapunov,
+    NonnegativeNeuralLyapunov, PositiveSemiDefiniteStructure,
+    LyapunovMinimizationCondition, StrictlyPositiveDefinite, PositiveSemiDefinite,
+    DontCheckNonnegativity, LyapunovDecreaseCondition, AsymptoticDecrease,
+    ExponentialDecrease, DontCheckDecrease, RoAAwareDecreaseCondition, make_RoA_aware

 end
diff --git a/src/NeuralLyapunovPDESystem.jl b/src/NeuralLyapunovPDESystem.jl
index 95d1d18..349ae2d 100644
--- a/src/NeuralLyapunovPDESystem.jl
+++ b/src/NeuralLyapunovPDESystem.jl
@@ -1,28 +1,26 @@
 """
     NeuralLyapunovPDESystem(dynamics, lb, ub, spec; fixed_point, ps)

-Constructs a ModelingToolkit PDESystem to train a neural Lyapunov function
-
-Returns the PDESystem and a function representing the neural network, which
-operates columnwise.
-
-The neural Lyapunov function will only be trained for { x : lb .≤ x .≤ ub }.
-The Lyapunov function will be for the dynamical system represented by dynamics
-If dynamics is an ODEProblem or ODEFunction, then the corresponding ODE; if
-dynamics is a function, then the ODE is ẋ = dynamics(x, p, t). This ODE should
-not depend on t (time t=0.0 alone will be used) and should have a fixed point
-at x = fixed_point. The particular Lyapunov conditions to be used and structure
-of the neural Lyapunov function are specified through spec, which is a
-NeuralLyapunovSpecification.
-
-The returned neural network function takes three inputs: the neural network
-structure phi, the trained network parameters, and a matrix of inputs to
-operate on columnwise.
-
-If dynamics requires parameters, their values can be supplied through the
-Vector p, or through dynamics.p if dynamics isa ODEProblem (in which case, let
-the other be SciMLBase.NullParameters()). If dynamics is an ODEFunction and
-dynamics.paramsyms is defined, then p should have the same order.
+Constructs a ModelingToolkit `PDESystem` to train a neural Lyapunov function
+
+Returns the `PDESystem` and a function representing the neural network, which operates
+columnwise.
+
+The neural Lyapunov function will only be trained for `{ x : lb .≤ x .≤ ub }`. The Lyapunov
+function will be for the dynamical system represented by `dynamics`. If `dynamics` is an
+`ODEProblem` or `ODEFunction`, the ODE is the one it represents; if `dynamics` is a
+function, the ODE is `ẋ = dynamics(x, p, t)`. 
This ODE should not depend on `t` (time `t=0.0` alone +will be used) and should have a fixed point at `x = fixed_point`. The particular Lyapunov +conditions to be used and structure of the neural Lyapunov function are specified through +`spec`, which is a `NeuralLyapunovSpecification`. + +The returned neural network function takes three inputs: the neural network structure `phi`, +the trained network parameters, and a matrix of inputs to operate on columnwise. + +If `dynamics` requires parameters, their values can be supplied through the Vector `p`, or +through the parameters of `dynamics` if `dynamics isa ODEProblem` (in which case, let +the other be `SciMLBase.NullParameters()`). If `dynamics` is an `ODEFunction` and +`dynamics.paramsyms` is defined, then `p` should have the same order. """ function NeuralLyapunovPDESystem( dynamics::ODEFunction, @@ -128,7 +126,9 @@ function NeuralLyapunovPDESystem( elseif dynamics.p == p p else - throw(ErrorException("Conflicting parameter definitions. Please define parameters only through p or dynamics.p; the other should be SciMLBase.NullParameters()")) + throw(ErrorException("Conflicting parameter definitions. Please define parameters" * + " only through p or dynamics.p; the other should be " * + "SciMLBase.NullParameters()")) end return NeuralLyapunovPDESystem( @@ -269,20 +269,21 @@ function _NeuralLyapunovPDESystem( end """ - NumericalNeuralLyapunovFunctions(phi, θ, network_func, structure, dynamics, fixed_point; jac, J_net) + NumericalNeuralLyapunovFunctions(phi, θ, network_func, structure, dynamics, fixed_point; + jac, J_net) -Returns the Lyapunov function, its time derivative, and its gradient: V(state), -V̇(state), and ∇V(state) +Returns the Lyapunov function, its time derivative, and its gradient: `V(state)`, +`V̇(state)`, and `∇V(state)` -These functions can operate on a state vector or columnwise on a matrix of state -vectors. phi is the neural network with parameters θ. network_func(phi, θ, state) -is an output of NeuralLyapunovPDESystem, which evaluates the neural network -represented phi with parameters θ at state. +These functions can operate on a state vector or columnwise on a matrix of state vectors. +`phi` is the neural network with parameters `θ`. `network_func(phi, θ, state)` is an output +of `NeuralLyapunovPDESystem`, which evaluates the neural network represented by `phi` with +parameters `θ` at `state`. The Lyapunov function structure is specified in structure, which is a -NeuralLyapunovStructure. The Jacobian of the network is either specified via -J_net(_phi, _θ, state) or calculated using jac, which defaults to -ForwardDiff.jacobian +`NeuralLyapunovStructure`. The Jacobian of the network is either specified via +`J_net(_phi, _θ, state)` or calculated using `jac`, which defaults to +`ForwardDiff.jacobian`. """ function NumericalNeuralLyapunovFunctions( phi, @@ -328,18 +329,19 @@ function NumericalNeuralLyapunovFunctions( end """ - NumericalNeuralLyapunovFunctions(phi, θ, network_func, V_structure, dynamics, fixed_point, grad) + NumericalNeuralLyapunovFunctions(phi, θ, network_func, V_structure, dynamics, + fixed_point, grad) -Returns the Lyapunov function, its time derivative, and its gradient: V(state), -V̇(state), and ∇V(state) +Returns the Lyapunov function, its time derivative, and its gradient: `V(state)`, +`V̇(state)`, and `∇V(state)`. -These functions can operate on a state vector or columnwise on a matrix of state -vectors. phi is the neural network with parameters θ. 
network_func is an output
-of NeuralLyapunovPDESystem.
+These functions can operate on a state vector or columnwise on a matrix of state vectors.
+`phi` is the neural network with parameters `θ`. `network_func` is an output of
+`NeuralLyapunovPDESystem`.

 The Lyapunov function structure is defined by
-    V_structure(_network_func, state, fixed_point)
-Its gradient is calculated using grad, which defaults to ForwardDiff.gradient.
+    `V_structure(_network_func, state, fixed_point)`
+Its gradient is calculated using `grad`, which defaults to `ForwardDiff.gradient`.
 """
 function NumericalNeuralLyapunovFunctions(
     phi,
diff --git a/src/conditions_specification.jl b/src/conditions_specification.jl
index d9634e4..8e30305 100644
--- a/src/conditions_specification.jl
+++ b/src/conditions_specification.jl
@@ -3,15 +3,15 @@

 Specifies the structure of the neural Lyapunov function and its derivative.

-Allows the user to define the Lyapunov in terms of the neural network to
-structurally enforce Lyapunov conditions.
-network_dim is the dimension of the output of the neural network.
-V(phi::Function, state, fixed_point) takes in the neural network, the state,
-and the fixed_point, and outputs the value of the Lyapunov function at state
-V̇(phi::Function, J_phi::Function, f::Function, state, fixed_point) takes in the
-neural network, the jacobian of the neural network, the dynamics (as a function
-of the state alone), the state, and the fixed_point, and outputs the time
-derivative of the Lyapunov function at state.
+Allows the user to define the Lyapunov function in terms of the neural network to
+structurally enforce Lyapunov conditions.
+`network_dim` is the dimension of the output of the neural network.
+`V(phi::Function, state, fixed_point)` takes in the neural network, the state, and the fixed
+point, and outputs the value of the Lyapunov function at `state`.
+`V̇(phi::Function, J_phi::Function, f::Function, state, fixed_point)` takes in the neural
+network, the Jacobian of the neural network, the dynamics (as a function of the state
+alone), the state, and the fixed point, and outputs the time derivative of the Lyapunov
+function at `state`.
 """
 struct NeuralLyapunovStructure
     V::Function
@@ -21,470 +21,98 @@ end

 """
-    UnstructuredNeuralLyapunov()
+    AbstractLyapunovMinimizationCondition

-Creates a NeuralLyapunovStructure where the Lyapunov function is the neural
-network evaluated at state. This does not structurally enforce any Lyapunov
-conditions.
-"""
-function UnstructuredNeuralLyapunov()::NeuralLyapunovStructure
-    NeuralLyapunovStructure(
-        (net, state, fixed_point) -> net(state),
-        (net, grad_net, state, fixed_point) -> grad_net(state),
-        (net, grad_net, f, state, fixed_point) -> grad_net(state) ⋅ f(state),
-        1
-    )
-end
+Represents the minimization condition in a neural Lyapunov problem

+All concrete `AbstractLyapunovMinimizationCondition` subtypes should define the
+`check_nonnegativity`, `check_fixed_point`, and `get_minimization_condition` functions.
 """
-    NonnegativeNeuralLyapunov(network_dim, δ, pos_def; grad_pos_def, grad)
-
-Creates a NeuralLyapunovStructure where the Lyapunov function is the L2 norm of
-the neural network output plus a constant δ times a function pos_def.
+abstract type AbstractLyapunovMinimizationCondition end

-The condition that the Lyapunov function must be minimized uniquely at the
-fixed point can be represented as V(fixed_point) = 0, V(state) > 0 when
-state != fixed_point. This structure ensures V(state) ≥ 0. 
Further, if δ > 0 -and pos_def(fixed_point, fixed_point) = 0, but pos_def(state, fixed_point) > 0 -when state != fixed_point, this ensures that V(state) > 0 when -state != fixed_point. This does not enforce V(fixed_point) = 0, so that -condition must included in the neural Lyapunov loss function. +""" + AbstractLyapunovDecreaseCondition -grad_pos_def(state, fixed_point) should be the gradient of pos_def with respect -to state at state. If grad_pos_def is not defined, it is evaluated using grad, -which defaults to ForwardDiff.gradient. +Represents the decrease condition in a neural Lyapunov problem -The neural network output has dimension network_dim. +All concrete `AbstractLyapunovDecreaseCondition` subtypes should define the +`check_decrease`, `check_stationary_fixed_point`, and `get_decrease_condition` functions. """ -function NonnegativeNeuralLyapunov( - network_dim::Integer; - δ::Real = 0.0, - pos_def::Function = (state, fixed_point) -> log(1.0 + - (state - fixed_point) ⋅ - (state - fixed_point)), - grad_pos_def = nothing, - grad = ForwardDiff.gradient -)::NeuralLyapunovStructure - if δ == 0.0 - NeuralLyapunovStructure( - (net, state, fixed_point) -> net(state) ⋅ net(state), - (net, J_net, state, fixed_point) -> 2 * transpose(net(state)) * J_net(state), - (net, J_net, f, state, fixed_point) -> 2 * - dot(net(state), J_net(state), f(state)), - network_dim - ) - else - grad_pos_def = if isnothing(grad_pos_def) - (state, fixed_point) -> grad((x) -> pos_def(x, fixed_point), state) - else - grad_pos_def - end - NeuralLyapunovStructure( - (net, state, fixed_point) -> net(state) ⋅ net(state) + - δ * pos_def(state, fixed_point), - (net, J_net, state, fixed_point) -> 2 * transpose(net(state)) * J_net(state) + - δ * grad_pos_def(state, fixed_point), - (net, J_net, f, state, fixed_point) -> 2 * dot( - net(state), - J_net(state), - f(state) - ) + δ * grad_pos_def(state, fixed_point) ⋅ f(state), - network_dim - ) - end -end +abstract type AbstractLyapunovDecreaseCondition end """ - PositiveSemiDefiniteStructure(network_dim; pos_def, non_neg, grad_pos_def, grad_non_neg, grad) - -Creates a NeuralLyapunovStructure where the Lyapunov function is the product of -a positive (semi-)definite function pos_def which does not depend on the -network and a nonnegative function non_neg which does depend the network. - -The condition that the Lyapunov function must be minimized uniquely at the -fixed point can be represented as V(fixed_point) = 0, V(state) > 0 when -state != fixed_point. This structure ensures V(state) ≥ 0. Further, if pos_def -is 0 only at fixed_point (and positive elsewhere) and if non_neg is strictly -positive away from fixed_point (as is the case for the default values of pos_def -and non_neg), then this structure ensures V(fixed_point) = 0 and V(state) > 0 -when state != fixed_point. + NeuralLyapunovSpecification -grad_pos_def(state, fixed_point) should be the gradient of pos_def with respect -to state at state. Similarly, grad_non_neg(net, J_net, state, fixed_point) -should be the gradient of non_neg(net, state, fixed_point) with respect to -state at state. If grad_pos_def or grad_non_neg is not defined, it is evaluated -using grad, which defaults to ForwardDiff.gradient. - -The neural network output has dimension network_dim. 
+Specifies a neural Lyapunov problem """ -function PositiveSemiDefiniteStructure( - network_dim::Integer; - pos_def::Function = (state, fixed_point) -> log(1.0 + - (state - fixed_point) ⋅ - (state - fixed_point)), - non_neg::Function = (net, state, fixed_point) -> 1 + net(state) ⋅ net(state), - grad_pos_def = nothing, - grad_non_neg = nothing, - grad = ForwardDiff.gradient -) - _grad(f::Function, x::AbstractArray{T}) where {T <: Num} = Symbolics.gradient(f(x), x) - _grad(f::Function, x) = grad(f, x) - grad_pos_def = if isnothing(grad_pos_def) - (state, fixed_point) -> _grad((x) -> pos_def(x, fixed_point), state) - else - grad_pos_def - end - grad_non_neg = if isnothing(grad_non_neg) - (net, J_net, state, fixed_point) -> _grad( - (x) -> non_neg(net, x, fixed_point), state) - else - grad_non_neg - end - NeuralLyapunovStructure( - (net, state, fixed_point) -> pos_def(state, fixed_point) * - non_neg(net, state, fixed_point), - (net, J_net, state, fixed_point) -> grad_pos_def(state, fixed_point) * - non_neg(net, state, fixed_point) + - pos_def(state, fixed_point) * - grad_non_neg(net, J_net, state, fixed_point), - (net, J_net, f, state, fixed_point) -> (f(state) ⋅ - grad_pos_def(state, fixed_point)) * - non_neg(net, state, fixed_point) + - pos_def(state, fixed_point) * - (f(state) ⋅ grad_non_neg( - net, J_net, state, fixed_point)), - network_dim - ) +struct NeuralLyapunovSpecification + structure::NeuralLyapunovStructure + minimzation_condition::AbstractLyapunovMinimizationCondition + decrease_condition::AbstractLyapunovDecreaseCondition end -abstract type AbstractLyapunovMinimizationCondition end - """ check_nonnegativity(cond::AbstractLyapunovMinimizationCondition) -True if cond specifies training to meet the Lyapunov minimization condition, -false if cond specifies no training to meet this condition. +`true` if `cond` specifies training to meet the Lyapunov minimization condition, `false` if +`cond` specifies no training to meet this condition. """ function check_nonnegativity(cond::AbstractLyapunovMinimizationCondition)::Bool - error("check_nonnegativity not implemented for AbstractLyapunovMinimizationCondition of type $(typeof(cond))") + error("check_nonnegativity not implemented for AbstractLyapunovMinimizationCondition " * + "of type $(typeof(cond))") end """ check_fixed_point(cond::AbstractLyapunovMinimizationCondition) -True if cond specifies training for the Lyapunov function to equal zero at the -fixed point, false if cond specifies no training to meet this condition. +`true` if `cond` specifies training for the Lyapunov function to equal zero at the +fixed point, `false` if `cond` specifies no training to meet this condition. """ function check_fixed_point(cond::AbstractLyapunovMinimizationCondition)::Bool - error("check_fixed_point not implemented for AbstractLyapunovMinimizationCondition of type $(typeof(cond))") + error("check_fixed_point not implemented for AbstractLyapunovMinimizationCondition of" * + " type $(typeof(cond))") end """ get_minimization_condition(cond::AbstractLyapunovMinimizationCondition) -Returns a function of V, state, and fixed_point that equals zero when the -Lyapunov minimization condition is met and greater than zero when it's violated +Returns a function of `V`, `state`, and `fixed_point` that equals zero when the +Lyapunov minimization condition is met and greater than zero when it's violated. 
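+
+For example (a schematic sketch of the concrete method defined in
+`minimization_conditions.jl`), the `LyapunovMinimizationCondition` method returns
+    `(V, x, fixed_point) -> relu(strength(x, fixed_point) - V(x))`
+when `check_nonnegativity` is `true`, and `nothing` otherwise.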
""" function get_minimization_condition(cond::AbstractLyapunovMinimizationCondition) - error("get_condition not implemented for AbstractLyapunovMinimizationCondition of type $(typeof(cond))") + error("get_condition not implemented for AbstractLyapunovMinimizationCondition of " * + "type $(typeof(cond))") end -""" - LyapunovMinimizationCondition - -Specifies the form of the Lyapunov conditions to be used. - -If check_nonnegativity is true, training will attempt to enforce - V(state) ≥ strength(state, fixed_point) -The inequality will be approximated by the equation - relu(strength(state, fixed_point) - V(state)) = 0.0 -If check_fixed_point is true, then training will attempt to enforce - V(fixed_point) = 0 - -# Examples - -The condition that the Lyapunov function must be minimized uniquely at the -fixed point can be represented as V(fixed_point) = 0, V(state) > 0 when -state != fixed_point. This could be enfored by V ≥ ||state - fixed_point||^2, -which would be represented, with check_nonnegativity = true, by - strength(state, fixed_point) = ||state - fixed_point||^2, -paired with V(fixed_point) = 0, which can be enforced with - check_fixed_point = true - -If V were structured such that it is always nonnegative, then V(fixed_point) = 0 -is all that must be enforced in training for the Lyapunov function to be -uniquely minimized at fixed_point. So, in that case, we would use - check_nonnegativity = false; check_fixed_point = true - -In either case, relu = (t) -> max(0.0, t) exactly represents the inequality, -but approximations of this function are allowed. -""" -struct LyapunovMinimizationCondition <: AbstractLyapunovMinimizationCondition - check_nonnegativity::Bool - strength::Function - relu::Function - check_fixed_point::Bool -end - -function check_nonnegativity(cond::LyapunovMinimizationCondition)::Bool - cond.check_nonnegativity -end - -function check_fixed_point(cond::LyapunovMinimizationCondition)::Bool - cond.check_fixed_point -end - -function get_minimization_condition(cond::LyapunovMinimizationCondition) - if cond.check_nonnegativity - return (V, x, fixed_point) -> cond.relu(cond.strength(x, fixed_point) - V(x)) - else - return nothing - end -end - -""" - StrictlyPositiveDefinite(C; check_fixed_point, relu) - -Constructs a LyapunovMinimizationCondition representing - V(state) ≥ C * ||state - fixed_point||^2 -If check_fixed_point is true, then training will also attempt to enforce - V(fixed_point) = 0 - -The inequality is represented by a ≥ b <==> relu(b-a) = 0.0 -""" -function StrictlyPositiveDefinite(; - check_fixed_point = true, - C::Real = 1e-6, - relu = (t) -> max(0.0, t) -)::LyapunovMinimizationCondition - LyapunovMinimizationCondition( - true, - (state, fixed_point) -> C * (state - fixed_point) ⋅ (state - fixed_point), - relu, - check_fixed_point - ) -end - -""" - PositiveSemiDefinite(check_fixed_point) - -Constructs a LyapunovMinimizationCondition representing - V(state) ≥ 0 -If check_fixed_point is true, then training will also attempt to enforce - V(fixed_point) = 0 - -The inequality is represented by a ≥ b <==> relu(b-a) = 0.0 -""" -function PositiveSemiDefinite(; - check_fixed_point = true, - relu = (t) -> max(0.0, t) -)::LyapunovMinimizationCondition - LyapunovMinimizationCondition( - true, - (state, fixed_point) -> 0.0, - relu, - check_fixed_point - ) -end - -""" - DontCheckNonnegativity(check_fixed_point) - -Constructs a LyapunovMinimizationCondition which represents not checking for -nonnegativity of the Lyapunov function. 
This is appropriate in cases where this -condition has been structurally enforced. - -It is still possible to check for V(fixed_point) = 0, even in this case, for -example if V is structured to be positive for state != fixed_point, but it is -not guaranteed structurally that V(fixed_point) = 0. -""" -function DontCheckNonnegativity(; check_fixed_point = false)::LyapunovMinimizationCondition - LyapunovMinimizationCondition( - false, - (state, fixed_point) -> 0.0, - (t) -> 0.0, - check_fixed_point - ) -end - -abstract type AbstractLyapunovDecreaseCondition end - """ check_decrease(cond::AbstractLyapunovDecreaseCondition) -True if cond specifies training to meet the Lyapunov decrease condition, false -if cond specifies no training to meet this condition. +`true` if `cond` specifies training to meet the Lyapunov decrease condition, `false` +if `cond` specifies no training to meet this condition. """ function check_decrease(cond::AbstractLyapunovDecreaseCondition)::Bool - error("check_decrease not implemented for AbstractLyapunovDecreaseCondition of type $(typeof(cond))") + error("check_decrease not implemented for AbstractLyapunovDecreaseCondition of type " * + string(typeof(cond))) end """ check_stationary_fixed_point(cond::AbstractLyapunovDecreaseCondition) -True if cond specifies training for the Lyapunov function not to change at the -fixed point, false if cond specifies no training to meet this condition. +`true` if `cond` specifies training for the Lyapunov function not to change at the +fixed point, `false` if `cond` specifies no training to meet this condition. """ function check_stationary_fixed_point(cond::AbstractLyapunovDecreaseCondition)::Bool - error("check_fixed_point not implemented for AbstractLyapunovDecreaseCondition of type $(typeof(cond))") + error("check_fixed_point not implemented for AbstractLyapunovDecreaseCondition of " * + "type $(typeof(cond))") end """ get_decrease_condition(cond::AbstractLyapunovDecreaseCondition) -Returns a function of V, dVdt, state, and fixed_point that is equal to zero -when the Lyapunov decrease condition is met and greater than zero when it is -violated +Returns a function of `V`, `dVdt`, `state`, and `fixed_point` that is equal to zero +when the Lyapunov decrease condition is met and greater than zero when it is violated. """ function get_decrease_condition(cond::AbstractLyapunovDecreaseCondition) - error("get_condition not implemented for AbstractLyapunovDecreaseCondition of type $(typeof(cond))") -end - -""" -LyapunovDecreaseCondition(decrease, strength, check_fixed_point) - -Specifies the form of the Lyapunov conditions to be used; training will enforce - decrease(V, dVdt) ≤ strength(state, fixed_point) -The inequality will be approximated by the equation - relu(decrease(V, dVdt) - strength(state, fixed_point)) = 0.0 -If check_fixed_point is false, then training assumes dVdt(fixed_point) = 0, but -if check_fixed_point is true, then training will enforce dVdt(fixed_point) = 0. - -If the dynamics truly have a fixed point at fixed_point and dVdt has been -defined properly in terms of the dynamics, then dVdt(fixed_point) will be 0 and -there is no need to enforce dVdt(fixed_point) = 0, so check_fixed_point defaults -to false. 
- -# Examples: - -Asymptotic decrease can be enforced by requiring - dVdt ≤ -C |state - fixed_point|^2, -which corresponds to - decrease = (V, dVdt) -> dVdt - strength = (x, x0) -> -C * (x - x0) ⋅ (x - x0) - -Exponential decrease of rate k is proven by dVdt ≤ - k * V, so corresponds to - decrease = (V, dVdt) -> dVdt + k * V - strength = (x, x0) -> 0.0 -""" -struct LyapunovDecreaseCondition <: AbstractLyapunovDecreaseCondition - check_decrease::Bool - decrease::Function - strength::Function - relu::Function - check_fixed_point::Bool -end - -function check_decrease(cond::LyapunovDecreaseCondition)::Bool - cond.check_decrease -end - -function check_stationary_fixed_point(cond::LyapunovDecreaseCondition)::Bool - cond.check_fixed_point -end - -function get_decrease_condition(cond::LyapunovDecreaseCondition) - if cond.check_decrease - return (V, dVdt, x, fixed_point) -> cond.relu( - cond.decrease(V(x), dVdt(x)) - cond.strength(x, fixed_point) - ) - else - return nothing - end -end - -""" - AsymptoticDecrease(strict; check_fixed_point, C) - -Constructs a LyapunovDecreaseCondition corresponding to asymptotic decrease. - -If strict is false, the condition is dV/dt ≤ 0, and if strict is true, the -condition is dV/dt ≤ - C | state - fixed_point |^2 - -The inequality is represented by a ≥ b <==> relu(b-a) = 0.0 -""" -function AsymptoticDecrease(; - strict::Bool = false, - check_fixed_point::Bool = false, - C::Real = 1e-6, - relu = (t) -> max(0.0, t) -)::LyapunovDecreaseCondition - if strict - return LyapunovDecreaseCondition( - true, - (V, dVdt) -> dVdt, - (x, x0) -> -C * (x - x0) ⋅ (x - x0), - relu, - check_fixed_point - ) - else - return LyapunovDecreaseCondition( - true, - (V, dVdt) -> dVdt, - (x, x0) -> 0.0, - relu, - check_fixed_point - ) - end -end - -""" - ExponentialDecrease(k, strict; check_fixed_point, C) - -Constructs a LyapunovDecreaseCondition corresponding to exponential decrease of rate k. - -If strict is false, the condition is dV/dt ≤ -k * V, and if strict is true, the -condition is dV/dt ≤ -k * V - C * ||state - fixed_point||^2 - -The inequality is represented by a ≥ b <==> relu(b-a) = 0.0 -""" -function ExponentialDecrease( - k::Real; - strict::Bool = false, - check_fixed_point::Bool = false, - C::Real = 1e-6, - relu = (t) -> max(0.0, t) -)::LyapunovDecreaseCondition - if strict - return LyapunovDecreaseCondition( - true, - (V, dVdt) -> dVdt + k * V, - (x, x0) -> -C * (x - x0) ⋅ (x - x0), - relu, - check_fixed_point - ) - else - return LyapunovDecreaseCondition( - true, - (V, dVdt) -> dVdt + k * V, - (x, x0) -> 0.0, - relu, - check_fixed_point - ) - end -end - -""" - DontCheckDecrease(check_fixed_point = false) - -Constructs a LyapunovDecreaseCondition which represents not checking for -decrease of the Lyapunov function along system trajectories. This is appropriate -in cases when the Lyapunov decrease condition has been structurally enforced. - -It is still possible to check for dV/dt = 0 at fixed_point, even in this case. 
-"""
-function DontCheckDecrease(check_fixed_point::Bool = false)::LyapunovDecreaseCondition
-    return LyapunovDecreaseCondition(
-        false,
-        (V, dVdt) -> 0.0,
-        (state, fixed_point) -> 0.0,
-        (t) -> 0.0,
-        check_fixed_point
-    )
-end
-
-struct NeuralLyapunovSpecification
-    structure::NeuralLyapunovStructure
-    minimzation_condition::AbstractLyapunovMinimizationCondition
-    decrease_condition::AbstractLyapunovDecreaseCondition
+    error("get_condition not implemented for AbstractLyapunovDecreaseCondition of type " *
+          string(typeof(cond)))
 end
diff --git a/src/decrease_conditions.jl b/src/decrease_conditions.jl
new file mode 100644
index 0000000..48353d4
--- /dev/null
+++ b/src/decrease_conditions.jl
@@ -0,0 +1,134 @@
+"""
+    LyapunovDecreaseCondition(check_decrease, decrease, strength, relu, check_fixed_point)
+
+Specifies the form of the Lyapunov conditions to be used; if `check_decrease` is `true`,
+training will enforce `decrease(V, dVdt) ≤ strength(state, fixed_point)`.
+
+The inequality will be approximated by the equation
+    `relu(decrease(V, dVdt) - strength(state, fixed_point)) = 0.0`.
+If `check_fixed_point` is `false`, then training assumes `dVdt(fixed_point) = 0`, but
+if `check_fixed_point` is `true`, then training will enforce `dVdt(fixed_point) = 0`.
+
+If the dynamics truly have a fixed point at `fixed_point` and `dVdt` has been defined
+properly in terms of the dynamics, then `dVdt(fixed_point)` will be `0` and there is no need
+to enforce `dVdt(fixed_point) = 0`, so `check_fixed_point` defaults to `false`.
+
+# Examples:
+
+Asymptotic decrease can be enforced by requiring
+    `dVdt ≤ -C |state - fixed_point|^2`,
+which corresponds to
+    `decrease = (V, dVdt) -> dVdt`
+    `strength = (x, x0) -> -C * (x - x0) ⋅ (x - x0)`
+
+Exponential decrease of rate `k` is proven by `dVdt ≤ - k * V`, so corresponds to
+    `decrease = (V, dVdt) -> dVdt + k * V`
+    `strength = (x, x0) -> 0.0`
+"""
+struct LyapunovDecreaseCondition <: AbstractLyapunovDecreaseCondition
+    check_decrease::Bool
+    decrease::Function
+    strength::Function
+    relu::Function
+    check_fixed_point::Bool
+end
+
+function check_decrease(cond::LyapunovDecreaseCondition)::Bool
+    cond.check_decrease
+end
+
+function check_stationary_fixed_point(cond::LyapunovDecreaseCondition)::Bool
+    cond.check_fixed_point
+end
+
+function get_decrease_condition(cond::LyapunovDecreaseCondition)
+    if cond.check_decrease
+        return (V, dVdt, x, fixed_point) -> cond.relu(
+            cond.decrease(V(x), dVdt(x)) - cond.strength(x, fixed_point)
+        )
+    else
+        return nothing
+    end
+end
+
+"""
+    AsymptoticDecrease(; strict, check_fixed_point, C)
+
+Constructs a `LyapunovDecreaseCondition` corresponding to asymptotic decrease.
+
+If `strict` is `false`, the condition is `dV/dt ≤ 0`, and if `strict` is `true`, the
+condition is `dV/dt ≤ - C | state - fixed_point |^2`.
+
+The inequality is represented by `a ≥ b` <==> `relu(b-a) = 0.0`.
+"""
+function AsymptoticDecrease(;
+        strict::Bool = false,
+        check_fixed_point::Bool = false,
+        C::Real = 1e-6,
+        relu = (t) -> max(0.0, t)
+)::LyapunovDecreaseCondition
+    strength = if strict
+        (x, x0) -> -C * (x - x0) ⋅ (x - x0)
+    else
+        (x, x0) -> 0.0
+    end
+
+    return LyapunovDecreaseCondition(
+        true,
+        (V, dVdt) -> dVdt,
+        strength,
+        relu,
+        check_fixed_point
+    )
+end
+
+"""
+    ExponentialDecrease(k; strict, check_fixed_point, C)
+
+Constructs a `LyapunovDecreaseCondition` corresponding to exponential decrease of rate `k`. 
+
+If `strict` is `false`, the condition is `dV/dt ≤ -k * V`, and if `strict` is `true`, the
+condition is `dV/dt ≤ -k * V - C * ||state - fixed_point||^2`.
+
+The inequality is represented by `a ≥ b` <==> `relu(b-a) = 0.0`.
+"""
+function ExponentialDecrease(
+        k::Real;
+        strict::Bool = false,
+        check_fixed_point::Bool = false,
+        C::Real = 1e-6,
+        relu = (t) -> max(0.0, t)
+)::LyapunovDecreaseCondition
+    strength = if strict
+        (x, x0) -> -C * (x - x0) ⋅ (x - x0)
+    else
+        (x, x0) -> 0.0
+    end
+
+    return LyapunovDecreaseCondition(
+        true,
+        (V, dVdt) -> dVdt + k * V,
+        strength,
+        relu,
+        check_fixed_point
+    )
+end
+
+"""
+    DontCheckDecrease(check_fixed_point = false)
+
+Constructs a `LyapunovDecreaseCondition` which represents not checking for
+decrease of the Lyapunov function along system trajectories. This is appropriate
+in cases when the Lyapunov decrease condition has been structurally enforced.
+
+It is still possible to check for `dV/dt = 0` at `fixed_point`, even in this case.
+"""
+function DontCheckDecrease(check_fixed_point::Bool = false)::LyapunovDecreaseCondition
+    return LyapunovDecreaseCondition(
+        false,
+        (V, dVdt) -> 0.0,
+        (state, fixed_point) -> 0.0,
+        (t) -> 0.0,
+        check_fixed_point
+    )
+end
diff --git a/src/decrease_conditions_RoA_aware.jl b/src/decrease_conditions_RoA_aware.jl
new file mode 100644
index 0000000..d6a9de3
--- /dev/null
+++ b/src/decrease_conditions_RoA_aware.jl
@@ -0,0 +1,99 @@
+"""
+    RoAAwareDecreaseCondition(check_decrease, decrease, strength, relu, check_fixed_point,
+                              ρ, out_of_RoA_penalty)
+
+Specifies the form of the Lyapunov conditions to be used, training for a region of
+attraction estimate of `{ x : V(x) ≤ ρ }`.
+
+If `check_decrease` is `true`, training will enforce
+`decrease(V(state), dVdt(state)) ≤ strength(state, fixed_point)` whenever `V(state) ≤ ρ`,
+and will instead apply
+`|out_of_RoA_penalty(V(state), dVdt(state), state, fixed_point, ρ)|^2` when `V(state) > ρ`.
+
+The inequality will be approximated by the equation
+    `relu(decrease(V, dVdt) - strength(state, fixed_point)) = 0.0`.
+If `check_fixed_point` is `false`, then training assumes `dVdt(fixed_point) = 0`, but
+if `check_fixed_point` is `true`, then training will attempt to enforce
+`dVdt(fixed_point) = 0`.
+
+If the dynamics truly have a fixed point at `fixed_point` and `dVdt` has been defined
+properly in terms of the dynamics, then `dVdt(fixed_point)` will be `0` and there is no need
+to enforce `dVdt(fixed_point) = 0`, so `check_fixed_point` defaults to `false`.
+
+# Examples:
+
+Asymptotic decrease can be enforced by requiring
+    `dVdt ≤ -C |state - fixed_point|^2`,
+which corresponds to
+    `decrease = (V, dVdt) -> dVdt` and
+    `strength = (x, x0) -> -C * (x - x0) ⋅ (x - x0)`.
+
+Exponential decrease of rate `k` is proven by `dVdt ≤ - k * V`, so corresponds to
+    `decrease = (V, dVdt) -> dVdt + k * V` and
+    `strength = (x, x0) -> 0.0`.
+
+Enforcing either condition only in the region of attraction and not penalizing any points
+outside that region would correspond to
+    `out_of_RoA_penalty = (V, dVdt, state, fixed_point, ρ) -> 0.0`,
+whereas an example of a penalty that decays with distance from the fixed point is
+    `out_of_RoA_penalty = (V, dVdt, x, x0, ρ) -> 1.0 / ((x - x0) ⋅ (x - x0))`.
+Note that this penalty could also depend on values of `V`, `dVdt`, and `ρ`. 
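+
+A minimal usage sketch: rather than constructing this struct directly, one would typically
+wrap an existing condition using `make_RoA_aware` (defined in this file), e.g.
+    `make_RoA_aware(AsymptoticDecrease(strict = true); ρ = 2.0)`
+for a strict asymptotic decrease condition enforced only where `V(state) ≤ 2.0`.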
+"""
+struct RoAAwareDecreaseCondition <: AbstractLyapunovDecreaseCondition
+    check_decrease::Bool
+    decrease::Function
+    strength::Function
+    relu::Function
+    check_fixed_point::Bool
+    ρ::Real
+    out_of_RoA_penalty::Function
+end
+
+function check_decrease(cond::RoAAwareDecreaseCondition)::Bool
+    cond.check_decrease
+end
+
+function check_stationary_fixed_point(cond::RoAAwareDecreaseCondition)::Bool
+    cond.check_fixed_point
+end
+
+function get_decrease_condition(cond::RoAAwareDecreaseCondition)
+    if cond.check_decrease
+        return function (V, dVdt, x, fixed_point)
+            (V(x) ≤ cond.ρ) * cond.relu(
+                cond.decrease(V(x), dVdt(x)) - cond.strength(x, fixed_point)
+            ) +
+            (V(x) > cond.ρ) * cond.out_of_RoA_penalty(V(x), dVdt(x), x, fixed_point,
+                cond.ρ)
+        end
+    else
+        return nothing
+    end
+end
+
+"""
+    make_RoA_aware(cond; ρ, out_of_RoA_penalty)
+
+Adds awareness of the region of attraction (RoA) estimation task to the supplied
+`LyapunovDecreaseCondition`.
+
+`ρ` is the target level such that the RoA will be `{ x : V(x) ≤ ρ }`.
+`cond` specifies the loss applied when `V(state) ≤ ρ`, and
+`|out_of_RoA_penalty(V(state), dVdt(state), state, fixed_point, ρ)|^2` is the loss from
+`state` values such that `V(state) > ρ`.
+"""
+function make_RoA_aware(
+        cond::LyapunovDecreaseCondition;
+        ρ = 1.0,
+        out_of_RoA_penalty = (V, dVdt, state, fixed_point, _ρ) -> 0.0
+)::RoAAwareDecreaseCondition
+    RoAAwareDecreaseCondition(
+        cond.check_decrease,
+        cond.decrease,
+        cond.strength,
+        cond.relu,
+        cond.check_fixed_point,
+        ρ,
+        out_of_RoA_penalty
+    )
+end
diff --git a/src/minimization_conditions.jl b/src/minimization_conditions.jl
new file mode 100644
index 0000000..f19b53b
--- /dev/null
+++ b/src/minimization_conditions.jl
@@ -0,0 +1,116 @@
+"""
+    LyapunovMinimizationCondition
+
+Specifies the form of the Lyapunov conditions to be used.
+
+If `check_nonnegativity` is `true`, training will attempt to enforce
+    `V(state) ≥ strength(state, fixed_point)`.
+The inequality will be approximated by the equation
+    `relu(strength(state, fixed_point) - V(state)) = 0.0`.
+If `check_fixed_point` is `true`, then training will also attempt to enforce
+    `V(fixed_point) = 0`.
+
+# Examples
+
+The condition that the Lyapunov function must be minimized uniquely at the fixed point can
+be represented as `V(fixed_point) = 0`, `V(state) > 0` when `state ≠ fixed_point`. This
+could be enforced by `V(state) ≥ ||state - fixed_point||^2`, which would be
+represented, with `check_nonnegativity = true`, by
+    `strength(state, fixed_point) = ||state - fixed_point||^2`,
+paired with `V(fixed_point) = 0`, which can be enforced with `check_fixed_point = true`.
+
+If `V` were structured such that it is always nonnegative, then `V(fixed_point) = 0` is all
+that must be enforced in training for the Lyapunov function to be uniquely minimized at
+`fixed_point`. So, in that case, we would use
+    `check_nonnegativity = false; check_fixed_point = true`.
+
+In either case, `relu = (t) -> max(0.0, t)` exactly represents the inequality, but
+approximations of this function are allowed. 
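+
+As a concrete sketch, the condition `V(state) ≥ 0.1 * ||state - fixed_point||^2`, paired
+with `V(fixed_point) = 0`, could be encoded as
+    LyapunovMinimizationCondition(
+        true,
+        (state, fixed_point) -> 0.1 * (state - fixed_point) ⋅ (state - fixed_point),
+        (t) -> max(0.0, t),
+        true
+    )
+which is exactly what `StrictlyPositiveDefinite(C = 0.1)` constructs.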
+"""
+struct LyapunovMinimizationCondition <: AbstractLyapunovMinimizationCondition
+    check_nonnegativity::Bool
+    strength::Function
+    relu::Function
+    check_fixed_point::Bool
+end
+
+function check_nonnegativity(cond::LyapunovMinimizationCondition)::Bool
+    cond.check_nonnegativity
+end
+
+function check_fixed_point(cond::LyapunovMinimizationCondition)::Bool
+    cond.check_fixed_point
+end
+
+function get_minimization_condition(cond::LyapunovMinimizationCondition)
+    if cond.check_nonnegativity
+        return (V, x, fixed_point) -> cond.relu(cond.strength(x, fixed_point) - V(x))
+    else
+        return nothing
+    end
+end
+
+"""
+    StrictlyPositiveDefinite(; C, check_fixed_point, relu)
+
+Constructs a `LyapunovMinimizationCondition` representing
+    `V(state) ≥ C * ||state - fixed_point||^2`.
+If `check_fixed_point` is `true`, then training will also attempt to enforce
+    `V(fixed_point) = 0`.
+
+The inequality is represented by `a ≥ b` <==> `relu(b-a) = 0.0`.
+"""
+function StrictlyPositiveDefinite(;
+        check_fixed_point = true,
+        C::Real = 1e-6,
+        relu = (t) -> max(0.0, t)
+)::LyapunovMinimizationCondition
+    LyapunovMinimizationCondition(
+        true,
+        (state, fixed_point) -> C * (state - fixed_point) ⋅ (state - fixed_point),
+        relu,
+        check_fixed_point
+    )
+end
+
+"""
+    PositiveSemiDefinite(; check_fixed_point, relu)
+
+Constructs a `LyapunovMinimizationCondition` representing
+    `V(state) ≥ 0`.
+If `check_fixed_point` is `true`, then training will also attempt to enforce
+    `V(fixed_point) = 0`.
+
+The inequality is represented by `a ≥ b` <==> `relu(b-a) = 0.0`.
+"""
+function PositiveSemiDefinite(;
+        check_fixed_point = true,
+        relu = (t) -> max(0.0, t)
+)::LyapunovMinimizationCondition
+    LyapunovMinimizationCondition(
+        true,
+        (state, fixed_point) -> 0.0,
+        relu,
+        check_fixed_point
+    )
+end
+
+"""
+    DontCheckNonnegativity(; check_fixed_point)
+
+Constructs a `LyapunovMinimizationCondition` which represents not checking for nonnegativity
+of the Lyapunov function. This is appropriate in cases where this condition has been
+structurally enforced.
+
+It is still possible to check for `V(fixed_point) = 0`, even in this case, for example if
+`V` is structured to be positive for `state ≠ fixed_point`, but it is not guaranteed
+structurally that `V(fixed_point) = 0`.
+"""
+function DontCheckNonnegativity(; check_fixed_point = false)::LyapunovMinimizationCondition
+    LyapunovMinimizationCondition(
+        false,
+        (state, fixed_point) -> 0.0,
+        (t) -> 0.0,
+        check_fixed_point
+    )
+end
diff --git a/src/structure_specification.jl b/src/structure_specification.jl
new file mode 100644
index 0000000..e225fb4
--- /dev/null
+++ b/src/structure_specification.jl
@@ -0,0 +1,134 @@
+"""
+    UnstructuredNeuralLyapunov()
+
+Creates a `NeuralLyapunovStructure` where the Lyapunov function is the neural network
+evaluated at the state. This does not structurally enforce any Lyapunov conditions.
+"""
+function UnstructuredNeuralLyapunov()::NeuralLyapunovStructure
+    NeuralLyapunovStructure(
+        (net, state, fixed_point) -> net(state),
+        (net, grad_net, state, fixed_point) -> grad_net(state),
+        (net, grad_net, f, state, fixed_point) -> grad_net(state) ⋅ f(state),
+        1
+    )
+end
+
+"""
+    NonnegativeNeuralLyapunov(network_dim; δ, pos_def, grad_pos_def, grad)
+
+Creates a `NeuralLyapunovStructure` where the Lyapunov function is the L2 norm of the neural
+ +The condition that the Lyapunov function must be minimized uniquely at the fixed point can +be represented as `V(fixed_point) = 0`, `V(state) > 0` when `state ≠ fixed_point`. This +structure ensures `V(state) ≥ 0`. Further, if `δ > 0` and +`pos_def(fixed_point, fixed_point) = 0`, but `pos_def(state, fixed_point) > 0` when +`state ≠ fixed_point`, this ensures that `V(state) > 0` when `state != fixed_point`. This +does not enforce `V(fixed_point) = 0`, so that condition must included in the neural +Lyapunov loss function. + +`grad_pos_def(state, fixed_point)` should be the gradient of `pos_def` with respect to +`state` at `state`. If `grad_pos_def` is not defined, it is evaluated using `grad`, which +defaults to `ForwardDiff.gradient`. + +The neural network output has dimension `network_dim`. +""" +function NonnegativeNeuralLyapunov( + network_dim::Integer; + δ::Real = 0.0, + pos_def::Function = (state, fixed_point) -> log(1.0 + + (state - fixed_point) ⋅ + (state - fixed_point)), + grad_pos_def = nothing, + grad = ForwardDiff.gradient +)::NeuralLyapunovStructure + if δ == 0.0 + NeuralLyapunovStructure( + (net, state, fixed_point) -> net(state) ⋅ net(state), + (net, J_net, state, fixed_point) -> 2 * transpose(net(state)) * J_net(state), + (net, J_net, f, state, fixed_point) -> 2 * + dot(net(state), J_net(state), f(state)), + network_dim + ) + else + grad_pos_def = if isnothing(grad_pos_def) + (state, fixed_point) -> grad((x) -> pos_def(x, fixed_point), state) + else + grad_pos_def + end + NeuralLyapunovStructure( + (net, state, fixed_point) -> net(state) ⋅ net(state) + + δ * pos_def(state, fixed_point), + (net, J_net, state, fixed_point) -> 2 * transpose(net(state)) * J_net(state) + + δ * grad_pos_def(state, fixed_point), + (net, J_net, f, state, fixed_point) -> 2 * dot( + net(state), + J_net(state), + f(state) + ) + δ * grad_pos_def(state, fixed_point) ⋅ f(state), + network_dim + ) + end +end + +""" + PositiveSemiDefiniteStructure(network_dim; pos_def, non_neg, grad_pos_def, grad_non_neg, grad) + +Creates a `NeuralLyapunovStructure` where the Lyapunov function is the product of a positive +(semi-)definite function `pos_def` which does not depend on the network and a nonnegative +function non_neg which does depend the network. + +The condition that the Lyapunov function must be minimized uniquely at the fixed point can +be represented as `V(fixed_point) = 0`, `V(state) > 0` when `state ≠ fixed_point`. This +structure ensures `V(state) ≥ 0`. Further, if `pos_def` is `0` only at `fixed_point` (and +positive elsewhere) and if `non_neg` is strictly positive away from `fixed_point` (as is the +case for the default values of `pos_def` and `non_neg`), then this structure ensures +`V(fixed_point) = 0` and `V(state) > 0` when `state ≠ fixed_point`. + +`grad_pos_def(state, fixed_point)` should be the gradient of `pos_def` with respect to +`state` at `state`. Similarly, `grad_non_neg(net, J_net, state, fixed_point)` should be the +gradient of `non_neg(net, state, fixed_point)` with respect to `state` at `state`. If +`grad_pos_def` or `grad_non_neg` is not defined, it is evaluated using `grad`, which +defaults to `ForwardDiff.gradient`. + +The neural network output has dimension `network_dim`. 
+"""
+function PositiveSemiDefiniteStructure(
+        network_dim::Integer;
+        pos_def::Function = (state, fixed_point) -> log(1.0 +
+                                                        (state - fixed_point) ⋅
+                                                        (state - fixed_point)),
+        non_neg::Function = (net, state, fixed_point) -> 1 + net(state) ⋅ net(state),
+        grad_pos_def = nothing,
+        grad_non_neg = nothing,
+        grad = ForwardDiff.gradient
+)
+    _grad(f::Function, x::AbstractArray{T}) where {T <: Num} = Symbolics.gradient(f(x), x)
+    _grad(f::Function, x) = grad(f, x)
+    grad_pos_def = if isnothing(grad_pos_def)
+        (state, fixed_point) -> _grad((x) -> pos_def(x, fixed_point), state)
+    else
+        grad_pos_def
+    end
+    grad_non_neg = if isnothing(grad_non_neg)
+        (net, J_net, state, fixed_point) -> _grad(
+            (x) -> non_neg(net, x, fixed_point), state)
+    else
+        grad_non_neg
+    end
+    NeuralLyapunovStructure(
+        (net, state, fixed_point) -> pos_def(state, fixed_point) *
+                                     non_neg(net, state, fixed_point),
+        (net, J_net, state, fixed_point) -> grad_pos_def(state, fixed_point) *
+                                            non_neg(net, state, fixed_point) +
+                                            pos_def(state, fixed_point) *
+                                            grad_non_neg(net, J_net, state, fixed_point),
+        (net, J_net, f, state, fixed_point) -> (f(state) ⋅
+                                                grad_pos_def(state, fixed_point)) *
+                                               non_neg(net, state, fixed_point) +
+                                               pos_def(state, fixed_point) *
+                                               (f(state) ⋅ grad_non_neg(
+                                                   net, J_net, state, fixed_point)),
+        network_dim
+    )
+end
diff --git a/test/roa_estimation.jl b/test/roa_estimation.jl
new file mode 100644
index 0000000..43fb96e
--- /dev/null
+++ b/test/roa_estimation.jl
@@ -0,0 +1,131 @@
+using LinearAlgebra
+using NeuralPDE, Lux, ModelingToolkit
+using Optimization, OptimizationOptimisers, OptimizationOptimJL, NLopt
+using NeuralLyapunov
+using Random
+using Test
+
+Random.seed!(200)
+
+println("Region of Attraction Estimation")
+
+######################### Define dynamics and domain ##########################
+
+f(x, p, t) = -x .+ x .^ 3
+lb = [-2];
+ub = [2];
+
+####################### Specify neural Lyapunov problem #######################
+
+# Define neural network discretization
+dim_state = length(lb)
+dim_hidden = 5
+dim_output = 2
+chain = [Lux.Chain(
+             Dense(dim_state, dim_hidden, tanh),
+             Dense(dim_hidden, dim_hidden, tanh),
+             Dense(dim_hidden, 1, use_bias = false)
+         ) for _ in 1:dim_output]
+
+# Define training strategy
+strategy = GridTraining(0.1)
+discretization = PhysicsInformedNN(chain, strategy)
+
+# Define neural Lyapunov structure
+structure = PositiveSemiDefiniteStructure(dim_output)
+minimization_condition = DontCheckNonnegativity()
+
+# Define Lyapunov decrease condition
+decrease_condition = make_RoA_aware(AsymptoticDecrease(strict = true))
+
+# Construct neural Lyapunov specification
+spec = NeuralLyapunovSpecification(
+    structure,
+    minimization_condition,
+    decrease_condition
+)
+
+############################# Construct PDESystem #############################
+
+pde_system, network_func = NeuralLyapunovPDESystem(
+    f,
+    lb,
+    ub,
+    spec
+)
+
+######################## Construct OptimizationProblem ########################
+
+prob = discretize(pde_system, discretization)
+sym_prob = symbolic_discretize(pde_system, discretization)
+
+########################## Solve OptimizationProblem ##########################
+
+res = Optimization.solve(prob, OptimizationOptimisers.Adam(); maxiters = 300)
+prob = Optimization.remake(prob, u0 = res.u)
+res = Optimization.solve(prob, BFGS(); maxiters = 300)
+
+########################### Get numerical functions ###########################
+V_func, V̇_func, ∇V_func = NumericalNeuralLyapunovFunctions(
+    discretization.phi,
+    res.u,
+    network_func,
+    structure.V,
+    ODEFunction(f),
+    zeros(length(lb))
+)
+
+################################## Simulate ###################################
+states = first(lb):0.001:first(ub)
+V_predict = vec(V_func(states'))
+dVdt_predict = vec(V̇_func(states'))
+
+# Calculate RoA estimate
+ρ = decrease_condition.ρ
+RoA_states = states[vec(V_func(transpose(states))) .≤ ρ]
+RoA = (first(RoA_states), last(RoA_states))
+
+#################################### Tests ####################################
+
+# Network structure should enforce positive definiteness of V
+@test min(V_func([0.0]), minimum(V_predict)) ≥ 0.0
+@test V_func([0.0]) == 0.0
+
+# Dynamics should result in a fixed point at the origin
+@test V̇_func([0.0]) == 0.0
+
+# V̇ should be negative everywhere in the region of attraction except the fixed point
+@test all(V̇_func(transpose(RoA_states[RoA_states .!= 0.0])) .< 0)
+
+# The estimated region of attraction should be a subset of the real region of attraction
+@test first(RoA) ≥ -1.0 && last(RoA) ≤ 1.0
+
+#=
+# Print statistics
+println("V(0.0) = ", V_func([0.0]))
+println("V ∋ [", min(V_func([0.0]), minimum(V_predict)), ", ", maximum(V_predict), "]")
+println(
+    "V̇ ∋ [",
+    minimum(dVdt_predict),
+    ", ",
+    max(V̇_func([0.0]), maximum(dVdt_predict)),
+    "]",
+)
+println("True region of attraction: (-1, 1)")
+println("Estimated region of attraction: ", RoA)
+
+# Plot results
+using Plots
+
+p_V = plot(states, V_predict, label = "V", xlabel = "x", linewidth = 2);
+p_V = hline!([ρ], label = "V = ρ", legend = :top);
+p_V = vspan!(collect(RoA); label = "Estimated Region of Attraction", color = :gray,
+    fillstyle = :/);
+p_V = vspan!([-1, 1]; label = "True Region of Attraction", opacity = 0.2, color = :green);
+
+p_V̇ = plot(states, dVdt_predict, label = "dV/dt", xlabel = "x", linewidth = 2);
+p_V̇ = hline!([0.0], label = "dV/dt = 0", legend = :top);
+p_V̇ = vspan!(collect(RoA); label = "Estimated Region of Attraction", color = :gray,
+    fillstyle = :/);
+p_V̇ = vspan!([-1, 1]; label = "True Region of Attraction", opacity = 0.2, color = :green);
+
+plt = plot(p_V, p_V̇)
+=#