diff --git a/test/newton_neural_ode_tests.jl b/test/newton_neural_ode_tests.jl
index 35ec5aab9..62e429237 100644
--- a/test/newton_neural_ode_tests.jl
+++ b/test/newton_neural_ode_tests.jl
@@ -34,10 +34,10 @@
     optprob = Optimization.OptimizationProblem(optf, psd)
     res = Optimization.solve(optprob, NewtonTrustRegion(); maxiters = 100, callback = cb)
-    @test loss_function(res.minimizer) < l1
+    @test loss_function(res.u) < l1
     res = Optimization.solve(optprob, OptimizationOptimJL.Optim.KrylovTrustRegion();
         maxiters = 100, callback = cb)
-    @test loss_function(res.minimizer) < l1
+    @test loss_function(res.u) < l1

     @info "ROCK2"
     nODE = NeuralODE(NN, tspan, ROCK2(); reltol = 1.0f-4, saveat = [tspan[end]])
@@ -55,8 +55,8 @@
     optprob = Optimization.OptimizationProblem(optfunc, psd)
     res = Optimization.solve(optprob, NewtonTrustRegion(); maxiters = 100, callback = cb)
-    @test loss_function(res.minimizer) < l1
+    @test loss_function(res.u) < l1
     res = Optimization.solve(optprob, OptimizationOptimJL.Optim.KrylovTrustRegion();
         maxiters = 100, callback = cb)
-    @test loss_function(res.minimizer) < l1
+    @test loss_function(res.u) < l1
 end
diff --git a/test/second_order_ode_tests.jl b/test/second_order_ode_tests.jl
index 45641dbe3..5078afa01 100644
--- a/test/second_order_ode_tests.jl
+++ b/test/second_order_ode_tests.jl
@@ -39,7 +39,7 @@
         (x, p) -> loss_n_ode(x), Optimization.AutoZygote())
     optprob = Optimization.OptimizationProblem(optfunc, p)
     res = Optimization.solve(optprob, Adam(0.01f0); callback = callback, maxiters = 100)
-    l2 = loss_n_ode(res.minimizer)
+    l2 = loss_n_ode(res.u)
     @test l2 < l1

     function predict(p)
@@ -59,7 +59,7 @@
         (x, p) -> loss_n_ode(x), Optimization.AutoZygote())
     optprob = Optimization.OptimizationProblem(optfunc, p)
     res = Optimization.solve(optprob, Adam(0.01f0); callback = callback, maxiters = 100)
-    l2 = loss_n_ode(res.minimizer)
+    l2 = loss_n_ode(res.u)
     @test l2 < l1

     function predict(p)
@@ -79,6 +79,6 @@
         (x, p) -> loss_n_ode(x), Optimization.AutoZygote())
     optprob = Optimization.OptimizationProblem(optfunc, p)
     res = Optimization.solve(optprob, Adam(0.01f0); callback = callback, maxiters = 100)
-    l2 = loss_n_ode(res.minimizer)
+    l2 = loss_n_ode(res.u)
     @test l2 < l1
 end
diff --git a/test/stiff_nested_ad_tests.jl b/test/stiff_nested_ad_tests.jl
index 4742936f4..697300563 100644
--- a/test/stiff_nested_ad_tests.jl
+++ b/test/stiff_nested_ad_tests.jl
@@ -39,7 +39,7 @@
     optprob = Optimization.OptimizationProblem(optfunc, ps)
     res = Optimization.solve(
         optprob, Adam(0.1); callback = callback(solver), maxiters = 100)
-    loss2 = loss_n_ode(lux_model, res.minimizer)
+    loss2 = loss_n_ode(lux_model, res.u)
     @test loss2 < loss1
 end
 end
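
For reference, a minimal standalone sketch of the accessor this patch moves to: in Optimization.jl, the solution returned by Optimization.solve stores the final parameters in res.u, and res.minimizer is the deprecated spelling the old tests used. The objective, initial point, and parameter values below are illustrative only and are not taken from the test files.

using Optimization, OptimizationOptimJL

# Illustrative quadratic objective; any smooth loss works here.
loss(x, p) = sum(abs2, x .- p)

# ForwardDiff supplies the Hessian that the Newton trust-region method needs.
optf = Optimization.OptimizationFunction(loss, Optimization.AutoForwardDiff())
optprob = Optimization.OptimizationProblem(optf, zeros(2), ones(2))

# NewtonTrustRegion is re-exported via OptimizationOptimJL, as in the tests.
res = Optimization.solve(optprob, NewtonTrustRegion(); maxiters = 100)

# `res.u` holds the optimizer's final parameter vector; it replaces the
# `res.minimizer` field accessed throughout the pre-patch tests.
@assert loss(res.u, ones(2)) < loss(zeros(2), ones(2))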