From 8e2723c2b4ab89ff74d151e50246968415db6041 Mon Sep 17 00:00:00 2001
From: Avik Pal
Date: Tue, 14 May 2024 14:33:54 -0400
Subject: [PATCH] Update SciMLSensitivity compats

---
 docs/run_single_tutorial.jl                  | 2 ++
 examples/GravitationalWaveForm/Project.toml  | 2 +-
 examples/NeuralODE/Project.toml              | 2 +-
 examples/PolynomialFitting/main.jl           | 4 ++--
 examples/SymbolicOptimalControl/Project.toml | 4 ++--
 5 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/docs/run_single_tutorial.jl b/docs/run_single_tutorial.jl
index 9a505ef7b..222011fb8 100644
--- a/docs/run_single_tutorial.jl
+++ b/docs/run_single_tutorial.jl
@@ -10,6 +10,8 @@ output_directory = ARGS[2]
 path = ARGS[3]
 
 io = open(pkg_log_path, "w")
+Pkg.Registry.update()
+Pkg.update()
 Pkg.develop(; path=joinpath(@__DIR__, ".."), io)
 Pkg.instantiate(; io)
 close(io)
diff --git a/examples/GravitationalWaveForm/Project.toml b/examples/GravitationalWaveForm/Project.toml
index 2d7e35733..6b1abe0b0 100644
--- a/examples/GravitationalWaveForm/Project.toml
+++ b/examples/GravitationalWaveForm/Project.toml
@@ -25,4 +25,4 @@ LuxCUDA = "0.2, 0.3"
 Optimization = "3"
 OptimizationOptimJL = "0.1, 0.2"
 OrdinaryDiffEq = "6"
-SciMLSensitivity = "7"
+SciMLSensitivity = "7.57"
diff --git a/examples/NeuralODE/Project.toml b/examples/NeuralODE/Project.toml
index cd1370406..7b68ec681 100644
--- a/examples/NeuralODE/Project.toml
+++ b/examples/NeuralODE/Project.toml
@@ -27,6 +27,6 @@ MLUtils = "0.2, 0.3, 0.4"
 OneHotArrays = "0.1, 0.2"
 Optimisers = "0.2, 0.3"
 OrdinaryDiffEq = "6"
-SciMLSensitivity = "7.45"
+SciMLSensitivity = "7.57"
 Statistics = "1"
 Zygote = "0.6"
diff --git a/examples/PolynomialFitting/main.jl b/examples/PolynomialFitting/main.jl
index efe2442de..ac3a1020f 100644
--- a/examples/PolynomialFitting/main.jl
+++ b/examples/PolynomialFitting/main.jl
@@ -74,12 +74,12 @@ vjp_rule = AutoZygote()
 function main(tstate::Lux.Experimental.TrainState, vjp, data, epochs)
     data = data .|> gpu_device()
     for epoch in 1:epochs
-        grads, loss, stats, tstate = Lux.Training.compute_gradients(
+        grads, loss, stats, tstate = Lux.Experimental.compute_gradients(
             vjp, loss_function, data, tstate)
         if epoch % 50 == 1 || epoch == epochs
             @printf "Epoch: %3d \t Loss: %.5g\n" epoch loss
         end
-        tstate = Lux.Training.apply_gradients!(tstate, grads)
+        tstate = Lux.Experimental.apply_gradients!(tstate, grads)
     end
     return tstate
 end
diff --git a/examples/SymbolicOptimalControl/Project.toml b/examples/SymbolicOptimalControl/Project.toml
index 14c3b727b..6c4f46ec1 100644
--- a/examples/SymbolicOptimalControl/Project.toml
+++ b/examples/SymbolicOptimalControl/Project.toml
@@ -30,7 +30,7 @@ Optimization = "3.24.3"
 OptimizationOptimJL = "0.2.3"
 OptimizationOptimisers = "0.2.1"
 OrdinaryDiffEq = "6.74.1"
-SciMLSensitivity = "7.56.2"
-Statistics = "1.11.1"
+SciMLSensitivity = "7.57"
+Statistics = "1.11"
 SymbolicRegression = "0.24.1"
 SymbolicUtils = "1.5.1"
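
Note (not part of the patch): the main.jl hunk moves the tutorial onto the Lux.Experimental training API, matching the compute_gradients/apply_gradients! calls shown above. Below is a minimal, self-contained sketch of one training step with that API, assuming Lux v0.5-era Lux.Experimental; the model, optimizer, data, and loss are illustrative stand-ins, not code from this repository.

    using ADTypes, Lux, Optimisers, Random, Zygote

    rng = Xoshiro(0)

    # Illustrative model and toy data (hypothetical, for demonstration only)
    model = Dense(2 => 1)
    x = randn(rng, Float32, 2, 16)
    y = randn(rng, Float32, 1, 16)

    # Loss functions for compute_gradients return (loss, updated_state, stats)
    function loss_function(model, ps, st, (x, y))
        ŷ, st = model(x, ps, st)
        return sum(abs2, ŷ .- y), st, (;)
    end

    # Bundle model, parameters, states, and optimizer into a TrainState
    tstate = Lux.Experimental.TrainState(rng, model, Adam(0.03f0))

    # One optimization step: take gradients with Zygote, then update in place
    grads, loss, stats, tstate = Lux.Experimental.compute_gradients(
        AutoZygote(), loss_function, (x, y), tstate)
    tstate = Lux.Experimental.apply_gradients!(tstate, grads)

Looping this pair of calls over epochs reproduces the structure of main() in examples/PolynomialFitting/main.jl.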