diff --git a/bench/Project.toml b/bench/Project.toml
index d04acbc..fb9708d 100644
--- a/bench/Project.toml
+++ b/bench/Project.toml
@@ -1,8 +1,7 @@
 [deps]
-LuxLib = "82251201-b29d-42c6-8e01-566dec8acb11"
-MKL = "33e6dc65-8f57-5167-99aa-e5a354878fb2"
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
 NeuralOperators = "ea5c82af-86e5-48da-8ee1-382d6ad7af4b"
 Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2"
-Reactant = "3c362404-f566-11ee-1572-e11a4b42c853"
-Reactant_jll = "0192cb87-2b54-54ad-80e0-3be72ad8a3c0"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+ThreadPinning = "811555cd-349b-4f26-b7bc-1f208b848042"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
diff --git a/bench/comparison.md b/bench/comparison.md
index 56a147a..2343c64 100644
--- a/bench/comparison.md
+++ b/bench/comparison.md
@@ -1,6 +1,7 @@
 ## NeuralOperators.jl (Lux)
 
- ### FNO
+## FNO
+
 | #layers | Forward | Train: 10 epochs |
 | --- | --- | --- |
 | 1 | 14.173699999999998 ms | 755.1466 ms |
@@ -9,15 +10,6 @@
 | 4 | 41.431400000000004 ms | 3035.1971 ms |
 | 5 | 59.305 ms | 3456.1902999999998 ms |
 
- ### DeepONet
-| #layers | Forward | Train: 10 epochs |
-| --- | --- | --- |
-| 1 | 3.26 ms | 124.5257 ms |
-| 2 | 4.6474 ms | 165.8869 ms |
-| 3 | 5.394699999999999 ms | 191.9034 ms |
-| 4 | 7.0155 ms | 230.3965 ms |
-| 5 | 7.9754000000000005 ms | 268.3852 ms |
-
 ## FNO (python: neuraloperator)
 
 | #layers | Forward | Train: 10 epochs |
@@ -28,11 +20,22 @@
 | 4 | 12.33892210002523 ms | 41.98180860001594 ms |
 | 5 | 14.732645300013246 ms | 50.13744520000182 ms |
 
+## DeepONet
+
+| #layers | Forward | Train: 10 epochs |
+| --- | --- | --- |
+| 1 | 3.3952750000000003 ms | 76.604576 ms |
+| 2 | 4.360458 ms | 104.460251 ms |
+| 3 | 5.6310780000000005 ms | 149.148633 ms |
+| 4 | 7.199777 ms | 178.464657 ms |
+| 5 | 7.8226819999999995 ms | 193.760173 ms |
+
 ## DeepONet (python: deepxde)
+
 | #layers | Forward | Train: 10 epochs |
 | --- | --- | --- |
-| 1 | 0.6186819999129511 ms | 19.45471799990628 ms |
-| 2 | 0.5869279999751598 ms | 26.725713000050746 ms |
-| 3 | 0.7359159999759868 ms | 32.94088099995861 ms |
-| 4 | 0.9005440000328235 ms | 38.81546599994181 ms |
-| 5 | 1.0816649999469519 ms | 44.166897000104655 ms |
\ No newline at end of file
+| 1 | 0.7689221948385239 ms | 25.76469287276268 ms |
+| 2 | 0.7733150571584702 ms | 32.17746138572693 ms |
+| 3 | 0.8474267274141312 ms | 36.93301998078823 ms |
+| 4 | 1.0069304704666138 ms | 45.45578710734844 ms |
+| 5 | 1.406572386622429 ms | 59.06449243426323 ms |
diff --git a/bench/lux_no.jl b/bench/lux.jl
similarity index 92%
rename from bench/lux_no.jl
rename to bench/lux.jl
index d9bdf49..ad2492b 100644
--- a/bench/lux_no.jl
+++ b/bench/lux.jl
@@ -1,16 +1,10 @@
-using Pkg
+using ThreadPinning
+pinthreads(:cores)
+threadinfo()
 
-Pkg.activate(".")
+using BenchmarkTools, NeuralOperators, Random, Optimisers, Zygote
 
-using MKL
-
-using NeuralOperators
-using BenchmarkTools
-using Random
-using Optimisers
-using Zygote
-
-rng = Random.default_rng()
+rng = Xoshiro(1234)
 
 train!(args...; kwargs...) = train!(MSELoss(), AutoZygote(), args...; kwargs...)
 
@@ -86,3 +80,5 @@ print("| --- | --- | --- | \n")
 for i in 1:5
     print("| $i | $(t_fwd[i] * 1000) ms | $(t_train[i] * 1000) ms | \n")
 end
+
+
diff --git a/bench/pyproject.toml b/bench/pyproject.toml
new file mode 100644
index 0000000..09dc947
--- /dev/null
+++ b/bench/pyproject.toml
@@ -0,0 +1,14 @@
+[tool.poetry]
+package-mode = false
+
+[tool.poetry.dependencies]
+python = "^3.10"
+torch = "^2.4.0"
+deepxde = "^1.12.0"
+neuraloperator = "^0.3.0"
+wandb = "^0.17.5"
+ipython = "^8.26.0"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
diff --git a/bench/bm_py_codes.py b/bench/pytorch.py
similarity index 61%
rename from bench/bm_py_codes.py
rename to bench/pytorch.py
index 808b3c9..5b6bbca 100644
--- a/bench/bm_py_codes.py
+++ b/bench/pytorch.py
@@ -8,10 +8,10 @@
 """
 
 n_iters = 100
-fwd_timed_arr = [0.] * 5
-training_timed_arr = [0.] * 5
+fwd_timed_arr = [0.0] * 5
+training_timed_arr = [0.0] * 5
 
-for i in range(1,6):
+for i in range(1, 6):
     setup_code = f"""eval_points = 128
 batch_size = 64
 dim_y = 1
@@ -43,19 +43,31 @@
 """
 
     test_code = "model.predict((u,y))"
-    timed = timeit.timeit(setup = import_code+ setup_code, stmt = test_code, number = n_iters)
-    fwd_timed_arr[i-1] = timed
-    
+    timed = timeit.timeit(
+        setup=import_code + setup_code, stmt=test_code, number=n_iters
+    )
+    fwd_timed_arr[i - 1] = timed
+
     test_code = "model.train(epochs = 10)"
-    timed = timeit.timeit(setup = import_code+ setup_code, stmt = test_code, number = n_iters)
-    training_timed_arr[i-1] = timed
+    timed = timeit.timeit(
+        setup=import_code + setup_code, stmt=test_code, number=n_iters
+    )
+    training_timed_arr[i - 1] = timed
     # print(i, "\t", timed/n_iters * 1000, " ms \n")
 
 print("## DeepONet")
 print("| #layers | Forward | Train: 10 epochs |")
 print("| --- | --- | --- |")
-for i in range(1,6):
-    print("| ", i, " | ", fwd_timed_arr[i-1]/n_iters * 1000, " ms | ", training_timed_arr[i-1]/n_iters * 1000, " ms |")
+for i in range(1, 6):
+    print(
+        "| ",
+        i,
+        " | ",
+        fwd_timed_arr[i - 1] / n_iters * 1000,
+        " ms | ",
+        training_timed_arr[i - 1] / n_iters * 1000,
+        " ms |",
+    )
 
 # FNO
 
@@ -64,13 +76,13 @@
 import torch
 """
 
-timed_arr = [0.] * 5
+timed_arr = [0.0] * 5
 
 n_iters = 100
-fwd_timed_arr = [0.] * 5
-training_timed_arr = [0.] * 5
+fwd_timed_arr = [0.0] * 5
+training_timed_arr = [0.0] * 5
 n_iters = 1000
-for i in range(1,6):
+for i in range(1, 6):
     setup_code = f"""operator1d = operator1d = FNO1d(n_modes_height=16,
 hidden_channels=64,
 in_channels=1,
@@ -90,18 +102,29 @@ def train_model(model, data, y, optimser, epochs):
         optimiser.step()
 """
     test_code = "y = operator1d(x)"
-    timed = timeit.timeit(setup = import_code+ setup_code, stmt = test_code, number = n_iters)
-    fwd_timed_arr[i-1] = timed
-    
+    timed = timeit.timeit(
+        setup=import_code + setup_code, stmt=test_code, number=n_iters
+    )
+    fwd_timed_arr[i - 1] = timed
+
     test_code = """train_model(operator1d, x, y, optimiser, 10)
 """
-    timed = timeit.timeit(setup = import_code+ setup_code, stmt = test_code, number = n_iters)
-    training_timed_arr[i-1] = timed
+    timed = timeit.timeit(
+        setup=import_code + setup_code, stmt=test_code, number=n_iters
+    )
+    training_timed_arr[i - 1] = timed
 
 print("## FNO ")
 print("| #layers | Forward | Train: 10 epochs | ")
 print("| --- | --- | --- | ")
-for i in range(1,6):
-    print("| ", i, " | ", fwd_timed_arr[i-1]/n_iters * 1000, " ms | ", training_timed_arr[i-1]/n_iters * 1000, " ms | ")
+for i in range(1, 6):
+    print(
+        "| ",
+        i,
+        " | ",
+        fwd_timed_arr[i - 1] / n_iters * 1000,
+        " ms | ",
+        training_timed_arr[i - 1] / n_iters * 1000,
+        " ms | ",
+    )
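For reviewers who want to reproduce the Julia numbers, here is a minimal sketch of the harness pattern that bench/lux.jl now follows: thread pinning via ThreadPinning, a fixed Xoshiro seed, and timings printed as the markdown tables in bench/comparison.md. The timing loop itself sits outside the hunks above, so the `forward`/`train_10_epochs` functions below are hypothetical stand-ins for the real NeuralOperators.jl forward pass and training loop, and BenchmarkTools' `@belapsed` is assumed as the timing macro.

```julia
# Sketch only: `forward` and `train_10_epochs` are placeholders for the real
# NeuralOperators.jl model calls; the actual timing loop in bench/lux.jl is
# not shown in this diff.
using ThreadPinning
pinthreads(:cores)   # pin Julia threads to physical cores (replaces the old MKL setup)
threadinfo()

using BenchmarkTools, Random

rng = Xoshiro(1234)  # fixed seed, matching the new bench/lux.jl

forward(x) = sum(abs2, x)      # placeholder for the model forward pass
function train_10_epochs(x)    # placeholder for 10 training epochs
    for _ in 1:10
        forward(x)
    end
end

t_fwd, t_train = zeros(5), zeros(5)
for i in 1:5                   # i = number of layers in the real script
    x = rand(rng, Float32, 128, 64)
    t_fwd[i] = @belapsed forward($x)            # minimum elapsed time, in seconds
    t_train[i] = @belapsed train_10_epochs($x)
end

print("| #layers | Forward | Train: 10 epochs | \n")
print("| --- | --- | --- | \n")
for i in 1:5
    print("| $i | $(t_fwd[i] * 1000) ms | $(t_train[i] * 1000) ms | \n")
end
```

The print block at the end mirrors the table emitter visible in the second bench/lux.jl hunk, so the output can be pasted directly into bench/comparison.md.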