refactor: update to latest versions
avik-pal committed Aug 4, 2024
1 parent ce51553 commit 5016b20
Showing 9 changed files with 80 additions and 56 deletions.
5 changes: 0 additions & 5 deletions .github/workflows/CI.yml
@@ -77,9 +77,6 @@ jobs:
- uses: julia-actions/julia-downgrade-compat@v1
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-runtest@v1
env:
RETESTITEMS_NWORKERS: 4
RETESTITEMS_NWORKER_THREADS: 2
- uses: julia-actions/julia-processcoverage@v1
with:
directories: src,ext
@@ -121,5 +118,3 @@ jobs:

env:
BACKEND_GROUP: "CPU"
RETESTITEMS_NWORKERS: 4
RETESTITEMS_NWORKER_THREADS: 2
42 changes: 9 additions & 33 deletions Project.toml
@@ -1,7 +1,7 @@
name = "Boltz"
uuid = "4544d5e4-abc5-4dea-817f-29e4c205d9c8"
authors = ["Avik Pal <avikpal@mit.edu> and contributors"]
version = "0.3.10"
version = "0.3.11"

[deps]
ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
@@ -15,7 +15,7 @@ JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
LazyArtifacts = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
Lux = "b2108857-7c20-44ae-9111-449ecde12c47"
LuxCore = "bb33d45b-7691-41d6-9220-0943567d0623"
LuxDeviceUtils = "34f89e08-e1d5-43b4-8944-0b49ac560553"
MLDataDevices = "7e8f7934-dd98-4c1a-8fe8-92b47a384d40"
Markdown = "d6f4376e-aef5-505a-96c1-9c027394607a"
NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
@@ -34,49 +34,25 @@ BoltzMetalheadExt = "Metalhead"
BoltzZygoteExt = "Zygote"

[compat]
ADTypes = "1.3"
Aqua = "0.8.7"
ADTypes = "1.5"
ArgCheck = "2.3"
Artifacts = "1.10"
Artifacts = "1.10, 1"
ChainRulesCore = "1.24"
ComponentArrays = "0.15.13"
ConcreteStructs = "0.2.3"
DataInterpolations = "< 5.3"
ExplicitImports = "1.9"
DataInterpolations = "6"
ForwardDiff = "0.10.36"
GPUArraysCore = "0.1.6"
JLD2 = "0.4.48"
LazyArtifacts = "1.10"
Lux = "0.5.60"
LuxCore = "0.1.16"
LuxDeviceUtils = "0.1.26"
LuxLib = "0.3.26"
LuxTestUtils = "0.1.18"
Lux = "0.5.62"
LuxCore = "0.1.21"
MLDataDevices = "1.0.0"
Markdown = "1.10"
Metalhead = "0.9"
NNlib = "0.9.17"
Pkg = "1.10"
NNlib = "0.9.21"
Random = "1.10"
ReTestItems = "1.24.0"
Reexport = "1.2.2"
Statistics = "1.10"
Test = "1.10"
WeightInitializers = "0.1.7, 1"
Zygote = "0.6.70"
julia = "1.10"

[extras]
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
DataInterpolations = "82cc6244-b520-54b8-b5a6-8a565e85f1d0"
ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7"
LuxLib = "82251201-b29d-42c6-8e01-566dec8acb11"
LuxTestUtils = "ac9de150-d08f-4546-94fb-7472b5760531"
Metalhead = "dbeba491-748d-5e0e-a39e-b530a07fa0cc"
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[targets]
test = ["Aqua", "ComponentArrays", "DataInterpolations", "ExplicitImports", "LuxLib", "LuxTestUtils", "Metalhead", "Pkg", "ReTestItems", "Test", "Zygote"]
4 changes: 2 additions & 2 deletions src/basis.jl
@@ -4,8 +4,8 @@ using ArgCheck: @argcheck
using ..Boltz: _unsqueeze1
using ChainRulesCore: ChainRulesCore, NoTangent
using ConcreteStructs: @concrete
using LuxDeviceUtils: get_device, LuxCPUDevice
using Markdown: @doc_str
using MLDataDevices: get_device, CPUDevice

const CRC = ChainRulesCore

@@ -34,7 +34,7 @@ end
x_new = reshape(x, new_x_size)
if grid isa AbstractRange
dev = get_device(x)
grid = dev isa LuxCPUDevice ? collect(grid) : dev(grid)
grid = dev isa CPUDevice ? collect(grid) : dev(grid)
end
grid_shape = ntuple(i -> i == basis.dim ? basis.n : 1, ndims(x) + 1)
grid_new = reshape(grid, grid_shape)
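
Note on the hunk above: the device utilities move from LuxDeviceUtils to MLDataDevices, and LuxCPUDevice becomes CPUDevice. A minimal sketch of the grid handling after the rename (illustrative values, assuming MLDataDevices >= 1.0; not part of this commit):

using MLDataDevices

x = rand(Float32, 4, 8)            # input array
dev = get_device(x)                # CPUDevice() for a plain Array
grid = 0.0f0:0.25f0:1.0f0          # lazy range
# materialize the range on CPU; on an accelerator, move it with the device functor instead
grid = dev isa CPUDevice ? collect(grid) : dev(grid)
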
2 changes: 1 addition & 1 deletion src/layers/Layers.jl
@@ -8,8 +8,8 @@ using ChainRulesCore: ChainRulesCore
using ForwardDiff: ForwardDiff
using Lux: Lux, StatefulLuxLayer
using LuxCore: LuxCore, AbstractExplicitLayer, AbstractExplicitContainerLayer
using LuxDeviceUtils: get_device, LuxCPUDevice, LuxCUDADevice
using Markdown: @doc_str
using MLDataDevices: get_device_type, CPUDevice, CUDADevice
using NNlib: NNlib
using Random: AbstractRNG
using WeightInitializers: zeros32, randn32
4 changes: 2 additions & 2 deletions src/layers/tensor_product.jl
@@ -24,8 +24,8 @@ function TensorProductLayer(basis_fns, out_dim::Int; init_weight::F=randn32) whe
prod(Base.Fix2(getproperty, :n), basis_fns) => out_dim; use_bias=false, init_weight)
return Lux.@compact(; basis_fns=Tuple(basis_fns), dense,
out_dim, dispatch=:TensorProductLayer) do x::AbstractArray # I1 x I2 x ... x T x B
dev = get_device(x)
@argcheck dev isa LuxCPUDevice || dev isa LuxCUDADevice # kron is not widely supported
dev = get_device_type(x)
@argcheck dev <: CPUDevice || dev <: CUDADevice # kron is not widely supported

x_ = Lux._eachslice(x, Val(ndims(x) - 1)) # [I1 x I2 x ... x B] x T
@argcheck length(x_) == length(basis_fns)
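
The layer now checks the device type instead of constructing a device object: get_device_type returns a type, so the guard uses <: rather than isa. A rough sketch of that check in isolation (assumed MLDataDevices >= 1.0 API, not taken from this diff):

using MLDataDevices
using ArgCheck: @argcheck

x = rand(Float32, 3, 5)
D = get_device_type(x)             # e.g. CPUDevice (a type, no device object allocated)
# the kron-based contraction is only supported on CPU and CUDA backends
@argcheck D <: CPUDevice || D <: CUDADevice
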
41 changes: 41 additions & 0 deletions test/Project.toml
@@ -0,0 +1,41 @@
[deps]
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
DataInterpolations = "82cc6244-b520-54b8-b5a6-8a565e85f1d0"
ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
Hwloc = "0e44f5e4-bd66-52a0-8798-143a42290a1d"
InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
Lux = "b2108857-7c20-44ae-9111-449ecde12c47"
LuxLib = "82251201-b29d-42c6-8e01-566dec8acb11"
LuxTestUtils = "ac9de150-d08f-4546-94fb-7472b5760531"
MLDataDevices = "7e8f7934-dd98-4c1a-8fe8-92b47a384d40"
Metalhead = "dbeba491-748d-5e0e-a39e-b530a07fa0cc"
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[compat]
Aqua = "0.8.7"
ComponentArrays = "0.15.16"
DataInterpolations = "6"
ForwardDiff = "0.10.36"
ExplicitImports = "1.9.0"
GPUArraysCore = "0.1.6"
Hwloc = "3.2.0"
InteractiveUtils = "<0.0.1, 1"
Lux = "0.5.62"
LuxLib = "0.3.39"
LuxTestUtils = "1.1.2"
MLDataDevices = "1.0.0"
Metalhead = "0.9"
Random = "1.10"
Reexport = "1.2.2"
Pkg = "1.10"
ReTestItems = "1.24.0"
Test = "1.10"
Zygote = "0.6.70"
5 changes: 3 additions & 2 deletions test/layer_tests.jl
@@ -14,7 +14,7 @@
@jet model(x, ps, st)

__f = (x, ps) -> sum(abs2, first(model(x, ps, st)))
@eval @test_gradients $(__f) $x $ps gpu_testing=$(ongpu) atol=1e-3 rtol=1e-3
test_gradients(__f, x, ps; atol=1e-3, rtol=1e-3)
end
end
end
@@ -104,7 +104,8 @@ end
# @jet tensor_project(x, ps, st)

__f = (x, ps) -> sum(abs2, first(tensor_project(x, ps, st)))
@eval @test_gradients $(__f) $x $ps gpu_testing=$(ongpu) atol=1e-3 rtol=1e-3 skip_tracker=true
test_gradients(__f, x, ps; atol=1e-3, rtol=1e-3,
skip_backends=[AutoTracker(), AutoEnzyme()])
end
end
end
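
The tests switch from the @eval'd @test_gradients macro to the plain test_gradients function, with backends skipped via ADTypes structs. A hedged usage sketch (toy function and tolerances chosen for illustration; assumes LuxTestUtils >= 1 provides test_gradients with these keywords, and that AutoTracker/AutoEnzyme come from ADTypes):

using LuxTestUtils
using ADTypes: AutoTracker, AutoEnzyme

f = (x, ps) -> sum(abs2, x .* ps)          # toy loss over inputs and parameters
x, ps = rand(Float32, 4), rand(Float32, 4)
# compare AD backends against finite differences, skipping Tracker and Enzyme
test_gradients(f, x, ps; atol=1f-3, rtol=1f-3,
    skip_backends=[AutoTracker(), AutoEnzyme()])
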
22 changes: 16 additions & 6 deletions test/runtests.jl
@@ -1,4 +1,6 @@
using ReTestItems, Pkg
using ReTestItems, Pkg, InteractiveUtils, Hwloc

@info sprint(io -> versioninfo(io; verbose=true))

const BACKEND_GROUP = lowercase(get(ENV, "BACKEND_GROUP", "all"))
const EXTRA_PKGS = String[]
@@ -14,10 +16,18 @@ if !isempty(EXTRA_PKGS)
Pkg.instantiate()
end

const BOLTZ_TEST_GROUP = lowercase(get(ENV, "BOLTZ_TEST_GROUP", "all"))
@info "Running tests for group: $BOLTZ_TEST_GROUP"
const RETESTITEMS_NWORKERS = parse(Int, get(ENV, "RETESTITEMS_NWORKERS", "0"))
using Boltz

const BOLTZ_TEST_GROUP = get(ENV, "BOLTZ_TEST_GROUP", "all")
const RETESTITEMS_NWORKERS = parse(
Int, get(ENV, "RETESTITEMS_NWORKERS", string(min(Hwloc.num_physical_cores(), 16))))
const RETESTITEMS_NWORKER_THREADS = parse(Int,
get(ENV, "RETESTITEMS_NWORKER_THREADS",
string(max(Hwloc.num_virtual_cores() ÷ RETESTITEMS_NWORKERS, 1))))

@info "Running tests for group: $BOLTZ_TEST_GROUP with $RETESTITEMS_NWORKERS workers"

ReTestItems.runtests(
@__DIR__; tags=(BOLTZ_TEST_GROUP == "all" ? nothing : [Symbol(BOLTZ_TEST_GROUP)]),
nworkers=ifelse(BACKEND_GROUP ∈ ("cuda", "amdgpu"), 0, RETESTITEMS_NWORKERS))
Boltz; tags=(BOLTZ_TEST_GROUP == "all" ? nothing : [Symbol(BOLTZ_TEST_GROUP)]),
nworkers=ifelse(BACKEND_GROUP ∈ ("cuda", "amdgpu"), 0, RETESTITEMS_NWORKERS),
nworker_threads=RETESTITEMS_NWORKER_THREADS, testitem_timeout=3600)
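
The worker layout is now derived from the machine topology (via Hwloc) instead of hard-coded CI environment variables. The default arithmetic, spelled out with illustrative numbers (same formulas as in the hunk above):

using Hwloc

nworkers = min(Hwloc.num_physical_cores(), 16)           # cap at 16 workers
nthreads = max(Hwloc.num_virtual_cores() ÷ nworkers, 1)  # spread hardware threads across workers
# e.g. an 8-core / 16-thread machine -> 8 workers with 2 threads each;
# on the cuda/amdgpu backends the ifelse above forces nworkers = 0 (run in-process)
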
11 changes: 6 additions & 5 deletions test/shared_testsetup.jl
@@ -3,6 +3,7 @@
import Reexport: @reexport
@reexport using Boltz, Lux, GPUArraysCore, LuxLib, LuxTestUtils, Random
import Metalhead
using MLDataDevices

LuxTestUtils.jet_target_modules!(["Boltz", "Lux", "LuxLib"])

@@ -21,18 +22,18 @@ end
cpu_testing() = BACKEND_GROUP == "all" || BACKEND_GROUP == "cpu"
function cuda_testing()
return (BACKEND_GROUP == "all" || BACKEND_GROUP == "cuda") &&
LuxDeviceUtils.functional(LuxCUDADevice)
MLDataDevices.functional(CUDADevice)
end
function amdgpu_testing()
return (BACKEND_GROUP == "all" || BACKEND_GROUP == "amdgpu") &&
LuxDeviceUtils.functional(LuxAMDGPUDevice)
MLDataDevices.functional(AMDGPUDevice)
end

const MODES = begin
modes = []
cpu_testing() && push!(modes, ("cpu", Array, LuxCPUDevice(), false))
cuda_testing() && push!(modes, ("cuda", CuArray, LuxCUDADevice(), true))
amdgpu_testing() && push!(modes, ("amdgpu", ROCArray, LuxAMDGPUDevice(), true))
cpu_testing() && push!(modes, ("cpu", Array, CPUDevice(), false))
cuda_testing() && push!(modes, ("cuda", CuArray, CUDADevice(), true))
amdgpu_testing() && push!(modes, ("amdgpu", ROCArray, AMDGPUDevice(), true))
modes
end

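
For context, the MODES tuples defined above are typically consumed inside testitems roughly as sketched below (hypothetical test body, not part of this commit; it only illustrates the (name, array type, device, ongpu) convention and assumes SharedTestSetup exports MODES):

@testitem "MODES usage example" setup=[SharedTestSetup] begin
    for (mode, aType, dev, ongpu) in MODES
        x = aType(rand(Float32, 3, 4))   # allocate with the backend's array type
        @test size(dev(x)) == (3, 4)     # the device functor moves data when needed
    end
end
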
