test: lazy install cuda and amdgpu
avik-pal committed Jul 17, 2024
1 parent de3e68e commit c1e8986
Showing 6 changed files with 51 additions and 38 deletions.
2 changes: 0 additions & 2 deletions LocalPreferences.toml

This file was deleted.

16 changes: 6 additions & 10 deletions Project.toml
@@ -42,18 +42,16 @@ ChainRulesCore = "1.24"
 ComponentArrays = "0.15.13"
 ConcreteStructs = "0.2.3"
 DataInterpolations = "< 5.3"
-ExplicitImports = "1.5"
+ExplicitImports = "1.9"
 ForwardDiff = "0.10.36"
 GPUArraysCore = "0.1.6"
 JLD2 = "0.4.48"
 LazyArtifacts = "1.10"
-Lux = "0.5.50"
-LuxAMDGPU = "0.2.3"
-LuxCUDA = "0.3.2"
-LuxCore = "0.1.15"
-LuxDeviceUtils = "0.1.21"
+Lux = "0.5.60"
+LuxCore = "0.1.16"
+LuxDeviceUtils = "0.1.26"
 LuxLib = "0.3.26"
-LuxTestUtils = "0.1.15"
+LuxTestUtils = "0.1.18"
 Markdown = "1.10"
 Metalhead = "0.9"
 NNlib = "0.9.17"
@@ -72,8 +70,6 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
 ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
 DataInterpolations = "82cc6244-b520-54b8-b5a6-8a565e85f1d0"
 ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7"
-LuxAMDGPU = "83120cb1-ca15-4f04-bf3b-6967d2e6b60b"
-LuxCUDA = "d0bbae9a-e099-4d5b-a835-1c6931763bda"
 LuxLib = "82251201-b29d-42c6-8e01-566dec8acb11"
 LuxTestUtils = "ac9de150-d08f-4546-94fb-7472b5760531"
 Metalhead = "dbeba491-748d-5e0e-a39e-b530a07fa0cc"
@@ -83,4 +79,4 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

 [targets]
-test = ["Aqua", "ComponentArrays", "DataInterpolations", "ExplicitImports", "LuxAMDGPU", "LuxCUDA", "LuxLib", "LuxTestUtils", "Metalhead", "Pkg", "ReTestItems", "Test", "Zygote"]
+test = ["Aqua", "ComponentArrays", "DataInterpolations", "ExplicitImports", "LuxLib", "LuxTestUtils", "Metalhead", "Pkg", "ReTestItems", "Test", "Zygote"]
3 changes: 1 addition & 2 deletions src/layers/hamiltonian.jl
@@ -51,8 +51,7 @@ function HamiltonianNN{FST}(model; autodiff=nothing) where {FST}
             Boltz._is_extension_loaded(Val(:Zygote)), AutoZygote(), AutoForwardDiff())
     elseif autodiff isa AutoZygote
         autodiff = Boltz._is_extension_loaded(Val(:Zygote)) ? autodiff : nothing
-    else
-        !(autodiff isa AutoForwardDiff)
+    elseif !(autodiff isa AutoForwardDiff)
         throw(ArgumentError("Invalid autodiff backend: $(autodiff). Available options: \
                             `AutoForwardDiff`, or `AutoZygote`."))
     end
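In the old branch, `!(autodiff isa AutoForwardDiff)` stood on its own line with its result discarded, so the `else` arm reached the `throw` even for a valid `AutoForwardDiff` backend. Folding the check into an `elseif` condition makes the error fire only for genuinely unsupported backends. A self-contained sketch of the fixed control flow (the name `select_backend` and the `zygote_loaded` stub are illustrative, not Boltz API):

using ADTypes: AutoForwardDiff, AutoZygote

zygote_loaded() = false  # stand-in for Boltz._is_extension_loaded(Val(:Zygote))

function select_backend(autodiff)
    if autodiff === nothing
        # Pick the best available backend.
        autodiff = zygote_loaded() ? AutoZygote() : AutoForwardDiff()
    elseif autodiff isa AutoZygote
        autodiff = zygote_loaded() ? autodiff : nothing
    elseif !(autodiff isa AutoForwardDiff)  # the fixed guard
        throw(ArgumentError("Invalid autodiff backend: $(autodiff)"))
    end
    return autodiff
end

select_backend(AutoForwardDiff())  # now returns the backend instead of throwing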
2 changes: 1 addition & 1 deletion test/layer_tests.jl
@@ -84,7 +84,7 @@ end

 @testitem "Tensor Product Layer" setup=[SharedTestSetup] tags=[:layers] begin
     @testset "$(mode)" for (mode, aType, dev, ongpu) in MODES
-        mode === "AMDGPU" && continue
+        mode === "amdgpu" && continue

         @testset "$(basis)" for basis in (Basis.Chebyshev, Basis.Sin, Basis.Cos,
             Basis.Fourier, Basis.Legendre, Basis.Polynomial)
28 changes: 20 additions & 8 deletions test/runtests.jl
@@ -1,11 +1,23 @@
-using ReTestItems
+using ReTestItems, Pkg

-const BOLTZ_TEST_GROUP = lowercase(get(ENV, "BOLTZ_TEST_GROUP", "all"))
-@info "Running tests for group: $(BOLTZ_TEST_GROUP)"
+const BACKEND_GROUP = lowercase(get(ENV, "BACKEND_GROUP", "all"))
+const EXTRA_PKGS = String[]

+(BACKEND_GROUP == "all" || BACKEND_GROUP == "cuda") && push!(EXTRA_PKGS, "LuxCUDA")
+(BACKEND_GROUP == "all" || BACKEND_GROUP == "amdgpu") && push!(EXTRA_PKGS, "AMDGPU")
+
-if BOLTZ_TEST_GROUP == "all"
-    ReTestItems.runtests(@__DIR__)
-else
-    tag = Symbol(LUX_TEST_GROUP)
-    ReTestItems.runtests(@__DIR__; tags=[tag])
+if !isempty(EXTRA_PKGS)
+    @info "Installing Extra Packages for testing" EXTRA_PKGS=EXTRA_PKGS
+    Pkg.add(EXTRA_PKGS)
+    Pkg.update()
+    Base.retry_load_extensions()
+    Pkg.instantiate()
 end
+
+const BOLTZ_TEST_GROUP = lowercase(get(ENV, "BOLTZ_TEST_GROUP", "all"))
+@info "Running tests for group: $BOLTZ_TEST_GROUP"
+const RETESTITEMS_NWORKERS = parse(Int, get(ENV, "RETESTITEMS_NWORKERS", "0"))
+
+ReTestItems.runtests(
+    @__DIR__; tags=(BOLTZ_TEST_GROUP == "all" ? nothing : [Symbol(BOLTZ_TEST_GROUP)]),
+    nworkers=ifelse(BACKEND_GROUP ∈ ("cuda", "amdgpu"), 0, RETESTITEMS_NWORKERS))
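The script is driven entirely by environment variables, so a local run can opt into a backend without touching any project files. A usage sketch (`withenv` is Base Julia; the variable names match the script above):

# Test only the CUDA backend; the ifelse above then forces nworkers = 0.
withenv("BACKEND_GROUP" => "cuda", "BOLTZ_TEST_GROUP" => "all") do
    include("runtests.jl")  # assumes the current directory is test/
end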
38 changes: 23 additions & 15 deletions test/shared_testsetup.jl
@@ -1,30 +1,38 @@
 @testsetup module SharedTestSetup

 import Reexport: @reexport
-@reexport using Boltz, Lux, LuxCUDA, LuxAMDGPU, LuxLib, LuxTestUtils, Random
+@reexport using Boltz, Lux, GPUArraysCore, LuxLib, LuxTestUtils, Random
 import Metalhead

-const BACKEND_GROUP = get(ENV, "BACKEND_GROUP", "All")
+LuxTestUtils.jet_target_modules!(["Boltz", "Lux", "LuxLib"])

-CUDA.allowscalar(false)
+const BACKEND_GROUP = lowercase(get(ENV, "BACKEND_GROUP", "all"))

-cpu_testing() = BACKEND_GROUP == "All" || BACKEND_GROUP == "CPU"
-cuda_testing() = (BACKEND_GROUP == "All" || BACKEND_GROUP == "CUDA") && LuxCUDA.functional()
+GPUArraysCore.allowscalar(false)
+
+if BACKEND_GROUP == "all" || BACKEND_GROUP == "cuda"
+    using LuxCUDA
+end
+
+if BACKEND_GROUP == "all" || BACKEND_GROUP == "amdgpu"
+    using AMDGPU
+end
+
+cpu_testing() = BACKEND_GROUP == "all" || BACKEND_GROUP == "cpu"
+function cuda_testing()
+    return (BACKEND_GROUP == "all" || BACKEND_GROUP == "cuda") &&
+           LuxDeviceUtils.functional(LuxCUDADevice)
+end
 function amdgpu_testing()
-    (BACKEND_GROUP == "All" || BACKEND_GROUP == "AMDGPU") && LuxAMDGPU.functional()
+    return (BACKEND_GROUP == "all" || BACKEND_GROUP == "amdgpu") &&
+           LuxDeviceUtils.functional(LuxAMDGPUDevice)
 end

 const MODES = begin
     # Mode, Array Type, Device Function, GPU?
-    cpu_mode = ("CPU", Array, LuxCPUDevice(), false)
-    cuda_mode = ("CUDA", CuArray, LuxCUDADevice(), true)
-    amdgpu_mode = ("AMDGPU", ROCArray, LuxAMDGPUDevice(), true)
-
     modes = []
-    cpu_testing() && push!(modes, cpu_mode)
-    cuda_testing() && push!(modes, cuda_mode)
-    amdgpu_testing() && push!(modes, amdgpu_mode)
-
+    cpu_testing() && push!(modes, ("cpu", Array, LuxCPUDevice(), false))
+    cuda_testing() && push!(modes, ("cuda", CuArray, LuxCUDADevice(), true))
+    amdgpu_testing() && push!(modes, ("amdgpu", ROCArray, LuxAMDGPUDevice(), true))
     modes
 end
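Test items then iterate MODES so each case runs once per active backend, as in the layer_tests.jl hunk above. A minimal sketch of the consuming pattern (the "Example" item and its body are illustrative only; the device objects are callable, moving data to their device):

@testitem "Example" setup=[SharedTestSetup] tags=[:layers] begin
    @testset "$(mode)" for (mode, aType, dev, ongpu) in MODES
        x = dev(rand(Float32, 3, 4))  # move the data to this mode's device
        @test x isa aType             # Array, CuArray, or ROCArray
    end
end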

