diff --git a/Project.toml b/Project.toml index d359023..7f228ee 100644 --- a/Project.toml +++ b/Project.toml @@ -54,7 +54,7 @@ DynamicExpressions = "0.16, 0.17, 0.18, 0.19" ForwardDiff = "0.10.36" Functors = "0.4.12" GPUArraysCore = "0.1.6" -JLD2 = "0.4.48, 0.5" +JLD2 = "0.5" LazyArtifacts = "1.10" Lux = "1" LuxCore = "1" diff --git a/test/Project.toml b/test/Project.toml index a724ae5..ae4da80 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -3,6 +3,7 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" Bumper = "8ce10254-0962-460f-a3d8-1f77fea1446e" ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66" DataInterpolations = "82cc6244-b520-54b8-b5a6-8a565e85f1d0" +Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6" DynamicExpressions = "a40a106e-89c9-4ca8-8020-a735e8728b6b" Enzyme = "7da242da-08ed-463a-9acd-ee780be4f1d9" ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7" @@ -10,6 +11,7 @@ ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" Hwloc = "0e44f5e4-bd66-52a0-8798-143a42290a1d" InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" +JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819" Lux = "b2108857-7c20-44ae-9111-449ecde12c47" LuxLib = "82251201-b29d-42c6-8e01-566dec8acb11" LuxTestUtils = "ac9de150-d08f-4546-94fb-7472b5760531" @@ -29,6 +31,7 @@ Aqua = "0.8.7" Bumper = "0.6, 0.7" ComponentArrays = "0.15.16" DataInterpolations = "< 5.3" +Downloads = "1.6" DynamicExpressions = "0.16, 0.17, 0.18, 0.19" Enzyme = "0.12" ExplicitImports = "1.9.0" @@ -36,6 +39,7 @@ ForwardDiff = "0.10.36" GPUArraysCore = "0.1.6" Hwloc = "3.2.0" InteractiveUtils = "<0.0.1, 1" +JLD2 = "0.5" Lux = "1" LuxLib = "1" LuxTestUtils = "1.1.2" diff --git a/test/shared_testsetup.jl b/test/shared_testsetup.jl index 7a4599f..44bfaac 100644 --- a/test/shared_testsetup.jl +++ b/test/shared_testsetup.jl @@ -5,7 +5,7 @@ Enzyme.API.runtimeActivity!(true) import Reexport: @reexport @reexport using Boltz, Lux, GPUArraysCore, LuxLib, 
LuxTestUtils, Random, StableRNGs -using MLDataDevices +using MLDataDevices, JLD2 import Metalhead LuxTestUtils.jet_target_modules!(["Boltz", "Lux", "LuxLib"]) diff --git a/test/testimages/monarch_color.jld2 b/test/testimages/monarch_color.jld2 new file mode 100644 index 0000000..c69970d Binary files /dev/null and b/test/testimages/monarch_color.jld2 differ diff --git a/test/vision_tests.jl b/test/vision_tests.jl index fec2b5c..f7a8603 100644 --- a/test/vision_tests.jl +++ b/test/vision_tests.jl @@ -1,4 +1,37 @@ -@testitem "AlexNet" setup=[SharedTestSetup] tags=[:vision] begin +@testsetup module PretrainedWeightsTestSetup + +using Lux, Downloads, JLD2 + +function normalize_imagenet(data) + cmean = reshape(Float32[0.485, 0.456, 0.406], (1, 1, 3, 1)) + cstd = reshape(Float32[0.229, 0.224, 0.225], (1, 1, 3, 1)) + return (data .- cmean) ./ cstd +end + +@load joinpath(@__DIR__, "testimages", "monarch_color.jld2") monarch_color_224 monarch_color_256 +const MONARCH_224 = monarch_color_224 +const MONARCH_256 = monarch_color_256 + +const TEST_LBLS = readlines(Downloads.download( + "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" +)) + +function imagenet_acctest(model, ps, st, dev; size=224) + ps = ps |> dev + st = Lux.testmode(st) |> dev + TEST_X = size == 224 ? MONARCH_224 : + (size == 256 ? 
MONARCH_256 : error("size must be 224 or 256")) + x = TEST_X |> dev + ypred = first(model(x, ps, st)) |> collect |> vec + top5 = TEST_LBLS[sortperm(ypred; rev=true)[1:5]] + return "monarch" in top5 +end + +export imagenet_acctest + +end + +@testitem "AlexNet" setup=[SharedTestSetup, PretrainedWeightsTestSetup] tags=[:vision] begin for (mode, aType, dev, ongpu) in MODES @testset "pretrained: $(pretrained)" for pretrained in [true, false] model = Vision.AlexNet(; pretrained) @@ -9,6 +42,10 @@ @jet model(img, ps, st) @test size(first(model(img, ps, st))) == (1000, 2) + if pretrained + @test imagenet_acctest(model, ps, st, dev) + end + GC.gc(true) end end @@ -56,7 +93,7 @@ end end end -@testitem "ResNet" setup=[SharedTestSetup] tags=[:vision] begin +@testitem "ResNet" setup=[SharedTestSetup, PretrainedWeightsTestSetup] tags=[:vision] begin for (mode, aType, dev, ongpu) in MODES, depth in [18, 34, 50, 101, 152] @testset for pretrained in [false, true] model = Vision.ResNet(depth; pretrained) @@ -67,12 +104,16 @@ end @jet model(img, ps, st) @test size(first(model(img, ps, st))) == (1000, 2) + if pretrained + @test imagenet_acctest(model, ps, st, dev) + end + GC.gc(true) end end end -@testitem "ResNeXt" setup=[SharedTestSetup] tags=[:vision] begin +@testitem "ResNeXt" setup=[SharedTestSetup, PretrainedWeightsTestSetup] tags=[:vision] begin for (mode, aType, dev, ongpu) in MODES @testset for (depth, cardinality, base_width) in [ (50, 32, 4), (101, 32, 8), (101, 64, 4), (152, 64, 4)] @@ -87,13 +128,17 @@ @jet model(img, ps, st) @test size(first(model(img, ps, st))) == (1000, 2) + if pretrained + @test imagenet_acctest(model, ps, st, dev) + end + GC.gc(true) end end end end -@testitem "WideResNet" setup=[SharedTestSetup] tags=[:vision] begin +@testitem "WideResNet" setup=[SharedTestSetup, PretrainedWeightsTestSetup] tags=[:vision] begin for (mode, aType, dev, ongpu) in MODES, depth in [50, 101, 152] @testset for pretrained in [false, true] depth == 152 && pretrained && 
continue @@ -106,12 +151,16 @@ end @jet model(img, ps, st) @test size(first(model(img, ps, st))) == (1000, 2) + if pretrained + @test imagenet_acctest(model, ps, st, dev) + end + GC.gc(true) end end end -@testitem "SqueezeNet" setup=[SharedTestSetup] tags=[:vision] begin +@testitem "SqueezeNet" setup=[SharedTestSetup, PretrainedWeightsTestSetup] tags=[:vision] begin for (mode, aType, dev, ongpu) in MODES @testset for pretrained in [false, true] model = Vision.SqueezeNet(; pretrained) @@ -122,12 +171,16 @@ end @jet model(img, ps, st) @test size(first(model(img, ps, st))) == (1000, 2) + if pretrained + @test imagenet_acctest(model, ps, st, dev) + end + GC.gc(true) end end end -@testitem "VGG" setup=[SharedTestSetup] tags=[:vision] begin +@testitem "VGG" setup=[SharedTestSetup, PretrainedWeightsTestSetup] tags=[:vision] begin for (mode, aType, dev, ongpu) in MODES, depth in [11, 13, 16, 19] @testset for pretrained in [false, true], batchnorm in [false, true] model = Vision.VGG(depth; batchnorm, pretrained) @@ -138,6 +191,10 @@ end @jet model(img, ps, st) @test size(first(model(img, ps, st))) == (1000, 2) + if pretrained + @test imagenet_acctest(model, ps, st, dev) + end + GC.gc(true) end end