1 | | -using Flux, Test |
| 1 | +using Flux, Test, Zygote |
2 | 2 | using Flux: cpu, gpu |
3 | 3 | using Statistics: mean |
4 | | -using LinearAlgebra: I, cholesky, Cholesky |
| 4 | +using LinearAlgebra: I, cholesky, Cholesky, Adjoint |
5 | 5 | using SparseArrays: sparse, SparseMatrixCSC, AbstractSparseArray |
| 6 | +using CUDA |
| 7 | +CUDA.allowscalar(false)  # error on scalar indexing of CuArrays instead of silently running slow fallbacks
6 | 8 |
7 | 9 | @testset "CUDA" begin |
8 | 10 | x = randn(5, 5) |
|
48 | 50 | # construct from CuArray |
49 | 51 | x = [1, 3, 2] |
50 | 52 | y = Flux.onehotbatch(x, 0:3) |
51 | | - @test_skip begin # https://github.com/FluxML/OneHotArrays.jl/issues/16 |
| 53 | +
| 54 | + # previously skipped due to https://github.com/FluxML/OneHotArrays.jl/issues/16
52 | 55 | y2 = Flux.onehotbatch(x |> gpu, 0:3) |
53 | 56 | @test y2.indices isa CuArray |
54 | 57 | @test y2 |> cpu == y |
55 | | - end |
56 | 58 | end |
57 | 59 |
58 | 60 | @testset "onecold gpu" begin |
@@ -104,19 +106,19 @@ end |
104 | 106 | # Trivial functions |
105 | 107 | @test gradient(x -> sum(abs, gpu(x)), a)[1] isa Matrix |
106 | 108 | @test gradient(x -> sum(gpu(x)), a)[1] isa Matrix |
107 | | - @test_skip gradient(x -> sum(gpu(x)), a')[1] isa Matrix # sum(::Adjoint{T,CuArray}) makes a Fill |
| 109 | + @test_broken gradient(x -> sum(gpu(x)), a')[1] isa Matrix # sum(::Adjoint{T,CuArray}) makes a Fill |
108 | 110 | @test gradient(x -> sum(abs, cpu(x)), ca)[1] isa CuArray |
109 | 111 | # Ideally, for efficiency, this gradient would stay a Fill instead of going through indirections,
110 | 112 | # but we forcefully materialise it. TODO: remove the materialising CuArray here
111 | 113 | @test gradient(x -> sum(cpu(x)), ca)[1] isa CuArray # This involves FillArray, which should be GPU compatible |
112 | | - @test gradient(x -> sum(cpu(x)), ca')[1] isa Adjoint{Float32, <:CuArray} |
| 114 | + @test gradient(x -> sum(cpu(x)), ca')[1] isa CuArray |
113 | 115 |
114 | 116 | # Even more trivial: no movement |
115 | 117 | @test gradient(x -> sum(abs, cpu(x)), a)[1] isa Matrix |
116 | | - @test_broken gradient(x -> sum(abs, cpu(x)), a')[1] isa Matrix |
| 118 | + @test gradient(x -> sum(abs, cpu(x)), a')[1] isa Matrix |
117 | 119 | @test gradient(x -> sum(cpu(x)), a)[1] isa typeof(gradient(sum, a)[1]) # FillArray |
118 | 120 | @test gradient(x -> sum(abs, gpu(x)), ca)[1] isa CuArray |
119 | | - @test_broken gradient(x -> sum(abs, gpu(x)), ca')[1] isa CuArray |
| 121 | + @test gradient(x -> sum(abs, gpu(x)), ca')[1] isa CuArray |
120 | 122 |
121 | 123 | # More complicated: Array * CuArray is an error
122 | 124 | g0 = gradient(x -> sum(abs, (a * (a * x))), a)[1] |
|
198 | 200 | post2 = Flux.DataLoader((x=X, y=Y); batchsize=7, shuffle=false) |> gpu |
199 | 201 | for (p, q) in zip(pre2, post2) |
200 | 202 | @test p.x == q.x |
201 | | - @test_skip p.y == q.y # https://github.com/FluxML/OneHotArrays.jl/issues/28 -- MethodError: getindex(::OneHotArrays.OneHotMatrix{UInt32, CuArray{UInt32, 1, CUDA.Mem.DeviceBuffer}}, ::Int64, ::Int64) is ambiguous |
| 203 | + @test_broken p.y == q.y # https://github.com/FluxML/OneHotArrays.jl/issues/28 -- MethodError: getindex(::OneHotArrays.OneHotMatrix{UInt32, CuArray{UInt32, 1, CUDA.Mem.DeviceBuffer}}, ::Int64, ::Int64) is ambiguous |
202 | 204 | end |
203 | 205 |
204 | 206 | @test collect(pre2) isa Vector{<:NamedTuple{(:x, :y)}} |