Commit: updates

CarloLucibello committed Nov 29, 2024
1 parent 37f2d28 commit 1909c0c
Showing 5 changed files with 204 additions and 27 deletions.
8 changes: 7 additions & 1 deletion GNNlib/Project.toml
@@ -7,6 +7,7 @@ version = "0.2.2"
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c"
GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
@@ -18,15 +19,20 @@ AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"

[extensions]
GNNlibCUDAExt = "CUDA"
GNNlibAMDGPUExt = "AMDGPU"
GNNlibCUDAExt = "CUDA"

# GPUArraysCore is not needed as a direct dependency
# but pinning it to 0.1 avoids problems when we do Pkg.add("CUDA") in testing
# See https://github.com/JuliaGPU/CUDA.jl/issues/2564

[compat]
AMDGPU = "1"
CUDA = "4, 5"
ChainRulesCore = "1.24"
DataStructures = "0.18"
GNNGraphs = "1.0"
GPUArraysCore = "0.1"
LinearAlgebra = "1"
MLUtils = "0.4"
NNlib = "0.9"
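The reordered [extensions] entries above are weak-dependency extensions: GNNlibCUDAExt and GNNlibAMDGPUExt are loaded automatically once the matching GPU package is loaded alongside GNNlib. A minimal sketch of that mechanism, not part of this commit and assuming CUDA.jl is installed in the active environment:

    using GNNlib
    using CUDA                      # loading the weak dependency triggers GNNlibCUDAExt

    # Base.get_extension returns the extension module, or `nothing` if it did not load.
    ext = Base.get_extension(GNNlib, :GNNlibCUDAExt)
    @assert ext !== nothing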
8 changes: 8 additions & 0 deletions GNNlib/test/Project.toml
@@ -1,6 +1,10 @@
[deps]
FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000"
Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c"
GNNlib = "a6a84749-d869-43f8-aacc-be26a1996e48"
GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
@@ -10,3 +14,7 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[compat]
GPUArraysCore = "0.1"
47 changes: 21 additions & 26 deletions GNNlib/test/msgpass.jl
@@ -1,33 +1,19 @@
@testitem "msgpass" setup=[TestModuleNNlib] begin
@testitem "msgpass" setup=[TestModuleGNNlib] begin
using .TestModuleGNNlib
#TODO test all graph types
GRAPH_T = :coo
in_channel = 10
out_channel = 5
num_V = 6
num_E = 14
T = Float32

adj = [0 1 0 0 0 0
1 0 0 1 1 1
0 0 0 0 0 1
0 1 0 0 1 0
0 1 0 1 0 1
0 1 1 0 1 0]

X = rand(T, in_channel, num_V)
E = rand(T, in_channel, num_E)

g = GNNGraph(adj, graph_type = GRAPH_T)
g = TEST_GRAPHS[1]
out_channel = 10
num_V = g.num_nodes
num_E = g.num_edges

@testset "propagate" begin
function message(xi, xj, e)
@test xi === nothing
@test e === nothing
ones(T, out_channel, size(xj, 2))
ones(Float32, out_channel, size(xj, 2))
end

m = propagate(message, g, +, xj = X)
m = propagate(message, g, +, xj = g.x)

@test size(m) == (out_channel, num_V)

@@ -40,7 +26,7 @@
end

@testset "apply_edges" begin
m = apply_edges(g, e = E) do xi, xj, e
m = apply_edges(g, e = g.e) do xi, xj, e
@test xi === nothing
@test xj === nothing
ones(out_channel, size(e, 2))
@@ -49,15 +35,15 @@
@test m == ones(out_channel, num_E)

# With NamedTuple input
m = apply_edges(g, xj = (; a = X, b = 2X), e = E) do xi, xj, e
m = apply_edges(g, xj = (; a = g.x, b = 2g.x), e = g.e) do xi, xj, e
@test xi === nothing
@test xj.b == 2 * xj.a
@test size(xj.a, 2) == size(xj.b, 2) == size(e, 2)
ones(out_channel, size(e, 2))
end

# NamedTuple output
m = apply_edges(g, e = E) do xi, xj, e
m = apply_edges(g, e = g.e) do xi, xj, e
@test xi === nothing
@test xj === nothing
(; a = ones(out_channel, size(e, 2)))
@@ -85,7 +71,7 @@
Adj = map(x -> x > 0 ? 1 : 0, A)
X = rand(10, n)

g = GNNGraph(A, ndata = X, graph_type = GRAPH_T)
g = GNNGraph(A, ndata = X, graph_type = :coo)

function spmm_copyxj_fused(g)
propagate(copy_xj,
@@ -107,7 +93,7 @@
Adj = map(x -> x > 0 ? 1 : 0, A)
X = rand(10, n)

g = GNNGraph(A, ndata = X, edata = A.nzval, graph_type = GRAPH_T)
g = GNNGraph(A, ndata = X, edata = A.nzval, graph_type = :coo)

function spmm_unfused(g)
propagate((xi, xj, e) -> reshape(e, 1, :) .* xj,
@@ -139,3 +125,12 @@
end

end

@testitem "msgpass GPU" setup=[TestModuleGNNlib] begin
using .TestModuleGNNlib
n, m = 10, 20
g = rand_graph(n, m, graph_type = :coo)
x = rand(Float32, 2, n)
f(g, x) = propagate(copy_xj, g, +, xj = x)
test_gradients(f, g, x; test_gpu=true, test_grad_f=false, compare_finite_diff=false)
end
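The new "msgpass GPU" test item exercises propagate(copy_xj, g, +, xj = x), which forwards each source-node feature along its edges and sums the messages at the destination node. A CPU-only toy check of the same operation, not part of the commit; the 3-node symmetric adjacency and the dense x * adj comparison are illustrative, mirroring the spmm tests earlier in this file:

    @testitem "copy_xj sketch" setup=[TestModuleGNNlib] begin
        using .TestModuleGNNlib
        using GNNGraphs: GNNGraph

        adj = [0 1 1
               1 0 1
               1 1 0]                          # symmetric toy graph, 3 nodes
        g = GNNGraph(adj, graph_type = :coo)
        x = rand(Float32, 2, 3)                # 2 features per node

        y = propagate(copy_xj, g, +, xj = x)   # sum of neighbor features per node
        @test y ≈ x * adj
    end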
164 changes: 164 additions & 0 deletions GNNlib/test/test_module.jl
@@ -1,5 +1,30 @@
@testmodule TestModuleGNNlib begin

using Pkg

### GPU backends settings ############
# tried to put this in __init__ but it is not executed for some reason

## Uncomment below to change the default test settings
ENV["GNN_TEST_CUDA"] = "true"
# ENV["GNN_TEST_AMDGPU"] = "true"
# ENV["GNN_TEST_Metal"] = "true"

to_test(backend) = get(ENV, "GNN_TEST_$(backend)", "false") == "true"
has_dependencies(pkgs) = all(pkg -> haskey(Pkg.project().dependencies, pkg), pkgs)
deps_dict = Dict(:CUDA => ["CUDA", "cuDNN"], :AMDGPU => ["AMDGPU"], :Metal => ["Metal"])

for (backend, deps) in deps_dict
if to_test(backend)
if !has_dependencies(deps)
Pkg.add(deps)
end
@eval using $backend
@eval $backend.allowscalar(false)
end
end
######################################

import Reexport: @reexport

@reexport using GNNlib
@@ -8,5 +33,144 @@ import Reexport: @reexport
@reexport using MLUtils
@reexport using SparseArrays
@reexport using Test, Random, Statistics
@reexport using MLDataDevices
using Functors: fmapstructure_with_path
using Graphs
using ChainRulesTestUtils, FiniteDifferences
using Zygote
using SparseArrays

# from this module
export D_IN, D_OUT, GRAPH_TYPES, TEST_GRAPHS,
test_gradients, finitediff_withgradient,
check_equal_leaves


const D_IN = 3
const D_OUT = 5

function finitediff_withgradient(f, x...)
y = f(x...)
# We set a range to avoid domain errors
fdm = FiniteDifferences.central_fdm(5, 1, max_range=1e-2)
return y, FiniteDifferences.grad(fdm, f, x...)
end

function check_equal_leaves(a, b; rtol=1e-4, atol=1e-4)
fmapstructure_with_path(a, b) do kp, x, y
if x isa AbstractArray
# @show kp
@test x ≈ y rtol=rtol atol=atol
# elseif x isa Number
# @show kp
# @test x ≈ y rtol=rtol atol=atol
end
end
end

function test_gradients(
f,
graph::GNNGraph,
xs...;
rtol=1e-5, atol=1e-5,
test_gpu = false,
test_grad_f = true,
test_grad_x = true,
compare_finite_diff = true,
loss = (f, g, xs...) -> mean(f(g, xs...)),
)

if !test_gpu && !compare_finite_diff
error("You should either compare finite diff vs CPU AD \
or CPU AD vs GPU AD.")
end

## Let's make sure first that the forward pass works.
l = loss(f, graph, xs...)
@test l isa Number
if test_gpu
gpu_dev = gpu_device(force=true)
cpu_dev = cpu_device()
graph_gpu = graph |> gpu_dev
xs_gpu = xs |> gpu_dev
f_gpu = f |> gpu_dev
l_gpu = loss(f_gpu, graph_gpu, xs_gpu...)
@test l_gpu isa Number
end

if test_grad_x
# Zygote gradient with respect to input.
y, g = Zygote.withgradient((xs...) -> loss(f, graph, xs...), xs...)

if compare_finite_diff
# Cast to Float64 to avoid precision issues.
f64 = f |> Flux.f64
xs64 = xs .|> Flux.f64
y_fd, g_fd = finitediff_withgradient((xs...) -> loss(f64, graph, xs...), xs64...)
@test y ≈ y_fd rtol=rtol atol=atol
check_equal_leaves(g, g_fd; rtol, atol)
end

if test_gpu
# Zygote gradient with respect to input on GPU.
y_gpu, g_gpu = Zygote.withgradient((xs...) -> loss(f_gpu, graph_gpu, xs...), xs_gpu...)
@test get_device(g_gpu) == get_device(xs_gpu)
@test y_gpu ≈ y rtol=rtol atol=atol
check_equal_leaves(g_gpu |> cpu_dev, g; rtol, atol)
end
end

if test_grad_f
# Zygote gradient with respect to f.
y, g = Zygote.withgradient(f -> loss(f, graph, xs...), f)

if compare_finite_diff
# Cast to Float64 to avoid precision issues.
f64 = f |> Flux.f64
ps, re = Flux.destructure(f64)
y_fd, g_fd = finitediff_withgradient(ps -> loss(re(ps), graph, xs...), ps)
g_fd = (re(g_fd[1]),)
@test y ≈ y_fd rtol=rtol atol=atol
check_equal_leaves(g, g_fd; rtol, atol)
end

if test_gpu
# Zygote gradient with respect to f on GPU.
y_gpu, g_gpu = Zygote.withgradient(f -> loss(f, graph_gpu, xs_gpu...), f_gpu)
# @test get_device(g_gpu) == get_device(xs_gpu)
@test y_gpu ≈ y rtol=rtol atol=atol
check_equal_leaves(g_gpu |> cpu_dev, g; rtol, atol)
end
end
return true
end


function generate_test_graphs(graph_type)
adj1 = [0 1 0 1
1 0 1 0
0 1 0 1
1 0 1 0]

g1 = GNNGraph(adj1,
ndata = rand(Float32, D_IN, 4);
graph_type)

adj_single_vertex = [0 0 0 1
0 0 0 0
0 0 0 1
1 0 1 0]

g_single_vertex = GNNGraph(adj_single_vertex,
ndata = rand(Float32, D_IN, 4);
graph_type)

return (g1, g_single_vertex)
end

GRAPH_TYPES = [:coo, :dense, :sparse]
TEST_GRAPHS = [generate_test_graphs(:coo)...,
generate_test_graphs(:dense)...,
generate_test_graphs(:sparse)...]

end # module
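finitediff_withgradient and check_equal_leaves defined above are the building blocks of test_gradients: the former returns the primal value plus FiniteDifferences gradients, the latter walks two gradient structures with fmapstructure_with_path and compares every array leaf. A small illustration of how they compose, not part of the commit; the quadratic f is just a stand-in:

    @testitem "gradient helpers sketch" setup=[TestModuleGNNlib] begin
        using .TestModuleGNNlib
        using Zygote

        f(x) = sum(abs2, x)                           # hypothetical stand-in function
        x = rand(3)

        y_fd, g_fd = finitediff_withgradient(f, x)    # value and tuple of FD gradients
        y_ad, g_ad = Zygote.withgradient(f, x)        # value and tuple of Zygote gradients

        @test y_fd ≈ y_ad
        check_equal_leaves(g_ad, g_fd)                # elementwise ≈ on every array leaf
    end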
4 changes: 4 additions & 0 deletions GraphNeuralNetworks/test/Project.toml
@@ -3,6 +3,7 @@ ChainRulesTestUtils = "cdddcdb0-9152-4a09-a978-84456f9df70a"
FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
GraphNeuralNetworks = "cffab07f-9bc2-4db1-8861-388f63bf7694"
Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
@@ -13,3 +14,6 @@ Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[compat]
GPUArraysCore = "0.1"
