Commit 2b9721b

Merge branch 'branch-25.02' into renumber_for_tigergraph

ChuckHastings committed Jan 31, 2025
2 parents: 9e19ad8 + dc435cf

Showing 92 changed files with 1,649 additions and 1,098 deletions.
(changed file; filename not shown in this view)
@@ -3,15 +3,15 @@
"context": "${localWorkspaceFolder}/.devcontainer",
"dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile",
"args": {
"CUDA": "12.5",
"CUDA": "12.8",
"PYTHON_PACKAGE_MANAGER": "conda",
"BASE": "rapidsai/devcontainers:25.02-cpp-mambaforge-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.02-cuda12.5-conda"
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.02-cuda12.8-conda"
],
"hostRequirements": {"gpu": "optional"},
"features": {
@@ -20,7 +20,7 @@
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda12.5-envs}"],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda12.8-envs}"],
"postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"],
"workspaceFolder": "/home/coder",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/cugraph,type=bind,consistency=consistent",
@@ -29,7 +29,7 @@
"source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.conda/pkgs,target=/home/coder/.conda/pkgs,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda12.5-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent"
"source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda12.8-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent"
],
"customizations": {
"vscode": {
(changed file; filename not shown in this view)
@@ -3,20 +3,20 @@
"context": "${localWorkspaceFolder}/.devcontainer",
"dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile",
"args": {
"CUDA": "12.5",
"CUDA": "12.8",
"PYTHON_PACKAGE_MANAGER": "pip",
"BASE": "rapidsai/devcontainers:25.02-cpp-cuda12.5-ucx1.18.0-openmpi-ubuntu22.04"
"BASE": "rapidsai/devcontainers:25.02-cpp-cuda12.8-ucx1.18.0-openmpi-ubuntu22.04"
}
},
"runArgs": [
"--rm",
"--name",
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.02-cuda12.5-pip"
"${localEnv:USER:anon}-rapids-${localWorkspaceFolderBasename}-25.02-cuda12.8-pip"
],
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/cuda:25.2": {
"version": "12.5",
"version": "12.8",
"installcuBLAS": true,
"installcuSOLVER": true,
"installcuRAND": true,
@@ -28,15 +28,15 @@
"ghcr.io/rapidsai/devcontainers/features/cuda",
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda12.5-venvs}"],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda12.8-venvs}"],
"postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"],
"workspaceFolder": "/home/coder",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/cugraph,type=bind,consistency=consistent",
"mounts": [
"source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda12.5-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent"
"source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda12.8-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent"
],
"customizations": {
"vscode": {
2 changes: 1 addition & 1 deletion .github/workflows/pr.yaml
@@ -195,7 +195,7 @@ jobs:
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
arch: '["amd64"]'
cuda: '["12.5"]'
cuda: '["12.8"]'
node_type: cpu32
build_command: |
sccache -z;
(changed file; filename not shown in this view)
@@ -17,7 +17,7 @@ dependencies:
- cuda-nvtx-dev
- cuda-profiler-api
- cuda-python>=12.6.2,<13.0a0
- cuda-version=12.5
- cuda-version=12.8
- cudf==25.2.*,>=0.0.0a0
- cupy>=12.0.0
- cxx-compiler
@@ -77,4 +77,4 @@ dependencies:
- torchmetrics
- ucx-py==0.42.*,>=0.0.0a0
- wheel
name: all_cuda-125_arch-x86_64
name: all_cuda-128_arch-x86_64
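
The devcontainer, CI, and conda environment changes above all move the default toolchain from CUDA 12.5 to CUDA 12.8. As a quick sanity check after rebuilding a container, the runtime and driver versions can be printed with the CUDA runtime API. This snippet is not part of the commit; it is a standalone sketch, and the file name in the build comment is only a suggestion.

#include <cstdio>
#include <cuda_runtime_api.h>

// Prints the CUDA runtime and driver versions visible inside the container.
// CUDA versions are encoded as 1000*major + 10*minor, so 12.8 reports 12080.
// Build with: nvcc check_cuda_version.cu -o check_cuda_version
int main()
{
  int runtime_version = 0;
  int driver_version  = 0;
  cudaRuntimeGetVersion(&runtime_version);
  cudaDriverGetVersion(&driver_version);
  std::printf("runtime: %d.%d, driver: %d.%d\n",
              runtime_version / 1000, (runtime_version % 1000) / 10,
              driver_version / 1000, (driver_version % 1000) / 10);
  return 0;
}
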
3 changes: 1 addition & 2 deletions cpp/include/cugraph/detail/decompress_edge_partition.cuh
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
* Copyright (c) 2020-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -29,7 +29,6 @@
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/optional.h>
#include <thrust/sequence.h>
#include <thrust/tuple.h>

84 changes: 43 additions & 41 deletions cpp/include/cugraph/edge_partition_device_view.cuh
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
* Copyright (c) 2020-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -26,10 +26,10 @@
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>

#include <cuda/std/optional>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
#include <thrust/optional.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
#include <thrust/tuple.h>
@@ -43,18 +43,18 @@ namespace cugraph {
namespace detail {

template <typename vertex_t>
__device__ thrust::optional<vertex_t> major_hypersparse_idx_from_major_nocheck_impl(
__device__ cuda::std::optional<vertex_t> major_hypersparse_idx_from_major_nocheck_impl(
raft::device_span<vertex_t const> dcs_nzd_vertices, vertex_t major)
{
// we can avoid binary search (and potentially improve performance) if we add an auxiliary array
// or cuco::static_map (at the expense of additional memory)
auto it =
thrust::lower_bound(thrust::seq, dcs_nzd_vertices.begin(), dcs_nzd_vertices.end(), major);
return it != dcs_nzd_vertices.end()
? (*it == major ? thrust::optional<vertex_t>{static_cast<vertex_t>(
? (*it == major ? cuda::std::optional<vertex_t>{static_cast<vertex_t>(
thrust::distance(dcs_nzd_vertices.begin(), it))}
: thrust::nullopt)
: thrust::nullopt;
: cuda::std::nullopt)
: cuda::std::nullopt;
}

template <typename vertex_t, typename edge_t, typename return_type_t, bool multi_gpu, bool use_dcs>
@@ -490,7 +490,7 @@ class edge_partition_device_view_t<vertex_t, edge_t, multi_gpu, std::enable_if_t
return major_value_start_offset_;
}

__host__ __device__ thrust::optional<vertex_t> major_hypersparse_first() const noexcept
__host__ __device__ cuda::std::optional<vertex_t> major_hypersparse_first() const noexcept
{
return major_hypersparse_first_;
}
@@ -528,15 +528,16 @@ class edge_partition_device_view_t<vertex_t, edge_t, multi_gpu, std::enable_if_t
return major_range_first_ + major_offset;
}

__device__ thrust::optional<vertex_t> major_idx_from_major_nocheck(vertex_t major) const noexcept
__device__ cuda::std::optional<vertex_t> major_idx_from_major_nocheck(
vertex_t major) const noexcept
{
if (major_hypersparse_first_ && (major >= *major_hypersparse_first_)) {
auto major_hypersparse_idx =
detail::major_hypersparse_idx_from_major_nocheck_impl(*dcs_nzd_vertices_, major);
return major_hypersparse_idx
? thrust::make_optional((*major_hypersparse_first_ - major_range_first_) +
*major_hypersparse_idx)
: thrust::nullopt;
? cuda::std::make_optional((*major_hypersparse_first_ - major_range_first_) +
*major_hypersparse_idx)
: cuda::std::nullopt;
} else {
return major - major_range_first_;
}
@@ -554,60 +555,60 @@ class edge_partition_device_view_t<vertex_t, edge_t, multi_gpu, std::enable_if_t
}

// major_hypersparse_idx: index within the hypersparse segment
__device__ thrust::optional<vertex_t> major_hypersparse_idx_from_major_nocheck(
__device__ cuda::std::optional<vertex_t> major_hypersparse_idx_from_major_nocheck(
vertex_t major) const noexcept
{
if (dcs_nzd_vertices_) {
return detail::major_hypersparse_idx_from_major_nocheck_impl(*dcs_nzd_vertices_, major);
} else {
return thrust::nullopt;
return cuda::std::nullopt;
}
}

// major_hypersparse_idx: index within the hypersparse segment
__device__ thrust::optional<vertex_t> major_from_major_hypersparse_idx_nocheck(
__device__ cuda::std::optional<vertex_t> major_from_major_hypersparse_idx_nocheck(
vertex_t major_hypersparse_idx) const noexcept
{
return dcs_nzd_vertices_
? thrust::optional<vertex_t>{(*dcs_nzd_vertices_)[major_hypersparse_idx]}
: thrust::nullopt;
? cuda::std::optional<vertex_t>{(*dcs_nzd_vertices_)[major_hypersparse_idx]}
: cuda::std::nullopt;
}

__host__ __device__ vertex_t minor_from_minor_offset_nocheck(vertex_t minor_offset) const noexcept
{
return minor_range_first_ + minor_offset;
}

// FIxME: better return thrust::optional<raft::device_span<vertex_t const>> for consistency (see
// dcs_nzd_range_bitmap())
__host__ __device__ thrust::optional<vertex_t const*> dcs_nzd_vertices() const
// FIxME: better return cuda::std::optional<raft::device_span<vertex_t const>> for consistency
// (see dcs_nzd_range_bitmap())
__host__ __device__ cuda::std::optional<vertex_t const*> dcs_nzd_vertices() const
{
return dcs_nzd_vertices_ ? thrust::optional<vertex_t const*>{(*dcs_nzd_vertices_).data()}
: thrust::nullopt;
return dcs_nzd_vertices_ ? cuda::std::optional<vertex_t const*>{(*dcs_nzd_vertices_).data()}
: cuda::std::nullopt;
}

__host__ __device__ thrust::optional<vertex_t> dcs_nzd_vertex_count() const
__host__ __device__ cuda::std::optional<vertex_t> dcs_nzd_vertex_count() const
{
return dcs_nzd_vertices_
? thrust::optional<vertex_t>{static_cast<vertex_t>((*dcs_nzd_vertices_).size())}
: thrust::nullopt;
? cuda::std::optional<vertex_t>{static_cast<vertex_t>((*dcs_nzd_vertices_).size())}
: cuda::std::nullopt;
}

__host__ __device__ thrust::optional<raft::device_span<uint32_t const>> dcs_nzd_range_bitmap()
__host__ __device__ cuda::std::optional<raft::device_span<uint32_t const>> dcs_nzd_range_bitmap()
const
{
return dcs_nzd_range_bitmap_
? thrust::make_optional<raft::device_span<uint32_t const>>(
? cuda::std::make_optional<raft::device_span<uint32_t const>>(
(*dcs_nzd_range_bitmap_).data(), (*dcs_nzd_range_bitmap_).size())
: thrust::nullopt;
: cuda::std::nullopt;
}

private:
// should be trivially copyable to device

thrust::optional<raft::device_span<vertex_t const>> dcs_nzd_vertices_{thrust::nullopt};
thrust::optional<raft::device_span<uint32_t const>> dcs_nzd_range_bitmap_{thrust::nullopt};
thrust::optional<vertex_t> major_hypersparse_first_{thrust::nullopt};
cuda::std::optional<raft::device_span<vertex_t const>> dcs_nzd_vertices_{cuda::std::nullopt};
cuda::std::optional<raft::device_span<uint32_t const>> dcs_nzd_range_bitmap_{cuda::std::nullopt};
cuda::std::optional<vertex_t> major_hypersparse_first_{cuda::std::nullopt};

vertex_t major_range_first_{0};
vertex_t major_range_last_{0};
@@ -790,10 +791,10 @@ class edge_partition_device_view_t<vertex_t, edge_t, multi_gpu, std::enable_if_t

__host__ __device__ vertex_t major_value_start_offset() const { return vertex_t{0}; }

__host__ __device__ thrust::optional<vertex_t> major_hypersparse_first() const noexcept
__host__ __device__ cuda::std::optional<vertex_t> major_hypersparse_first() const noexcept
{
assert(false);
return thrust::nullopt;
return cuda::std::nullopt;
}

__host__ __device__ constexpr vertex_t major_range_first() const noexcept { return vertex_t{0}; }
@@ -823,7 +824,8 @@ class edge_partition_device_view_t<vertex_t, edge_t, multi_gpu, std::enable_if_t
return major_offset;
}

__device__ thrust::optional<vertex_t> major_idx_from_major_nocheck(vertex_t major) const noexcept
__device__ cuda::std::optional<vertex_t> major_idx_from_major_nocheck(
vertex_t major) const noexcept
{
return major_offset_from_major_nocheck(major);
}
@@ -834,34 +836,34 @@ class edge_partition_device_view_t<vertex_t, edge_t, multi_gpu, std::enable_if_t
}

// major_hypersparse_idx: index within the hypersparse segment
__device__ thrust::optional<vertex_t> major_hypersparse_idx_from_major_nocheck(
__device__ cuda::std::optional<vertex_t> major_hypersparse_idx_from_major_nocheck(
vertex_t major) const noexcept
{
assert(false);
return thrust::nullopt;
return cuda::std::nullopt;
}

// major_hypersparse_idx: index within the hypersparse segment
__device__ thrust::optional<vertex_t> major_from_major_hypersparse_idx_nocheck(
__device__ cuda::std::optional<vertex_t> major_from_major_hypersparse_idx_nocheck(
vertex_t major_hypersparse_idx) const noexcept
{
assert(false);
return thrust::nullopt;
return cuda::std::nullopt;
}

__host__ __device__ vertex_t minor_from_minor_offset_nocheck(vertex_t minor_offset) const noexcept
{
return minor_offset;
}

__host__ __device__ thrust::optional<vertex_t const*> dcs_nzd_vertices() const
__host__ __device__ cuda::std::optional<vertex_t const*> dcs_nzd_vertices() const
{
return thrust::nullopt;
return cuda::std::nullopt;
}

__host__ __device__ thrust::optional<vertex_t> dcs_nzd_vertex_count() const
__host__ __device__ cuda::std::optional<vertex_t> dcs_nzd_vertex_count() const
{
return thrust::nullopt;
return cuda::std::nullopt;
}

private:
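
The bulk of the edge_partition_device_view.cuh changes swap thrust::optional for cuda::std::optional (from <cuda/std/optional>), which works in both __host__ and __device__ code. Below is a minimal, self-contained sketch of that pattern, not cuGraph's actual header; the function name and parameters are invented for illustration.

#include <cuda/std/optional>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>

// Look up `key` in a sorted device array and return its index, or
// cuda::std::nullopt if the key is absent (the same shape as the
// hypersparse-index lookup in the diff above).
template <typename vertex_t>
__device__ cuda::std::optional<vertex_t> sorted_idx_of(vertex_t const* sorted_keys,
                                                       vertex_t num_keys,
                                                       vertex_t key)
{
  auto it = thrust::lower_bound(thrust::seq, sorted_keys, sorted_keys + num_keys, key);
  return (it != sorted_keys + num_keys && *it == key)
           ? cuda::std::optional<vertex_t>{static_cast<vertex_t>(
               thrust::distance(sorted_keys, it))}
           : cuda::std::nullopt;
}
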
(changed file; filename not shown in this view)
@@ -21,8 +21,8 @@
#include <cugraph/utilities/packed_bool_utils.hpp>
#include <cugraph/utilities/thrust_tuple_utils.hpp>

#include <cuda/std/optional>
#include <thrust/iterator/iterator_traits.h>
#include <thrust/optional.h>

namespace cugraph {

@@ -182,7 +182,7 @@ template <typename edge_t>
class edge_partition_edge_dummy_property_device_view_t {
public:
using edge_type = edge_t;
using value_type = thrust::nullopt_t;
using value_type = cuda::std::nullopt_t;

static constexpr bool is_packed_bool = false;
static constexpr bool has_packed_bool_element = false;
@@ -194,7 +194,7 @@ class edge_partition_edge_dummy_property_device_view_t {
{
}

__device__ auto get(edge_t offset) const { return thrust::nullopt; }
__device__ auto get(edge_t offset) const { return cuda::std::nullopt; }
};

} // namespace detail
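
The same substitution reaches the dummy edge-property view above: its value_type becomes cuda::std::nullopt_t and get() returns cuda::std::nullopt. A minimal sketch of the idiom, using hypothetical names rather than the real classes, with a generic caller that branches at compile time on whether the view carries data:

#include <cuda/std/optional>
#include <cuda/std/type_traits>

// A property view that carries no per-edge data; generic callers receive
// cuda::std::nullopt from get().
template <typename edge_t>
struct dummy_edge_value_view_t {
  using edge_type  = edge_t;
  using value_type = cuda::std::nullopt_t;

  __device__ auto get(edge_t /* offset */) const { return cuda::std::nullopt; }
};

// Compile-time branch: skip the read when the view's value_type is nullopt_t.
template <typename EdgeValueView, typename edge_t>
__device__ void use_edge_value(EdgeValueView view, edge_t offset)
{
  if constexpr (cuda::std::is_same_v<typename EdgeValueView::value_type,
                                     cuda::std::nullopt_t>) {
    // no per-edge value; nothing to read
  } else {
    auto v = view.get(offset);  // real per-edge value
    (void)v;
  }
}
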
(remaining changed files not loaded in this view)
