Replace thrust::tuple with cuda::std::tuple
The former is more or less deprecated, and we want to move towards standard types.
miscco committed Feb 6, 2025
1 parent e999f15 commit 78f9483
Showing 162 changed files with 2,187 additions and 2,077 deletions.
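The substitution is mechanical throughout: thrust::make_tuple becomes cuda::std::make_tuple, thrust::get<I> becomes cuda::std::get<I>, and thrust::tuple_size becomes cuda::std::tuple_size. A minimal before/after sketch of the pattern (illustrative example, not taken from this diff):

    #include <cuda/std/tuple>

    __host__ __device__ cuda::std::tuple<int, float> scale(int i, float f)
    {
      // Before: thrust::make_tuple(i, 2.0f * f) returning thrust::tuple<int, float>
      auto t = cuda::std::make_tuple(i, 2.0f * f);
      // Element access: cuda::std::get<I> instead of thrust::get<I>
      return cuda::std::make_tuple(cuda::std::get<0>(t), cuda::std::get<1>(t));
    }

Recent CCCL defines thrust::tuple as an alias of cuda::std::tuple, which is why the two families interoperate in the hunks below (for example, cuda::std::make_tuple results being passed straight to thrust::make_zip_iterator).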
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2024, NVIDIA CORPORATION.
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -238,15 +238,15 @@ void look_into_vertex_and_edge_partitions(

if (renumber_map) {
thrust::for_each(thrust::host,
- thrust::make_zip_iterator(thrust::make_tuple(
+ thrust::make_zip_iterator(cuda::std::make_tuple(
h_vertices_in_this_proces.begin(),
thrust::make_counting_iterator(renumbered_vertex_id_of_local_first))),
- thrust::make_zip_iterator(thrust::make_tuple(
+ thrust::make_zip_iterator(cuda::std::make_tuple(
h_vertices_in_this_proces.end(),
thrust::make_counting_iterator(renumbered_vertex_id_of_local_last))),
[comm_rank](auto old_and_new_id_pair) {
- auto old_id = thrust::get<0>(old_and_new_id_pair);
- auto new_id = thrust::get<1>(old_and_new_id_pair);
+ auto old_id = cuda::std::get<0>(old_and_new_id_pair);
+ auto new_id = cuda::std::get<1>(old_and_new_id_pair);
printf("owner rank = %d, original vertex id %d is renumbered to %d\n",
comm_rank,
static_cast<int>(old_id),
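The hunk above keeps thrust::make_zip_iterator but builds its iterator tuple with cuda::std::make_tuple. A self-contained host-side sketch of the same renumbering-print pattern (assumed names, not the file's actual code):

    #include <cuda/std/tuple>
    #include <thrust/execution_policy.h>
    #include <thrust/for_each.h>
    #include <thrust/iterator/counting_iterator.h>
    #include <thrust/iterator/zip_iterator.h>

    #include <cstdio>
    #include <vector>

    void print_renumbering(std::vector<int> const& old_ids, int first_new_id)
    {
      // Pair each old id with a consecutive new id via a counting iterator.
      auto first = thrust::make_zip_iterator(cuda::std::make_tuple(
        old_ids.begin(), thrust::make_counting_iterator(first_new_id)));
      auto last  = thrust::make_zip_iterator(cuda::std::make_tuple(
        old_ids.end(),
        thrust::make_counting_iterator(first_new_id + static_cast<int>(old_ids.size()))));
      thrust::for_each(thrust::host, first, last, [](auto pair) {
        printf("original vertex id %d is renumbered to %d\n",
               cuda::std::get<0>(pair),
               cuda::std::get<1>(pair));
      });
    }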
2 changes: 1 addition & 1 deletion cpp/include/cugraph/detail/decompress_edge_partition.cuh
@@ -24,13 +24,13 @@

#include <rmm/device_uvector.hpp>

+ #include <cuda/std/tuple>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sequence.h>
- #include <thrust/tuple.h>

#include <optional>
#include <tuple>
6 changes: 3 additions & 3 deletions cpp/include/cugraph/edge_partition_device_view.cuh
@@ -27,12 +27,12 @@
#include <rmm/exec_policy.hpp>

#include <cuda/std/optional>
+ #include <cuda/std/tuple>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
- #include <thrust/tuple.h>

#include <cassert>
#include <optional>
@@ -167,13 +167,13 @@ class edge_partition_device_view_base_t {
}

// major_idx == major offset if CSR/CSC, major_offset != major_idx if DCSR/DCSC
- __device__ thrust::tuple<vertex_t const*, edge_t, edge_t> local_edges(
+ __device__ cuda::std::tuple<vertex_t const*, edge_t, edge_t> local_edges(
vertex_t major_idx) const noexcept
{
auto edge_offset = offsets_[major_idx];
auto local_degree = offsets_[major_idx + 1] - edge_offset;
auto indices = indices_.data() + edge_offset;
- return thrust::make_tuple(indices, edge_offset, local_degree);
+ return cuda::std::make_tuple(indices, edge_offset, local_degree);
}

// major_idx == major offset if CSR/CSC, major_offset != major_idx if DCSR/DCSC
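For callers, the only visible change is the tuple type returned by local_edges; element access goes through cuda::std::get. A sketch of device-side consumption (caller code assumed, not part of this commit):

    // Inside device code, with an edge partition view and a valid major_idx:
    auto edges        = edge_partition.local_edges(major_idx);
    auto indices      = cuda::std::get<0>(edges);  // pointer to the adjacent vertices
    auto edge_offset  = cuda::std::get<1>(edges);  // offset of this vertex's first edge
    auto local_degree = cuda::std::get<2>(edges);  // number of edges for this vertex
    for (edge_t i = 0; i < local_degree; ++i) {
      vertex_t neighbor = indices[i];
      // edge_offset + i addresses per-edge data for this edge
    }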
10 changes: 5 additions & 5 deletions cpp/include/cugraph/edge_partition_edge_property_device_view.cuh
@@ -59,7 +59,7 @@ class edge_partition_edge_property_device_view_t {
__device__ value_t get(edge_t offset) const
{
if constexpr (has_packed_bool_element) {
- static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
+ static_assert(is_packed_bool, "unimplemented for cuda::std::tuple types.");
auto mask = cugraph::packed_bool_mask(offset);
return static_cast<bool>(*(value_first_ + cugraph::packed_bool_offset(offset)) & mask);
} else {
@@ -74,7 +74,7 @@ class edge_partition_edge_property_device_view_t {
set(edge_t offset, value_t val) const
{
if constexpr (has_packed_bool_element) {
- static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
+ static_assert(is_packed_bool, "unimplemented for cuda::std::tuple types.");
auto mask = cugraph::packed_bool_mask(offset);
if (val) {
atomicOr(value_first_ + cugraph::packed_bool_offset(offset), mask);
@@ -93,7 +93,7 @@ class edge_partition_edge_property_device_view_t {
atomic_and(edge_t offset, value_t val) const
{
if constexpr (has_packed_bool_element) {
- static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
+ static_assert(is_packed_bool, "unimplemented for cuda::std::tuple types.");
auto mask = cugraph::packed_bool_mask(offset);
auto old = atomicAnd(value_first_ + cugraph::packed_bool_offset(offset),
val ? uint32_t{0xffffffff} : ~mask);
@@ -110,7 +110,7 @@ class edge_partition_edge_property_device_view_t {
atomic_or(edge_t offset, value_t val) const
{
if constexpr (has_packed_bool_element) {
- static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
+ static_assert(is_packed_bool, "unimplemented for cuda::std::tuple types.");
auto mask = cugraph::packed_bool_mask(offset);
auto old =
atomicOr(value_first_ + cugraph::packed_bool_offset(offset), val ? mask : uint32_t{0});
@@ -137,7 +137,7 @@ class edge_partition_edge_property_device_view_t {
elementwise_atomic_cas(edge_t offset, value_t compare, value_t val) const
{
if constexpr (has_packed_bool_element) {
- static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
+ static_assert(is_packed_bool, "unimplemented for cuda::std::tuple types.");
cuda::atomic_ref<uint32_t, cuda::thread_scope_device> word(
*(value_first_ + cugraph::packed_bool_offset(offset)));
auto mask = cugraph::packed_bool_mask(offset);
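All of these static_asserts guard the packed-bool specialization: 32 bools share one uint32_t word, so tuple-valued properties cannot take this path. Conceptually, the two helpers used above reduce to word/bit arithmetic (a sketch of the idea only; cugraph's actual definitions may differ in detail):

    #include <cstddef>
    #include <cstdint>

    // Word index and single-bit mask for a flat boolean offset.
    __host__ __device__ constexpr size_t packed_bool_offset_sketch(size_t offset)
    {
      return offset / 32;  // which 32-bit word holds this bool
    }
    __host__ __device__ constexpr uint32_t packed_bool_mask_sketch(size_t offset)
    {
      return uint32_t{1} << (offset % 32);  // the bit within that word
    }

This is why set/atomic_and/atomic_or above operate on whole words with atomicOr/atomicAnd and then test the returned old word against the mask.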
@@ -81,7 +81,7 @@ class edge_partition_endpoint_property_device_view_t {
{
auto val_offset = value_offset(offset);
if constexpr (has_packed_bool_element) {
- static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
+ static_assert(is_packed_bool, "unimplemented for cuda::std::tuple types.");
auto mask = cugraph::packed_bool_mask(val_offset);
return static_cast<bool>(*(value_first_ + cugraph::packed_bool_offset(val_offset)) & mask);
} else {
@@ -97,7 +97,7 @@ class edge_partition_endpoint_property_device_view_t {
{
auto val_offset = value_offset(offset);
if constexpr (has_packed_bool_element) {
- static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
+ static_assert(is_packed_bool, "unimplemented for cuda::std::tuple types.");
auto mask = cugraph::packed_bool_mask(val_offset);
auto old = atomicAnd(value_first_ + cugraph::packed_bool_offset(val_offset),
val ? cugraph::packed_bool_full_mask() : ~mask);
@@ -115,7 +115,7 @@ class edge_partition_endpoint_property_device_view_t {
{
auto val_offset = value_offset(offset);
if constexpr (has_packed_bool_element) {
- static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
+ static_assert(is_packed_bool, "unimplemented for cuda::std::tuple types.");
auto mask = cugraph::packed_bool_mask(val_offset);
auto old = atomicOr(value_first_ + cugraph::packed_bool_offset(val_offset),
val ? mask : cugraph::packed_bool_empty_mask());
@@ -144,7 +144,7 @@ class edge_partition_endpoint_property_device_view_t {
{
auto val_offset = value_offset(offset);
if constexpr (has_packed_bool_element) {
- static_assert(is_packed_bool, "unimplemented for thrust::tuple types.");
+ static_assert(is_packed_bool, "unimplemented for cuda::std::tuple types.");
cuda::atomic_ref<uint32_t, cuda::thread_scope_device> word(
*(value_first_ + cugraph::packed_bool_offset(val_offset)));
auto mask = cugraph::packed_bool_mask(val_offset);
2 changes: 1 addition & 1 deletion cpp/include/cugraph/edge_src_dst_property.hpp
@@ -25,9 +25,9 @@
#include <raft/core/host_span.hpp>

#include <cuda/std/optional>
+ #include <cuda/std/tuple>
#include <thrust/iterator/iterator_traits.h>
#include <thrust/iterator/zip_iterator.h>
- #include <thrust/tuple.h>

#include <optional>
#include <type_traits>
6 changes: 3 additions & 3 deletions cpp/include/cugraph/src_dst_lookup_container.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2024, NVIDIA CORPORATION.
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@

#include <rmm/exec_policy.hpp>

- #include <thrust/tuple.h>
+ #include <cuda/std/tuple>

#include <unordered_map>
#include <vector>
@@ -34,7 +34,7 @@ namespace cugraph {
template <typename edge_id_t,
typename edge_type_t,
typename vertex_t,
- typename value_t = thrust::tuple<vertex_t, vertex_t>>
+ typename value_t = cuda::std::tuple<vertex_t, vertex_t>>
class lookup_container_t {
template <typename _edge_id_t, typename _edge_type_t, typename _vertex_t, typename _value_t>
struct lookup_container_impl;
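Illustrative only (not part of the commit): with the new default, a lookup container maps (edge id, edge type) keys to (source, destination) vertex pairs unless a different value_t is supplied.

    #include <cugraph/src_dst_lookup_container.hpp>

    #include <cuda/std/tuple>

    #include <cstdint>
    #include <type_traits>

    // Equivalent spellings under the new default template argument:
    using container_t = cugraph::lookup_container_t<int64_t, int32_t, int32_t>;
    using container_explicit_t =
      cugraph::lookup_container_t<int64_t, int32_t, int32_t,
                                  cuda::std::tuple<int32_t, int32_t>>;
    static_assert(std::is_same_v<container_t, container_explicit_t>);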
57 changes: 30 additions & 27 deletions cpp/include/cugraph/utilities/atomic_ops.cuh
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2024, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,12 +20,12 @@

#include <raft/util/device_atomics.cuh>

+ #include <cuda/std/tuple>
#include <thrust/detail/type_traits/iterator/is_discard_iterator.h>
#include <thrust/iterator/detail/any_assign.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/iterator_traits.h>
#include <thrust/memory.h>
- #include <thrust/tuple.h>

namespace cugraph {

@@ -36,26 +36,26 @@ __device__ constexpr TupleType thrust_tuple_atomic_and(Iterator iter,
TupleType tup,
std::index_sequence<Is...>)
{
- return thrust::make_tuple(
- atomicAnd(&(thrust::raw_reference_cast(thrust::get<Is>(*iter))), thrust::get<Is>(tup))...);
+ return cuda::std::make_tuple(atomicAnd(&(thrust::raw_reference_cast(cuda::std::get<Is>(*iter))),
+ cuda::std::get<Is>(tup))...);
}

template <typename Iterator, typename TupleType, std::size_t... Is>
__device__ constexpr TupleType thrust_tuple_atomic_or(Iterator iter,
TupleType tup,
std::index_sequence<Is...>)
{
- return thrust::make_tuple(
- atomicOr(&(thrust::raw_reference_cast(thrust::get<Is>(*iter))), thrust::get<Is>(tup))...);
+ return cuda::std::make_tuple(
+ atomicOr(&(thrust::raw_reference_cast(cuda::std::get<Is>(*iter))), cuda::std::get<Is>(tup))...);
}

template <typename Iterator, typename TupleType, std::size_t... Is>
__device__ constexpr TupleType thrust_tuple_atomic_add(Iterator iter,
TupleType tup,
std::index_sequence<Is...>)
{
- return thrust::make_tuple(
- atomicAdd(&(thrust::raw_reference_cast(thrust::get<Is>(*iter))), thrust::get<Is>(tup))...);
+ return cuda::std::make_tuple(atomicAdd(&(thrust::raw_reference_cast(cuda::std::get<Is>(*iter))),
+ cuda::std::get<Is>(tup))...);
}

template <typename Iterator, typename TupleType, std::size_t... Is>
@@ -64,27 +64,27 @@ __device__ constexpr TupleType thrust_tuple_elementwise_atomic_cas(Iterator iter
TupleType val_tup,
std::index_sequence<Is...>)
{
- return thrust::make_tuple(atomicCAS(&(thrust::raw_reference_cast(thrust::get<Is>(*iter))),
- thrust::get<Is>(comp_tup),
- thrust::get<Is>(val_tup))...);
+ return cuda::std::make_tuple(atomicCAS(&(thrust::raw_reference_cast(cuda::std::get<Is>(*iter))),
+ cuda::std::get<Is>(comp_tup),
+ cuda::std::get<Is>(val_tup))...);
}

template <typename Iterator, typename TupleType, std::size_t... Is>
__device__ constexpr TupleType thrust_tuple_elementwise_atomic_min(Iterator iter,
TupleType tup,
std::index_sequence<Is...>)
{
- return thrust::make_tuple(
- atomicMin(&(thrust::raw_reference_cast(thrust::get<Is>(*iter))), thrust::get<Is>(tup))...);
+ return cuda::std::make_tuple(atomicMin(&(thrust::raw_reference_cast(cuda::std::get<Is>(*iter))),
+ cuda::std::get<Is>(tup))...);
}

template <typename Iterator, typename TupleType, std::size_t... Is>
__device__ constexpr TupleType thrust_tuple_elementwise_atomic_max(Iterator iter,
TupleType tup,
std::index_sequence<Is...>)
{
- return thrust::make_tuple(
- atomicMax(&(thrust::raw_reference_cast(thrust::get<Is>(*iter))), thrust::get<Is>(tup))...);
+ return cuda::std::make_tuple(atomicMax(&(thrust::raw_reference_cast(cuda::std::get<Is>(*iter))),
+ cuda::std::get<Is>(tup))...);
}

} // namespace detail
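Each detail helper above uses the same technique: a std::index_sequence is expanded so that one scalar atomic is issued per tuple element, and the returned old values are regathered into a tuple. A stripped-down sketch of the expansion (hypothetical names, assuming an iterator that dereferences to a real tuple of atomically updatable elements):

    #include <cuda/std/tuple>

    #include <utility>

    template <typename Iterator, typename TupleType, std::size_t... Is>
    __device__ TupleType tuple_atomic_add_sketch(Iterator iter,
                                                 TupleType tup,
                                                 std::index_sequence<Is...>)
    {
      // The pack expansion visits Is = 0, 1, ..., N-1, issuing one atomicAdd per
      // element; each call returns that element's previous value, and the results
      // are collected back into a tuple.
      return cuda::std::make_tuple(
        atomicAdd(&cuda::std::get<Is>(*iter), cuda::std::get<Is>(tup))...);
    }

The thrust::raw_reference_cast in the real code strips Thrust's proxy reference types so that a raw address can be handed to the CUDA atomic.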
@@ -114,7 +114,7 @@ __device__
atomic_and(Iterator iter, T value)
{
return detail::thrust_tuple_atomic_and(
- iter, value, std::make_index_sequence<thrust::tuple_size<T>::value>{});
+ iter, value, std::make_index_sequence<cuda::std::tuple_size<T>::value>{});
}

template <typename Iterator, typename T>
@@ -142,7 +142,7 @@ __device__
atomic_or(Iterator iter, T value)
{
return detail::thrust_tuple_atomic_or(
- iter, value, std::make_index_sequence<thrust::tuple_size<T>::value>{});
+ iter, value, std::make_index_sequence<cuda::std::tuple_size<T>::value>{});
}

template <typename Iterator, typename T>
@@ -169,10 +169,11 @@ __device__
T>
atomic_add(Iterator iter, T value)
{
- static_assert(thrust::tuple_size<typename thrust::iterator_traits<Iterator>::value_type>::value ==
- thrust::tuple_size<T>::value);
+ static_assert(
+ cuda::std::tuple_size<typename thrust::iterator_traits<Iterator>::value_type>::value ==
+ cuda::std::tuple_size<T>::value);
return detail::thrust_tuple_atomic_add(
- iter, value, std::make_index_sequence<thrust::tuple_size<T>::value>{});
+ iter, value, std::make_index_sequence<cuda::std::tuple_size<T>::value>{});
}

template <typename Iterator, typename T>
@@ -193,7 +194,7 @@
elementwise_atomic_cas(Iterator iter, T compare, T value)
{
return detail::thrust_tuple_elementwise_atomic_cas(
- iter, compare, value, std::make_index_sequence<thrust::tuple_size<T>::value>{});
+ iter, compare, value, std::make_index_sequence<cuda::std::tuple_size<T>::value>{});
}

template <typename Iterator, typename T>
@@ -220,10 +221,11 @@ __device__
T>
elementwise_atomic_min(Iterator iter, T const& value)
{
- static_assert(thrust::tuple_size<typename thrust::iterator_traits<Iterator>::value_type>::value ==
- thrust::tuple_size<T>::value);
+ static_assert(
+ cuda::std::tuple_size<typename thrust::iterator_traits<Iterator>::value_type>::value ==
+ cuda::std::tuple_size<T>::value);
return detail::thrust_tuple_elementwise_atomic_min(
- iter, value, std::make_index_sequence<thrust::tuple_size<T>::value>{});
+ iter, value, std::make_index_sequence<cuda::std::tuple_size<T>::value>{});
}

template <typename Iterator, typename T>
@@ -250,10 +252,11 @@ __device__
T>
elementwise_atomic_max(Iterator iter, T const& value)
{
- static_assert(thrust::tuple_size<typename thrust::iterator_traits<Iterator>::value_type>::value ==
- thrust::tuple_size<T>::value);
+ static_assert(
+ cuda::std::tuple_size<typename thrust::iterator_traits<Iterator>::value_type>::value ==
+ cuda::std::tuple_size<T>::value);
return detail::thrust_tuple_elementwise_atomic_max(
- iter, value, std::make_index_sequence<thrust::tuple_size<T>::value>{});
+ iter, value, std::make_index_sequence<cuda::std::tuple_size<T>::value>{});
}

template <typename Iterator, typename T>