Skip to content

Commit

Permalink
Merge changes from latest NanoVDB
Browse files Browse the repository at this point in the history
Signed-off-by: Matthew Cong <[email protected]>
  • Loading branch information
matthewdcong committed Jan 29, 2025
1 parent 2148e41 commit bc6d4f7
Show file tree
Hide file tree
Showing 30 changed files with 1,923 additions and 274 deletions.
2 changes: 2 additions & 0 deletions .github/workflows/nanovdb.yml
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,7 @@ jobs:
--cargs=\'
-DUSE_EXPLICIT_INSTANTIATION=OFF
-DNANOVDB_USE_CUDA=ON
-DCMAKE_CUDA_ARCHITECTURES="80"
-DNANOVDB_USE_OPENVDB=ON
-DCMAKE_INSTALL_PREFIX=`pwd`
-DUSE_BLOSC=OFF
Expand Down Expand Up @@ -127,6 +128,7 @@ jobs:
-DMSVC_COMPRESS_PDB=ON
-DUSE_EXPLICIT_INSTANTIATION=OFF
-DNANOVDB_USE_CUDA=ON
-DCMAKE_CUDA_ARCHITECTURES="80"
-DNANOVDB_USE_OPENVDB=ON
-DVCPKG_TARGET_TRIPLET=${VCPKG_DEFAULT_TRIPLET}
-DCMAKE_TOOLCHAIN_FILE=\"${VCPKG_INSTALLATION_ROOT}\\scripts\\buildsystems\\vcpkg.cmake\"
Expand Down
4 changes: 4 additions & 0 deletions nanovdb/nanovdb/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,8 @@ if(NANOVDB_USE_CUDA)
get_target_property(VDB_MSVC_RUNTIME_SELECTION openvdb MSVC_RUNTIME_LIBRARY)
endif()
endif()

find_package(CUDAToolkit)
endif()

if(NANOVDB_USE_OPENVDB)
Expand Down Expand Up @@ -169,8 +171,10 @@ set(NANOVDB_INCLUDE_FILES
# NanoVDB cuda header files
set(NANOVDB_INCLUDE_CUDA_FILES
cuda/DeviceBuffer.h
cuda/DeviceStreamMap.h
cuda/GridHandle.cuh
cuda/NodeManager.cuh
cuda/UnifiedBuffer.h
)

# NanoVDB io header files
Expand Down
55 changes: 46 additions & 9 deletions nanovdb/nanovdb/GridHandle.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,13 +45,14 @@ class GridHandle
public:
using BufferType = BufferT;

/// @brief Move constructor from a host buffer
/// @brief Move constructor from a dual host-device buffer
/// @param buffer buffer containing one or more NanoGrids that will be moved into this GridHandle
/// @throw Will throw an error if the buffer does not contain a valid NanoGrid!
/// @note The implementation of this template specialization is in nanovdb/cuda/GridHandle.cuh since it requires CUDA
template<typename T = BufferT, typename util::enable_if<BufferTraits<T>::hasDeviceDual, int>::type = 0>
GridHandle(T&& buffer);

/// @brief Move constructor from a dual host-device buffer
/// @brief Move constructor from a host buffer
/// @param buffer buffer containing one or more NanoGrids that will be moved into this GridHandle
/// @throw Will throw an error if the buffer does not contain a valid NanoGrid!
template<typename T = BufferT, typename util::disable_if<BufferTraits<T>::hasDeviceDual, int>::type = 0>
Expand Down Expand Up @@ -112,17 +113,23 @@ class GridHandle
template<typename U = BufferT>
typename util::enable_if<BufferTraits<U>::hasDeviceDual, void*>::type
deviceData() { return mBuffer.deviceData(); }
template<typename U = BufferT>
typename util::enable_if<BufferTraits<U>::hasDeviceDual, void*>::type
deviceData(int device) { return mBuffer.deviceData(device); }

//@{
/// @brief Returns the size in bytes of the raw memory buffer managed by this GridHandle.
uint64_t size() const { return mBuffer.size(); }
[[deprecated("Use GridHandle::bufferSize instead.")]] uint64_t size() const { return mBuffer.size(); }
uint64_t bufferSize() const { return mBuffer.size(); }
//@}

//@{
/// @brief Return true if this handle is empty, i.e. has no allocated memory
bool empty() const { return this->size() == 0; }
bool isEmpty() const { return this->size() == 0; }
bool empty() const { return mBuffer.size() == 0; }
bool isEmpty() const { return mBuffer.size() == 0; }
//@}

/// @brief Return true if this handle contains any grids
/// @brief Return true if this handle is not empty, i.e. contains at least one grid
operator bool() const { return !this->empty(); }

/// @brief Returns a const host pointer to the @a n'th NanoVDB grid encoded in this GridHandle.
Expand Down Expand Up @@ -152,7 +159,7 @@ class GridHandle

/// @brief Return a const pointer to the @a n'th grid encoded in this GridHandle on the device, e.g. GPU
/// @tparam ValueT Value type of the grid point to be returned
/// @param n Index if of the grid pointer to be returned
/// @param n Index of the grid pointer to be returned
/// @param verbose if non-zero error messages will be printed in case something failed
/// @warning Note that the return pointer can be NULL if the GridHandle was not initialized, @a n is invalid,
/// or if the template parameter does not match the specified grid.
Expand All @@ -164,13 +171,25 @@ class GridHandle
/// @note This method is only available if the buffer supports devices
template<typename U = BufferT>
typename util::enable_if<BufferTraits<U>::hasDeviceDual, void>::type
deviceUpload(void* stream = nullptr, bool sync = true) { mBuffer.deviceUpload(stream, sync); }
deviceUpload(void* stream, bool sync = true) { mBuffer.deviceUpload(stream, sync); }

/// @brief Upload the host buffer to a specific device buffer. If the device buffer doesn't exist it is created first
/// @param device Device to upload host data to
/// @param stream cuda stream
/// @param sync if false the memory copy is asynchronous
template<typename U = BufferT>
typename util::enable_if<BufferTraits<U>::hasDeviceDual, void>::type
deviceUpload(int device = 0, void* stream = nullptr, bool sync = true) { mBuffer.deviceUpload(device, stream, sync); }

/// @brief Download the grid from the device, e.g. from GPU to CPU
/// @note This method is only available if the buffer supports devices
template<typename U = BufferT>
typename util::enable_if<BufferTraits<U>::hasDeviceDual, void>::type
deviceDownload(void* stream = nullptr, bool sync = true) { mBuffer.deviceDownload(stream, sync); }
deviceDownload(void* stream, bool sync = true) { mBuffer.deviceDownload(stream, sync); }

template<typename U = BufferT>
typename util::enable_if<BufferTraits<U>::hasDeviceDual, void>::type
deviceDownload(int device = 0, void* stream = nullptr, bool sync = true) { mBuffer.deviceDownload(device, stream, sync); }

/// @brief Check if the buffer in this handle has any padding, i.e. if the buffer is larger than the combined size of all its grids
/// @return true if the combined size of all grids is smaller than the buffer size
Expand All @@ -184,6 +203,23 @@ class GridHandle
/// @return Return the byte size of the specified grid
uint64_t gridSize(uint32_t n = 0) const {return mMetaData[n].size; }

/// @brief Sum the memory footprints of every grid stored in this buffer
/// @return the combined size in bytes of all grids associated with this buffer
uint64_t totalGridSize() const {
    uint64_t total = 0;
    for (const auto &meta : mMetaData) {
        total += meta.size;
    }
    // the grids can never occupy more memory than the buffer provides
    NANOVDB_ASSERT(total <= mBuffer.size());
    return total;
}

/// @brief Compute the size of the unused storage in this buffer
/// @return the number of bytes in this buffer not occupied by grids
uint64_t freeSize() const {
    const uint64_t used = this->totalGridSize();
    return mBuffer.size() - used;
}

/// @brief Test whether this buffer has no unused storage left, i.e. no memory beyond what the grids occupy
/// @return true if the buffer is empty or fully occupied with grids
bool isFull() const { return mBuffer.size() == this->totalGridSize(); }

/// @brief Return the GridType of the @a n'th grid in this GridHandle
/// @param n index of the grid (assumed to be less than gridCount())
/// @return Return the GridType of the specified grid
Expand Down Expand Up @@ -315,6 +351,7 @@ inline __hostdev__ void cpyGridHandleMeta(const GridData *data, GridHandleMetaDa
}
}// void cpyGridHandleMeta(const GridData *data, GridHandleMetaData *meta)

// template specialization of move constructor from a host buffer
template<typename BufferT>
template<typename T, typename util::disable_if<BufferTraits<T>::hasDeviceDual, int>::type>
GridHandle<BufferT>::GridHandle(T&& buffer)
Expand Down
34 changes: 34 additions & 0 deletions nanovdb/nanovdb/NanoVDB.h
Original file line number Diff line number Diff line change
Expand Up @@ -1159,6 +1159,21 @@ class Mask
__hostdev__ uint64_t* words() { return mWords; }
__hostdev__ const uint64_t* words() const { return mWords; }

/// @brief Return the n'th word of this mask, reinterpreted as an unsigned integer of type WordT
/// @tparam WordT unsigned word type: uint8_t, uint16_t, uint32_t or uint64_t
/// @param n index of the word, counted in units of WordT
/// @return the n'th WordT-sized word of the underlying bit mask
template<typename WordT>
__hostdev__ WordT getWord(uint32_t n) const
{
    static_assert(util::is_same<WordT, uint8_t, uint16_t, uint32_t, uint64_t>::value);
    // bounds check in bytes: mWords holds WORD_COUNT uint64_t words, i.e. 8*WORD_COUNT bytes
    // (the previous bound n*8*sizeof(WordT) < WORD_COUNT compared a bit offset to a word count)
    NANOVDB_ASSERT(n*sizeof(WordT) < 8*WORD_COUNT);
    // const cast target: reinterpret_cast cannot cast away the const of mWords in a const member function
    return reinterpret_cast<const WordT*>(mWords)[n];
}
/// @brief Overwrite the n'th word of this mask with w
/// @tparam WordT unsigned word type: uint8_t, uint16_t, uint32_t or uint64_t
/// @param w new value of the word
/// @param n index of the word, counted in units of WordT
template<typename WordT>
__hostdev__ void setWord(WordT w, uint32_t n)
{
    static_assert(util::is_same<WordT, uint8_t, uint16_t, uint32_t, uint64_t>::value);
    // bounds check in bytes: mWords holds WORD_COUNT uint64_t words, i.e. 8*WORD_COUNT bytes
    // (the previous bound n*8*sizeof(WordT) < WORD_COUNT compared a bit offset to a word count)
    NANOVDB_ASSERT(n*sizeof(WordT) < 8*WORD_COUNT);
    reinterpret_cast<WordT*>(mWords)[n] = w;
}

/// @brief Assignment operator that works with openvdb::util::NodeMask
template<typename MaskT = Mask>
__hostdev__ typename util::enable_if<!util::is_same<MaskT, Mask>::value, Mask&>::type operator=(const MaskT& other)
Expand Down Expand Up @@ -1228,6 +1243,25 @@ class Mask
{
on ? this->setOnAtomic(n) : this->setOffAtomic(n);
}
/*
template<typename WordT>
__device__ inline void setWordAtomic(WordT w, uint32_t n)
{
static_assert(util::is_same<WordT, uint8_t, uint16_t, uint32_t, uint64_t>::value);
NANOVDB_ASSERT(n*8*sizeof(WordT) < WORD_COUNT);
if constexpr(util::is_same<WordT,uint8_t>::value) {
mask <<= x;
} else if constexpr(util::is_same<WordT,uint16_t>::value) {
unsigned int mask = w;
if (n >> 1) mask <<= 16;
atomicOr(reinterpret_cast<unsigned int*>(this) + n, mask);
} else if constexpr(util::is_same<WordT,uint32_t>::value) {
atomicOr(reinterpret_cast<unsigned int*>(this) + n, w);
} else {
atomicOr(reinterpret_cast<unsigned long long int*>(this) + n, w);
}
}
*/
#endif
/// @brief Set the specified bit on or off.
__hostdev__ void set(uint32_t n, bool on)
Expand Down
2 changes: 1 addition & 1 deletion nanovdb/nanovdb/NodeManager.h
Original file line number Diff line number Diff line change
Expand Up @@ -316,7 +316,7 @@ NodeManagerHandle<BufferT> createNodeManager(const NanoGrid<BuildT> &grid,
}

return handle;// // is converted to r-value so return value is move constructed!
}
}// createNodeManager

} // namespace nanovdb

Expand Down
Loading

0 comments on commit bc6d4f7

Please sign in to comment.