diff --git a/common/trtexec_vendor/CMakeLists.txt b/common/trtexec_vendor/CMakeLists.txt
deleted file mode 100644
index 220e7e6d06f81..0000000000000
--- a/common/trtexec_vendor/CMakeLists.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-cmake_minimum_required(VERSION 3.14)
-project(trtexec_vendor)
-
-if(NOT CMAKE_CXX_STANDARD)
- set(CMAKE_CXX_STANDARD 17)
- set(CMAKE_CXX_STANDARD_REQUIRED ON)
- set(CMAKE_CXX_EXTENSIONS OFF)
-endif()
-if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
- add_compile_options(-Wall -Wextra -Wpedantic)
-endif()
-
-find_package(ament_cmake REQUIRED)
-find_package(CUDA)
-find_package(cudnn_cmake_module REQUIRED)
-find_package(CUDNN)
-find_package(tensorrt_cmake_module REQUIRED)
-find_package(TENSORRT)
-
-if(NOT (${CUDA_FOUND} AND ${CUDNN_FOUND} AND ${TENSORRT_FOUND}))
- message(WARNING "cuda, cudnn, tensorrt libraries are not found")
- return()
-endif()
-
-if(${TENSORRT_VERSION} VERSION_LESS 8.2.1)
- message(WARNING "The tensorrt version less than 8.2.1 isn't supported.")
- return()
-endif()
-
-set(TRTEXEC_DEFAULT_BIN /usr/src/tensorrt/bin/trtexec)
-if(NOT EXISTS TRTEXEC_DEFAULT_BIN)
- include(FetchContent)
- if(${TENSORRT_VERSION} VERSION_EQUAL 8.4.2)
- set(TENSORRT_VERSION 8.4.1)
- elseif(${TENSORRT_VERSION} VERSION_LESS_EQUAL 8.2.5 AND ${TENSORRT_VERSION} VERSION_GREATER 8.2.1)
- set(TENSORRT_VERSION 8.2.1)
- endif()
- fetchcontent_declare(tensorrt
- GIT_REPOSITORY https://github.com/NVIDIA/TensorRT
- GIT_TAG ${TENSORRT_VERSION}
- GIT_SUBMODULES ""
- )
- fetchcontent_getproperties(tensorrt)
- if(NOT tensorrt_POPULATED)
- fetchcontent_populate(tensorrt)
- endif()
- set(TRTEXEC_SOURCES
- ${tensorrt_SOURCE_DIR}/samples/trtexec/trtexec.cpp
- ${tensorrt_SOURCE_DIR}/samples/common/sampleEngines.cpp
- ${tensorrt_SOURCE_DIR}/samples/common/sampleInference.cpp
- ${tensorrt_SOURCE_DIR}/samples/common/sampleOptions.cpp
- ${tensorrt_SOURCE_DIR}/samples/common/sampleReporting.cpp
- ${tensorrt_SOURCE_DIR}/samples/common/logger.cpp
- )
- if(${TENSORRT_VERSION} VERSION_GREATER_EQUAL 8.4)
- list(APPEND TRTEXEC_SOURCES
- ${tensorrt_SOURCE_DIR}/samples/common/sampleUtils.cpp
- )
- endif()
- cuda_add_executable(${PROJECT_NAME}
- ${TRTEXEC_SOURCES}
- )
- target_link_libraries(${PROJECT_NAME}
- ${TENSORRT_LIBRARIES}
- )
- target_include_directories(${PROJECT_NAME}
- PRIVATE ${tensorrt_SOURCE_DIR}/samples/common
- )
-
- set_target_properties(${PROJECT_NAME}
- PROPERTIES OUTPUT_NAME trtexec
- )
-
- install(TARGETS ${PROJECT_NAME}
- ARCHIVE DESTINATION lib
- LIBRARY DESTINATION lib
- RUNTIME DESTINATION bin
- )
-endif()
-
-if(BUILD_TESTING)
- find_package(ament_lint_auto REQUIRED)
- ament_lint_auto_find_test_dependencies()
-endif()
-
-ament_environment_hooks("${CMAKE_CURRENT_SOURCE_DIR}/env-hooks/${PROJECT_NAME}.sh.in")
-
-ament_package()
diff --git a/common/trtexec_vendor/env-hooks/trtexec_vendor.sh.in b/common/trtexec_vendor/env-hooks/trtexec_vendor.sh.in
deleted file mode 100644
index 7c2ef456ecfec..0000000000000
--- a/common/trtexec_vendor/env-hooks/trtexec_vendor.sh.in
+++ /dev/null
@@ -1,5 +0,0 @@
-TRTEXEC_DEFAULT_BIN_DIR=/usr/src/tensorrt/bin
-TRTEXEC_DEFAULT_BIN=$TRTEXEC_DEFAULT_BIN_DIR/trtexec
-if [ -f $TRTEXEC_DEFAULT_BIN ]; then
- ament_prepend_unique_value PATH $TRTEXEC_DEFAULT_BIN_DIR
-fi
diff --git a/common/trtexec_vendor/package.xml b/common/trtexec_vendor/package.xml
deleted file mode 100644
index f197858060f3a..0000000000000
--- a/common/trtexec_vendor/package.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
-<package format="3">
-  <name>trtexec_vendor</name>
-  <version>0.1.0</version>
-  <description>The vendor package of trtexec</description>
-  <maintainer email="daisuke.nishimatsu@tier4.jp">Daisuke Nishimatsu</maintainer>
-  <license>Apache 2.0</license>
-
-  <buildtool_depend>ament_cmake</buildtool_depend>
-  <depend>cudnn_cmake_module</depend>
-  <depend>tensorrt_cmake_module</depend>
-
-  <test_depend>ament_lint_auto</test_depend>
-  <test_depend>autoware_lint_common</test_depend>
-
-  <export>
-    <build_type>ament_cmake</build_type>
-  </export>
-</package>
diff --git a/perception/lidar_centerpoint/include/lidar_centerpoint/cuda_utils.hpp b/perception/lidar_centerpoint/include/lidar_centerpoint/cuda_utils.hpp
index c1d84666f9a10..5ab26d75a0a41 100644
--- a/perception/lidar_centerpoint/include/lidar_centerpoint/cuda_utils.hpp
+++ b/perception/lidar_centerpoint/include/lidar_centerpoint/cuda_utils.hpp
@@ -56,10 +56,10 @@ namespace cuda
inline void check_error(const ::cudaError_t e, const char * f, int n)
{
if (e != ::cudaSuccess) {
- std::stringstream s;
+ ::std::stringstream s;
s << ::cudaGetErrorName(e) << " (" << e << ")@" << f << "#L" << n << ": "
<< ::cudaGetErrorString(e);
- throw std::runtime_error{s.str()};
+ throw ::std::runtime_error{s.str()};
}
}
@@ -69,13 +69,13 @@ struct deleter
};
template <typename T>
-using unique_ptr = std::unique_ptr<T, deleter>;
+using unique_ptr = ::std::unique_ptr<T, deleter>;
template <typename T>
-typename std::enable_if<std::is_array<T>::value, cuda::unique_ptr<T>>::type make_unique(
- const std::size_t n)
+typename ::std::enable_if<::std::is_array<T>::value, cuda::unique_ptr<T>>::type make_unique(
+ const ::std::size_t n)
{
- using U = typename std::remove_extent<T>::type;
+ using U = typename ::std::remove_extent<T>::type;
U * p;
CHECK_CUDA_ERROR(::cudaMalloc(reinterpret_cast<void **>(&p), sizeof(U) * n));
return cuda::unique_ptr<T>{p};
@@ -107,7 +107,7 @@ inline T * get_next_ptr(size_t num_elem, void *& workspace, size_t & workspace_s
{
size_t size = get_size_aligned(num_elem);
if (size > workspace_size) {
- throw std::runtime_error("Workspace is too small!");
+ throw ::std::runtime_error("Workspace is too small!");
}
workspace_size -= size;
T * ptr = reinterpret_cast<T *>(workspace);
diff --git a/perception/tensorrt_yolo/lib/include/cuda_utils.hpp b/perception/tensorrt_yolo/lib/include/cuda_utils.hpp
index a3b53a73720a1..8a8a21d99e2cb 100644
--- a/perception/tensorrt_yolo/lib/include/cuda_utils.hpp
+++ b/perception/tensorrt_yolo/lib/include/cuda_utils.hpp
@@ -57,10 +57,10 @@ namespace cuda
void check_error(const ::cudaError_t e, const char * f, int n)
{
if (e != ::cudaSuccess) {
- std::stringstream s;
+ ::std::stringstream s;
s << ::cudaGetErrorName(e) << " (" << e << ")@" << f << "#L" << n << ": "
<< ::cudaGetErrorString(e);
- throw std::runtime_error{s.str()};
+ throw ::std::runtime_error{s.str()};
}
}
@@ -69,13 +69,13 @@ struct deleter
void operator()(void * p) const { CHECK_CUDA_ERROR(::cudaFree(p)); }
};
template <typename T>
-using unique_ptr = std::unique_ptr<T, deleter>;
+using unique_ptr = ::std::unique_ptr<T, deleter>;
template <typename T>
-typename std::enable_if<std::is_array<T>::value, cuda::unique_ptr<T>>::type make_unique(
- const std::size_t n)
+typename ::std::enable_if<::std::is_array<T>::value, cuda::unique_ptr<T>>::type make_unique(
+ const ::std::size_t n)
{
- using U = typename std::remove_extent<T>::type;
+ using U = typename ::std::remove_extent<T>::type;
U * p;
CHECK_CUDA_ERROR(::cudaMalloc(reinterpret_cast<void **>(&p), sizeof(U) * n));
return cuda::unique_ptr<T>{p};
@@ -107,7 +107,7 @@ inline T * get_next_ptr(size_t num_elem, void *& workspace, size_t & workspace_s
{
size_t size = get_size_aligned(num_elem);
if (size > workspace_size) {
- throw std::runtime_error("Workspace is too small!");
+ throw ::std::runtime_error("Workspace is too small!");
}
workspace_size -= size;
T * ptr = reinterpret_cast<T *>(workspace);