I am building a ROS node that utilizes CUDA and PyTorch within the node, and the node is built inside a docker container. I am struggling to get past the line of my CmakeLists “find_package(CUDA REQUIRED)” which is indicating that CUDA is not being installed correctly. The error as well as the current workflow is provided below. As a new user, I can only put one embedded media item, so the code will just be pasted in.
This is the error:
The Dockerfile first runs this install-libraries script which installs CUDA according to this website
install opencv
sudo apt-get update && sudo apt-get install -y cmake g++ wget unzip software-properties-common
sudo add-apt-repository universe
sudo apt-get update
Install CUDA
sudo wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-ubuntu2204.pin
sudo mv cuda-ubuntu2204.pin /etc/apt/preferences.d/cuda-repository-pin-600
sudo dpkg -i cuda-tegra-repo-ubuntu2204-12-5-local_12.5.1-1_arm64.deb
sudo cp /var/cuda-tegra-repo-ubuntu2204-12-5-local/cuda-*-keyring.gpg /usr/share/keyrings/
sudo apt-get update
sudo apt-get -y install cuda-toolkit-12-5 cuda-compat-12-5
After, the Dockerfile sets the necessary environment variables for CUDA:
Run install libraries script
WORKDIR /root/
COPY ./install-libraries.sh install-libraries.sh
RUN chmod +x ./install-libraries.sh
RUN ./install-libraries.sh
#ENV PATH=/usr/local/bin:$PATH
ENV CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-12.5
ENV CUDA_NVCC_EXECUTABLE=/usr/local/cuda-12.5/bin/nvcc
ENV CUDA_INCLUDE_DIRS=/usr/local/cuda-12.5/include
ENV CUDA_CUDART_LIBRARY=/usr/local/cuda-12.5/lib64/libcudart.so
ENV PATH=/usr/local/cuda-12.5/bin:$PATH
ENV LD_LIBRARY_PATH=/usr/local/cuda-12.5/lib64:$LD_LIBRARY_PATH
ARG CCACHE_DIR
RUN mkdir -p $CCACHE_DIR
Below is the relevant section of my CMakeLists:
cmake_minimum_required(VERSION 3.5)
project(ros2_node)
Default to C++17
set(CMAKE_CXX_STANDARD 17)
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES “Clang”)
add_compile_options(-Wall -Wextra -Wpedantic)
endif()
find_package( OpenCV REQUIRED )
find_package(ament_cmake REQUIRED)
find_package(rclcpp REQUIRED)
find_package(rclcpp_action REQUIRED)
find_package(std_msgs REQUIRED)
find_package(action_msgs REQUIRED)
find_package(sensor_msgs REQUIRED)
find_package(rosidl_default_generators REQUIRED)
find_package(rosidl_typesupport_cpp REQUIRED)
find_package(std_srvs REQUIRED)
find_package(cv_bridge REQUIRED)
Manually set Torch and CUDA paths
set(CMAKE_CUDA_STANDARD 14)
set(CMAKE_CUDA_STANDARD_REQUIRED ON)
set(CUDA_TOOLKIT_ROOT_DIR /usr/local/cuda-12.5)
set(CMAKE_CUDA_COMPILER ${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc)
list(APPEND CMAKE_PREFIX_PATH ${CUDA_TOOLKIT_ROOT_DIR})
set(CUDA_NVCC_EXECUTABLE /usr/local/cuda-12.5/bin/nvcc)
set(CUDA_INCLUDE_DIRS /usr/local/cuda-12.5/include)
set(CUDA_CUDART_LIBRARY /usr/local/cuda-12.5/lib64/libcudart.so)
find_package( CUDA REQUIRED ) #this is line 33
For now, the rest of the install-libraries script regarding cuDNN and PyTorch are not relevant as I cannot even get past correctly integrating CUDA in the CMakeLists. I would like to point out that my current dockerfile and CMakeLists format work for other nodes that do not use CUDA, but instead use opencv and all the other libraries listed besides CUDA and Torch.