Issue colcon build isaac_ros-dev package gxf_isaac_ros_unet: nvcc fatal : Unsupported gpu architecture 'compute_89'

My setup:

Isaac Sim v4.5.0

ROS2 Humble

Ubuntu 22.04 (Jammy)

NVIDIA kernel module (NVRM) version 570.86.10

GCC version 11.4.0 (Ubuntu 11.4.0 ~22.04)

CUDA compilation tools, release 12.8, V12.8.61

I verified the CUDA installation using the samples, and I was also able to install cuDNN and TensorRT.

But when I build the packages with colcon build, I keep getting this error about compute_89. For reference, I am using two RTX 4090 GPUs.

What is the issue?

--- stderr: gxf_isaac_ros_unet
CMake Error at /usr/local/share/cmake-3.31/Modules/CMakeTestCUDACompiler.cmake:59 (message):
  The CUDA compiler

    "/usr/bin/nvcc"

  is not able to compile a simple test program.

  It fails with the following output:

    Change Dir: '/home/shiven/isaac_ros_dev/build/gxf_isaac_ros_unet/CMakeFiles/CMakeScratch/TryCompile-KdxX5X'
    
    Run Build Command(s): /usr/local/bin/cmake -E env VERBOSE=1 /usr/bin/gmake -f Makefile cmTC_efaee/fast
    /usr/bin/gmake  -f CMakeFiles/cmTC_efaee.dir/build.make CMakeFiles/cmTC_efaee.dir/build
    gmake[1]: Entering directory '/home/shiven/isaac_ros_dev/build/gxf_isaac_ros_unet/CMakeFiles/CMakeScratch/TryCompile-KdxX5X'
    Building CUDA object CMakeFiles/cmTC_efaee.dir/main.cu.o
    /usr/bin/nvcc -forward-unknown-to-host-compiler   -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86  "--generate-code=arch=compute_89,code=[compute_89,sm_89]" "--generate-code=arch=compute_86,code=[compute_86,sm_86]" "--generate-code=arch=compute_80,code=[compute_80,sm_80]" "--generate-code=arch=compute_75,code=[compute_75,sm_75]" "--generate-code=arch=compute_70,code=[compute_70,sm_70]" -MD -MT CMakeFiles/cmTC_efaee.dir/main.cu.o -MF CMakeFiles/cmTC_efaee.dir/main.cu.o.d -x cu -c /home/shiven/isaac_ros_dev/build/gxf_isaac_ros_unet/CMakeFiles/CMakeScratch/TryCompile-KdxX5X/main.cu -o CMakeFiles/cmTC_efaee.dir/main.cu.o
    nvcc fatal   : Unsupported gpu architecture 'compute_89'
    gmake[1]: *** [CMakeFiles/cmTC_efaee.dir/build.make:82: CMakeFiles/cmTC_efaee.dir/main.cu.o] Error 1
    gmake[1]: Leaving directory '/home/shiven/isaac_ros_dev/build/gxf_isaac_ros_unet/CMakeFiles/CMakeScratch/TryCompile-KdxX5X'
    gmake: *** [Makefile:133: cmTC_efaee/fast] Error 2
    
    

  

  CMake will not be able to correctly generate this project.
Call Stack (most recent call first):
  CMakeLists.txt:35 (enable_language)

This is my CMake file:

# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0

cmake_minimum_required(VERSION 3.22.1)
project(gxf_isaac_ros_unet LANGUAGES C CXX)

if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
  add_compile_options(-fPIC -w)
endif()

find_package(ament_cmake_auto REQUIRED)
ament_auto_find_build_dependencies()

# Dependencies
find_package(CUDAToolkit)
find_package(yaml-cpp)

set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -gencode arch=compute_80,code=sm_80")
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -gencode arch=compute_86,code=sm_86")

enable_language(CUDA)

# Create extension
ament_auto_add_library(${PROJECT_NAME} SHARED
  gxf/image_segmentation/segmentation_mask_colorizer.cpp
  gxf/image_segmentation/segmentation_mask_colorizer.hpp
  gxf/image_segmentation/segmentation_postprocessing_utils.hpp
  gxf/image_segmentation/segmentation_postprocessor_ext.cpp
  gxf/image_segmentation/segmentation_postprocessor.cpp
  gxf/image_segmentation/segmentation_postprocessor.hpp
  gxf/image_segmentation/segmentation_postprocessor.cu.cpp
  gxf/image_segmentation/segmentation_postprocessor.cu.hpp
  gxf/image_segmentation/segmentation_mask_colorizer.cu.cpp
  gxf/image_segmentation/segmentation_mask_colorizer.cu.hpp
)

# Mark as CUDA files with non-standard extensions
set_source_files_properties(
  gxf/image_segmentation/segmentation_postprocessor.cu.cpp
  gxf/image_segmentation/segmentation_postprocessor.cu.hpp
  gxf/image_segmentation/segmentation_mask_colorizer.cu.cpp
  gxf/image_segmentation/segmentation_mask_colorizer.cu.hpp
  PROPERTIES LANGUAGE CUDA
)

target_link_libraries(${PROJECT_NAME}
  CUDA::cudart
  yaml-cpp
)

target_include_directories(${PROJECT_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/gxf")

set_target_properties(${PROJECT_NAME} PROPERTIES
  BUILD_WITH_INSTALL_RPATH TRUE
  BUILD_RPATH_USE_ORIGIN TRUE
  INSTALL_RPATH_USE_LINK_PATH TRUE
)

# Install the binary file
install(TARGETS ${PROJECT_NAME} DESTINATION share/${PROJECT_NAME}/gxf/lib)


# Embed versioning information into installed files
ament_index_get_resource(ISAAC_ROS_COMMON_CMAKE_PATH isaac_ros_common_cmake_path isaac_ros_common)
include("${ISAAC_ROS_COMMON_CMAKE_PATH}/isaac_ros_common-version-info.cmake")
generate_version_info(${PROJECT_NAME})

ament_auto_package(INSTALL_TO_SHARE)
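One thing I noticed: this CMake file only appends gencode flags for compute_80 and compute_86, yet the failing try-compile also passes --generate-code flags for compute_89, so those presumably come from CMAKE_CUDA_ARCHITECTURES set elsewhere in the Isaac ROS build environment. As a sketch (the value below is just an assumption to rule out compute_89, not a proper fix), the architecture list can be overridden at configure time:

colcon build --packages-select gxf_isaac_ros_unet --cmake-args -DCMAKE_CUDA_ARCHITECTURES="80;86"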

Hi @shivengarg8426

Thank you for your post. To better understand, are you working inside the Isaac ROS Docker container?

Best,
Raffaello


Yes, I am working inside the Isaac ROS Docker container.

Any update? Yes, I am working with the Isaac ROS Docker container.

Hi @shivengarg8426

Are you running this demo, compiling it from source? isaac_ros_unet — isaac_ros_docs documentation

I am still trying to figure out where your error is coming from.

Best,
Raffaello

Hi @shivengarg8426

What is the version of Isaac ROS you are using? If you are using the Docker container launched by Isaac ROS 3.2, the installed nvcc should be 12.6 and located at /usr/local/cuda/bin/nvcc.
Also, could you check the version of /usr/bin/nvcc?
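For example (the second path is assumed from the standard container layout; nvcc only accepts compute_89 from CUDA 11.8 onward, so an older distro-packaged nvcc first on the PATH would explain the error):

/usr/bin/nvcc --version
/usr/local/cuda/bin/nvcc --version
/usr/bin/nvcc --list-gpu-arch   # compute_89 should appear here if sm_89 is supported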

Best,
Ahung

I used nvcc 12.8 but have now switched to nvcc 12.6. I will check and let you know if that fixed the issue.
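For reference, this is roughly what I plan to run (paths assumed from the container layout). Clearing the old build directory matters because CMake caches the previously detected CUDA compiler:

export CUDACXX=/usr/local/cuda/bin/nvcc
rm -rf build/gxf_isaac_ros_unet
colcon build --packages-select gxf_isaac_ros_unet --cmake-args -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc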