Error: this OpenMP construct is not supported in NVIDIA subset: declare reduction for user defined reductions #pragma omp declare reduction

Hi, I got an error when compiling an open-source code base that uses the OpenMP, NCCL, and NVSHMEM libraries, targeting multiple GPUs. Below is the CMakeLists.txt:

cmake_minimum_required(VERSION 3.8 FATAL_ERROR)
project(MGG LANGUAGES CXX CUDA)


#############################################################################

find_package(MPI REQUIRED)
if(MPI_FOUND)
    message(STATUS "MPI found")
    SET(CMAKE_CXX_COMPILER mpicxx)   
endif()

# set($ENV{PATH} "/home/linzhiheng/thirdparty/openmpi/bin/:$ENV{PATH}")

#set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/cuda-12/")
#set(CUDA_TOOLKIT_ROOT_DIR "/home/linzhiheng/cuda-12.2/")
set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/cuda-12.1/")

# set(MPI_HOME "/home/linzhiheng/thirdparty/openmpi/")
# set(MPI_HOME "/data-ssd/home/zhenlin/nvidia/hpc_sdk/")

set(CUDA_HOME "/usr/local/cuda-12.1/")
# set(CUDA_HOME "/home/linzhiheng/cuda-12.2/")
# set(CUDNN_HOME "/home/linzhiheng/thirdparty/cudnn-linux-x86_64-8.9.4.25_cuda12-archive/")

# set(NVSHMEM_HOME "/home/linzhiheng/thirdparty/nvshmem_src_2.9.0-2/build/")
# set(NVSHMEM_HOME "/data-ssd/home/zhenlin/nvidia/hpc_sdk/")
set(NVSHMEM_HOME "/data-ssd/home/zhenlin/nvidia/hpc_sdk/Linux_x86_64/23.5/comm_libs/nvshmem/")
# set(NVSHMEM_HOME "/opt/nvidia/hpc_sdk/")

# set(NCCL_HOME "/home/linzhiheng/nccl/build/")
set(NCCL_HOME "/data-ssd/home/zhenlin/workspace/graphmining/nccl/build/")
# set(NCCL_HOME "/opt/nccl/build/")



include_directories(
    ${NVSHMEM_HOME}/include
    SYSTEM ${MPI_C_INCLUDE_PATH}
    SYSTEM ${MPI_INCLUDE_PATH}
    ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}
)


#############################################################################
set(CUDA_ARCHS 80)
set(SM_ARCH "sm_86")
set($ENV{NVCC_GENCODE} ${SM_ARCH})
set($ENV{NVSHMEM_USE_GDRCOPY} "0")

set(CMAKE_CUDA_ARCHITECTURES 80)
#############################################################################
#link_directories(/usr/lib/x86_64-linux-gnu)
link_directories(${NVSHMEM_HOME}/lib)
# link_directories(${NVSHMEM_HOME}/Linux_x86_64/23.5/comm_libs/nvshmem/lib)

# link_directories(${CUDNN_HOME}/lib64)
# link_directories(${MPI_HOME}/lib)
# link_directories(${MPI_HOME}/Linux_x86_64/23.5/comm_libs/mpi/lib)

link_directories(/usr/local/lib)
link_directories(${NCCL_HOME}/lib)
link_directories(${CUDA_HOME}/lib64)

add_definitions(-w)

find_package(OpenMP)
if (OPENMP_FOUND)
    message(STATUS "OpenMP found")
    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
    set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}")
endif()

find_package(MPI REQUIRED)
if(MPI_FOUND)
    SET(CMAKE_CXX_COMPILER mpicxx)   
endif()

find_package(Threads REQUIRED)

#############################################################################
add_executable(jupiter_gpm
    common/graph_partition.cc
    common/graph.cc
    common/scheduler.cc
    common/VertexSet.cc
    include/common.h
    include/graph.h
    include/graph_gpu.h
    include/graph_partition.h
    src/main.cc
    src/jupiter_gpm.cu
)

target_include_directories(jupiter_gpm 
    PRIVATE 
    ${NVSHMEM_HOME}/include 
    ${CUDA_HOME}/include 
    # ${MPI_HOME}/include 
    SYSTEM ${MPI_C_INCLUDE_PATH} 
    # ${CUDNN_HOME}/include
    include
    ${CUCO_HOME}/include
)

target_compile_options(jupiter_gpm PRIVATE $<$<COMPILE_LANGUAGE:CUDA>:
                        -fopenmp
                        # -std=c++11
                        -std=c++17
                        -Xcompiler 
                        -pthread 
                        -rdc=true 
                        -ccbin g++ 
                        -arch ${SM_ARCH}
                        --expt-relaxed-constexpr
                        --expt-extended-lambda
                       >)

set_target_properties(jupiter_gpm PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
set_target_properties(jupiter_gpm PROPERTIES CUDA_ARCHITECTURES "80")

target_link_libraries(jupiter_gpm
    nvshmem 
    cuda
    #mpi_cxx 
    mpi 
    cublas 
    # cudnn 
    gomp 
    curand
    OpenMP::OpenMP_CXX 
    ${MPI_C_LIBRARIES}
)
target_link_libraries(jupiter_gpm nvidia-ml)

My nvcc version is 12.1. How can I fix this problem?
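For reference, the construct being flagged is an OpenMP user-defined reduction. A minimal standalone example of that kind of declaration (an illustration of the construct only, not the actual code from the project) would look like this:

#include <cstdio>
#include <vector>

// A small aggregate to reduce over (illustrative type, not from the project).
struct MinMax {
    int lo;
    int hi;
};

// The user-defined reduction: it tells OpenMP how to combine two partial
// results (omp_out, omp_in) and how to initialize a thread-private copy
// (omp_priv). This is the construct the diagnostic refers to.
#pragma omp declare reduction(minmax : MinMax :                              \
        omp_out = MinMax{omp_out.lo < omp_in.lo ? omp_out.lo : omp_in.lo,    \
                         omp_out.hi > omp_in.hi ? omp_out.hi : omp_in.hi})   \
        initializer(omp_priv = MinMax{1 << 30, -(1 << 30)})

int main() {
    std::vector<int> v(1000);
    for (int i = 0; i < 1000; ++i) v[i] = (i * 37) % 997;

    MinMax r{1 << 30, -(1 << 30)};
    // The named reduction is then used in a reduction clause.
    #pragma omp parallel for reduction(minmax : r)
    for (int i = 0; i < 1000; ++i) {
        if (v[i] < r.lo) r.lo = v[i];
        if (v[i] > r.hi) r.hi = v[i];
    }
    std::printf("min=%d max=%d\n", r.lo, r.hi);
    return 0;
}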

This forum is specifically for nvcc. nvcc knows nothing about OpenMP.

I believe the error is coming from compilation with the HPC SDK compilers rather than from nvcc, and I think your question belongs on that forum.
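As a possible stopgap, if you can modify the code that declares the reduction, the same result can usually be expressed without #pragma omp declare reduction by keeping per-thread partial results and merging them once per thread. A sketch of that pattern, using the illustrative MinMax type from the example above rather than the project's real data structures:

#include <cstdio>
#include <vector>

struct MinMax {
    int lo;
    int hi;
};

int main() {
    std::vector<int> v(1000);
    for (int i = 0; i < 1000; ++i) v[i] = (i * 37) % 997;

    MinMax r{1 << 30, -(1 << 30)};

    #pragma omp parallel
    {
        // Each thread accumulates into its own private partial result...
        MinMax local{1 << 30, -(1 << 30)};
        #pragma omp for nowait
        for (int i = 0; i < 1000; ++i) {
            if (v[i] < local.lo) local.lo = v[i];
            if (v[i] > local.hi) local.hi = v[i];
        }
        // ...and the partials are merged once per thread under a critical
        // section, so no user-defined reduction is needed.
        #pragma omp critical
        {
            if (local.lo < r.lo) r.lo = local.lo;
            if (local.hi > r.hi) r.hi = local.hi;
        }
    }
    std::printf("min=%d max=%d\n", r.lo, r.hi);
    return 0;
}

This avoids the declare reduction directive entirely, at the cost of one short critical section per thread.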