parse use of undefined value '%sym_ftn_sqrt_297_p0'

Hi again,

This is my code:

ATTRIBUTES(global) SUBROUTINE ForkCollapse3Dto2D (ipointerStart, &
                                                  ipointerEnd, &
                                                  ipointerBlock, &
                                                  numbasis, &
                                                  kp0, &
                                                  kpt, &
                                                  igall, igall_size, &
                                                  cx, &
                                                  cvs1, &
                                                  csm, csm_row, csm_col)
	USE cudafor
	IMPLICIT NONE
	INTEGER,	INTENT(IN) 	:: 	ipointerStart, ipointerBlock, ipointerEnd
	INTEGER,	INTENT(IN)	::	numbasis, kp0, kpt, igall_size, csm_row, csm_col
	INTEGER,	INTENT(IN)	::	igall(igall_size)
	COMPLEX*16,	INTENT(IN)::	cx(numbasis)
	COMPLEX*16,	INTENT(IN)::	cvs1(2, kp0, numbasis)
	COMPLEX*16,	INTENT(INOUT)	::	csm(csm_row, csm_col)
	!Working variables
	INTEGER	::	ns, ipindexStart, ipindexEnd, ipindexBlock, ipindex, ipointer, i, istat
	
	!GPGPU Kernel Declaration
	TYPE(dim3)	::	dimGrid, dimBlock
	
	i = (blockidx%x-1) * blockdim%x + threadidx%x
	ipointer = ipointerStart + i - 1
	if(ipointer .gt. ipointerEnd) return
	
	ns = (ipointer - 1) * kpt
	ipindexStart = igall(ipointer)
	ipindexEnd   = igall(ipointer+1)-1
	ipindexBlock = ipindexEnd - ipindexStart + 1
	
! 	do ipindex = ipindexStart, ipindexEnd
!       csm(1:2, ns+1 : ns+kpt) = csm(1:2, ns+1 : ns+kpt) + cvs1(1:2,1:kpt,ipindex) * cx(ipindex)
! 	enddo

	dimBlock = dim3(128,1,1)
	dimGrid = dim3(ceiling(real(kpt)/dimBlock%x),1,1)
	
	CALL collapse3Dto2D <<<dimGrid, dimBlock>>> (ns, kpt, ipindexStart, ipindexEnd, numbasis, cx, cvs1, csm)
	
END SUBROUTINE ForkCollapse3Dto2D

While compiling with pgfortran, I get this error:

Error: /tmp/pgcudaforOYhb4kn9RGNL.gpu (289, 47): parse use of undefined value ‘%sym_ftn_sqrt_297_p0’
PGF90-F-0155-Compiler failed to translate accelerator region (see -Minfo messages): Device compiler exited with error status code (cacxmp_gpu.f90: 1)

I don’t understand the compiler error, but I know that it comes from:

CALL collapse3Dto2D <<<dimGrid, dimBlock>>> (ns, kpt, ipindexStart, ipindexEnd, numbasis, cx, cvs1, csm)

Here, collapse3Dto2D is an empty kernel:

ATTRIBUTES(device) SUBROUTINE collapse3Dto2D (ns, kpt, ipindexStart, ipindexEnd, numbasis, cx, cvs1, csm)
	USE cudafor
	
END SUBROUTINE collapse3Dto2D

The empty subroutine is tasked with performing these 3 commented lines:

! 	do ipindex = ipindexStart, ipindexEnd
!       csm(1:2, ns+1 : ns+kpt) = csm(1:2, ns+1 : ns+kpt) + cvs1(1:2,1:kpt,ipindex) * cx(ipindex)
! 	enddo

How do I solve this problem?

Regards,
Phoon

Hi Phoon,

This looks like a compiler error, but I suspect it’s being triggered by a coding error on your side that is sending the compiler down a bad path.

For device subroutines, don’t use the chevron syntax. That’s only for global subroutines. In other words, change:

   dimBlock = dim3(128,1,1) 
   dimGrid = dim3(ceiling(real(kpt)/dimBlock%x),1,1) 
    
   CALL collapse3Dto2D <<<dimGrid, dimBlock>>> (ns, kpt, ipindexStart, ipindexEnd, numbasis, cx, cvs1, csm)

To just:

   CALL collapse3Dto2D(ns, kpt, ipindexStart, ipindexEnd, numbasis, cx, cvs1, csm)

If the error persists after this change, please post a full reproducible example and I’ll investigate the root cause.

Thanks,
Mat

Hi Mat,

To clarify, I would like to do dynamic parallelism. So by using:

CALL collapse3Dto2D(ns, kpt, ipindexStart, ipindexEnd, numbasis, cx, cvs1, csm)

Am I still using dynamic parallelism?

Could you kindly provide an example showing how dynamic parallelism is used without the triple chevron?

Regards,
Phoon

“To clarify, I would like to do dynamic parallelism.”

In that case, you’ll want collapse3Dto2D to have the “global” attribute:

module foo

contains

ATTRIBUTES(global) SUBROUTINE collapse3Dto2D (ns, kpt, ipindexStart, ipindexEnd, numbasis, cx, cvs1, csm)
   USE cudafor
   implicit none
   integer, value :: ns, kpt, ipindexStart, ipindexEnd, numbasis
   complex(8) :: cx(:), cvs1(:,:,:), csm(:,:)
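
   ! Not part of the original post: a sketch of the body this child kernel
   ! would need in order to perform the commented-out loop from the parent.
   ! One thread handles one column of the kpt-wide slab, so each thread
   ! owns its own csm elements and the ipindex sum needs no atomics.
   integer :: j, ipindex
   j = (blockidx%x-1) * blockdim%x + threadidx%x
   if (j .le. kpt) then
      do ipindex = ipindexStart, ipindexEnd
         csm(1:2, ns+j) = csm(1:2, ns+j) + cvs1(1:2, j, ipindex) * cx(ipindex)
      enddo
   endif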

END SUBROUTINE collapse3Dto2D

ATTRIBUTES(global) SUBROUTINE ForkCollapse3Dto2D (ipointerStart,&
                                                                           ipointerEnd, &
                                                                           ipointerBlock, &
                                                                           numbasis, &
                                                                           kp0, &
                                                                           kpt, &
                                                                           igall, igall_size, &
                                                                           cx,&
                                                                           cvs1, &
                                                                           csm, csm_row, csm_col)
   USE cudafor
   IMPLICIT NONE
   INTEGER,   INTENT(IN)    ::    ipointerStart, ipointerBlock, ipointerEnd
   INTEGER,   INTENT(IN)   ::   numbasis, kp0, kpt, igall_size, csm_row, csm_col
   INTEGER,   INTENT(IN)   ::   igall(igall_size)
   COMPLEX*16,   INTENT(IN)::   cx(numbasis)
   COMPLEX*16,   INTENT(IN)::   cvs1(2, kp0, numbasis)
   COMPLEX*16,   INTENT(INOUT)   ::   csm(csm_row, csm_col)
   !Working variables
   INTEGER   ::   ns, ipindexStart, ipindexEnd, ipindexBlock, ipindex, ipointer, i, istat

   !GPGPU Kernel Declaration
   TYPE(dim3)   ::   dimGrid, dimBlock

   i = (blockidx%x-1) * blockdim%x + threadidx%x
   ipointer = ipointerStart + i - 1
   if(ipointer .gt. ipointerEnd) return

   ns = (ipointer - 1) * kpt
   ipindexStart = igall(ipointer)
   ipindexEnd   = igall(ipointer+1)-1
   ipindexBlock = ipindexEnd - ipindexStart + 1

!    do ipindex = ipindexStart, ipindexEnd
!       csm(1:2, ns+1 : ns+kpt) = csm(1:2, ns+1 : ns+kpt) + cvs1(1:2,1:kpt,ipindex) * cx(ipindex)
!    enddo

   dimBlock = dim3(128,1,1)
   dimGrid = dim3(ceiling(real(kpt)/dimBlock%x),1,1)

   CALL collapse3Dto2D <<<dimGrid, dimBlock>>> (ns, kpt, ipindexStart, ipindexEnd, numbasis, cx, cvs1, csm)

END SUBROUTINE ForkCollapse3Dto2D

end module foo
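
For reference, here is a rough sketch of how the host side might launch the parent kernel. The names and sizes here (test_fork, npointers, the _d arrays) are placeholders made up for illustration, not taken from your code. Also note that dynamic parallelism requires a device of compute capability 3.5 or higher, so you need to target cc35 or above when compiling (with pgfortran, something like -Mcuda=cc35; check your compiler version’s documentation for the exact sub-options).

program test_fork
   use cudafor
   use foo
   implicit none
   ! Placeholder problem sizes, chosen only for illustration
   integer, parameter :: numbasis = 1024, kp0 = 64, kpt = 64
   integer, parameter :: npointers = 256, igall_size = npointers + 1
   integer,    device :: igall_d(igall_size)
   complex(8), device :: cx_d(numbasis)
   complex(8), device :: cvs1_d(2, kp0, numbasis)
   complex(8), device :: csm_d(2, npointers*kpt)
   type(dim3) :: grid, block
   integer    :: istat

   ! ... fill igall_d, cx_d, cvs1_d and zero csm_d here ...

   ! One parent thread per ipointer in [1, npointers]
   block = dim3(128, 1, 1)
   grid  = dim3((npointers + block%x - 1) / block%x, 1, 1)
   call ForkCollapse3Dto2D<<<grid, block>>>(1, npointers, npointers, &
           numbasis, kp0, kpt, igall_d, igall_size, cx_d, cvs1_d, &
           csm_d, 2, npointers*kpt)
   istat = cudaDeviceSynchronize()
end program test_fork

The child kernels launched inside ForkCollapse3Dto2D are asynchronous with respect to the launching thread, but CUDA guarantees they complete before the parent grid is considered finished, so the host-side cudaDeviceSynchronize above is sufficient.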

Thank you and have a nice day!