I have a very simple piece of code:
#ifndef BASE_H
#define BASE_H

#include <cmath>

class Base
{
public:
    Base() : x(0.0), y(0.0) {}
    Base(double _x, double _y) : x(_x), y(_y) {}
    /*virtual*/ double GetR() { return std::sqrt(x*x + y*y); }
public:
    double x, y;
};

class Base1 : public Base
{
public:
    Base1() : Base(), z(0.0) {}
    Base1(double _x, double _y, double _z) : Base(_x, _y), z(_z) {}
    double GetR() { return std::sqrt(x*x + y*y + z*z); }
private:
    double z;
};

#endif // BASE_H
and
#include <cmath>
#include <iostream>

#include "Base.h"

int main(int argc, char ** argv)
{
    Base1 b1(1, 2, 3);
    std::cout << "1: " << std::endl;
    Base * b = &b1;

    const int N = 1000;
    double arr[N]{0.0};

    // copy b1 itself, plus a zero-length section of b so the pointer
    // is translated to b1's device copy
    #pragma acc data create(arr) copy(b1, b[0:0])
    {
        #pragma acc parallel loop gang vector present(arr, b)
        for(int i = 0; i < N; ++i)
        {
            arr[i] = b->GetR();
        }

        std::cout << "2: " << std::endl;

        #pragma acc update host(arr)
        for(int i = 0; i < N; ++i) std::cout << arr[i] << " ";
        std::cout << std::endl;
    }
    return 0;
}
I found that C++ inheritance works on the GPU: if I write
Base1 * b = &b1;
instead, everything works properly. But if I uncomment the /*virtual*/ and make GetR() a virtual function, the compilation fails.
I build with the following command line:
cmake . -DCMAKE_C_COMPILER=pgcc -DCMAKE_CXX_COMPILER=pgc++ -DCMAKE_C_FLAGS="-acc -Minfo=acc -mcmodel=medium -ta=tesla:cc30 -Minline -Mcuda=cuda10.1" -DCMAKE_CXX_FLAGS="-acc -Minfo=acc -mcmodel=medium -ta=tesla:cc30 -Minline -Mcuda=cuda10.1" -DCMAKE_CXX_STANDARD=17
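For reference, outside of CMake I believe this corresponds to a plain line roughly like the following (the --c++17 flag is my guess at the pgc++ equivalent of CMAKE_CXX_STANDARD=17, and the file name is mine):
pgc++ -acc -Minfo=acc -mcmodel=medium -ta=tesla:cc30 -Minline -Mcuda=cuda10.1 --c++17 main.cpp -o main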
Can we use virtual functions on the GPU or not?
My task is the following: I need an interface class Base_class with a virtual function Function(), inherited by several subclasses
(Sub1, Sub2, Sub3, Sub4), each of which overrides this virtual function.
I want to create the array "pointer" on the CPU:
Base_class * pointer[4];
pointer[0] = new Sub1();
pointer[1] = new Sub2();
pointer[2] = new Sub3();
pointer[3] = new Sub4();
and then copy it to the GPU.
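For the copy I would attempt an OpenACC manual deep copy along these lines (a sketch only; I suspect copyin moves the objects' raw bytes, so the host vtable pointers and only the Base_class subobject would be copied, which is part of what I am asking):
// copy the array of four host pointers to the device...
#pragma acc enter data copyin(pointer[0:4])
// ...then copy each pointed-to object and attach it to pointer[i]
for(int i = 0; i < 4; ++i)
{
    #pragma acc enter data copyin(pointer[i][0:1])
}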
Will i then be able to call from the code on GPU:
for(int i=0; i<4; ++i) pointer[i]->Function();
where Function() is a virtual function of base class, redefined in Sub1-4 sublasses?
Will it work on GPU?
If not, are there any ways to do it on GPU without C++ virtual functions or not?
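The only alternative I can think of is to replace the vtable with an explicit type tag and a switch, roughly like this (a minimal sketch with made-up subclass behavior; Kind, Any and the per-case formulas are all mine, assuming the set of subclasses is fixed at compile time):

#include <cmath>

enum Kind { kSub1, kSub2, kSub3, kSub4 };

struct Any
{
    Kind kind;  // explicit type tag instead of a vtable pointer
    double x;   // payload shared by all "subclasses" in this sketch

    #pragma acc routine seq
    double Function() const
    {
        // manual dispatch: each case plays the role of one override
        switch(kind)
        {
            case kSub1: return x + 1.0;
            case kSub2: return x * 2.0;
            case kSub3: return std::sqrt(x);
            default:    return -x; // kSub4
        }
    }
};

int main()
{
    Any objs[4] = {{kSub1, 1.0}, {kSub2, 1.0}, {kSub3, 1.0}, {kSub4, 1.0}};
    double out[4];
    #pragma acc parallel loop copyin(objs) copyout(out)
    for(int i = 0; i < 4; ++i) out[i] = objs[i].Function();
    return 0;
}

Since Any is a plain struct with no hidden pointers, copyin(objs) should be enough to move everything the device needs. Is this the recommended pattern, or is there something better?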