How to use OpenGL 3.3 with CUDA via CUDA-OpenGL interoperability

I want to render a mesh whose vertices and surfaces are populated and constantly updated inside the GPU.

For simple rendering with OpenGL 3.3 alone, I have been copying the data from the GPU back to the CPU and then doing the following:

// Initialisation and OpenGL version setup
bool initOpenGL() {
    // init GLFW and GLEW
    // set up callback functions for handling keyboard and mouse
    // create window and set up viewport

    return all_ok;
}
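Concretely, initOpenGL() does roughly the following (the window size, title and the callback bodies here are placeholders):

#include <GL/glew.h>
#include <GLFW/glfw3.h>

GLFWwindow *gWindow = nullptr;

bool initOpenGL() {
    if (!glfwInit())
        return false;

    // request an OpenGL 3.3 core profile context
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);

    gWindow = glfwCreateWindow(1024, 768, "Mesh demo", nullptr, nullptr);
    if (gWindow == nullptr) {
        glfwTerminate();
        return false;
    }
    glfwMakeContextCurrent(gWindow);

    // keyboard / mouse callbacks registered here (omitted)

    glewExperimental = GL_TRUE;
    if (glewInit() != GLEW_OK)
        return false;

    glViewport(0, 0, 1024, 768);
    glEnable(GL_DEPTH_TEST);
    return true;
}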
Here is the main():

int main(){
if (!initOpenGL())
{
// An error occurred
std::cerr << "initialization failed" << std::endl;
return -1;
}

// load and compile shader logic

ShaderProgram myShader;
myShader.loadShaders("shaders/shader_name.vert", "shaders/shader_name.frag");

//Load Mesh
Mesh myMesh;

/*
For this simple demo I am loading a Wavefront file, but in the actual problem the vertices and
surfaces change continuously, which means the VBO contents have to be updated inside the game loop
(see the per-frame update sketch after draw() below).
*/

myMesh.loadOBJ("models/some_mesh.obj");

//load texture for texturing the wavefront object (again for demonstration purpose)

Texture2D myTexture;

myTexture.loadTexture("textures/some_image.png", true);

/* Set up light position and other state like the model's world position */
[..]

while (!glfwWindowShouldClose(gWindow)){
    //Setup model, view, projection matrices
    [..]

    //setup uniforms in shaders like:
    myShader.use();
    myShader.setUniform("model", glm::mat4(1.0)); 
    myShader.setUniform("view", view);
    myShader.setUniform("projection", projection);
    myShader.setUniform("viewPos", viewPos);
    [...]

    // DRAW mesh
    myTexture.bind(0);
    myMesh.draw(); // Renders the OBJ mesh
    myTexture.unbind(0);

    glfwSwapBuffers(gWindow);
    glfwPollEvents();
}

}
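The setUniform() overloads are thin wrappers around glGetUniformLocation plus the matching glUniform* call; assuming a member mHandle that holds the linked program id, they look roughly like this:

#include <glm/gtc/type_ptr.hpp>

void ShaderProgram::setUniform(const GLchar *name, const glm::mat4 &m)
{
    GLint loc = glGetUniformLocation(mHandle, name); // mHandle = linked program id
    glUniformMatrix4fv(loc, 1, GL_FALSE, glm::value_ptr(m));
}

void ShaderProgram::setUniform(const GLchar *name, const glm::vec3 &v)
{
    GLint loc = glGetUniformLocation(mHandle, name);
    glUniform3fv(loc, 1, glm::value_ptr(v));
}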
Here is loadOBJ(), which populates a flat array with the per-vertex data of the mesh by parsing the Wavefront .obj file:

bool Mesh::loadOBJ(const std::string &filename){
//parse obj file

// iterate over each face to construct one flat array *mVertices* containing the position, normal and texCoord info like:
mVertices = [[vec3(x,y,z), vec3(nx,ny,nz), vec2(tx,ty)], [...], [...], ...]

/* mVertices is an array of 'Vertex'. where :

    struct Vertex{
    glm::vec3 position;
    glm::vec3 normal;
    glm::vec2 texCoords;
    };

*/  
//initialize VBOs and VAOs
glGenVertexArrays(1, &mVAO);
glGenBuffers(1, &mVBO);

glBindVertexArray(mVAO);
glBindBuffer(GL_ARRAY_BUFFER, mVBO);
glBufferData(GL_ARRAY_BUFFER, mVertices.size() * sizeof(Vertex), &mVertices[0], GL_STATIC_DRAW);

// Vertex Positions
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid *)0);
glEnableVertexAttribArray(0);

// Normals attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid *)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);

// Vertex Texture Coords
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid *)(6 * sizeof(GLfloat)));
glEnableVertexAttribArray(2);

// unbind to make sure other code does not change it somewhere else
glBindVertexArray(0);

return true;
}
Here is draw(), which is called in every iteration of the game loop:

glBindVertexArray(mVAO);
glDrawArrays(GL_TRIANGLES, 0, mVertices.size());
glBindVertexArray(0);
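Because the vertex data changes, right now I refill mVertices on the CPU and re-upload the whole buffer every frame before drawing, roughly like this (GL_DYNAMIC_DRAW instead of GL_STATIC_DRAW; updateBuffer() is just an illustrative name):

// called each frame after mVertices has been refilled on the CPU side
void Mesh::updateBuffer()
{
    glBindBuffer(GL_ARRAY_BUFFER, mVBO);
    // orphan the old storage and upload the new vertex data
    glBufferData(GL_ARRAY_BUFFER, mVertices.size() * sizeof(Vertex),
                 mVertices.data(), GL_DYNAMIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}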
For loading the texture:

bool Texture2D::loadTexture(const string &fileName, bool generateMipMaps)
{
int width, height, components;

// Use stbi image library to load our image
unsigned char *imageData = stbi_load(fileName.c_str(), &width, &height, &components, STBI_rgb_alpha);

if (imageData == NULL)
{
    std::cerr << "Error loading texture '" << fileName << "'" << std::endl;
    return false;
}

// Flip the image vertically (stb_image loads top-to-bottom, OpenGL expects bottom-to-top)
int widthInBytes = width * 4;
unsigned char *top = NULL;
unsigned char *bottom = NULL;
unsigned char temp = 0;
int halfHeight = height / 2;
for (int row = 0; row < halfHeight; row++)
{
    top = imageData + row * widthInBytes;
    bottom = imageData + (height - row - 1) * widthInBytes;
    for (int col = 0; col < widthInBytes; col++)
    {
        temp = *top;
        *top = *bottom;
        *bottom = temp;
        top++;
        bottom++;
    }
}

glGenTextures(1, &mTexture);
glBindTexture(GL_TEXTURE_2D, mTexture); 


glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData);

if (generateMipMaps)
    glGenerateMipmap(GL_TEXTURE_2D);

stbi_image_free(imageData);
glBindTexture(GL_TEXTURE_2D, 0); // unbind texture when done so we don't accidentally mess up our mTexture

return true;

}
and for binding the texture I use:

glActiveTexture(GL_TEXTURE0 + texUnit);
glBindTexture(GL_TEXTURE_2D, mTexture);
I have demonstrated the question by parsing a Wavefront file, but since the mesh is constantly changing, I actually repopulate mVertices in every iteration of the game loop with the updated coordinates and normals. This approach is wrong, because the data already lives on the GPU: I am copying it back to the CPU only to upload it to OpenGL again for rendering.
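Concretely, the roundtrip I want to avoid looks like this every frame (d_vertices is the device array my kernels write into; the names are illustrative, and the device array is assumed to use the same Vertex layout):

// current (slow) path: device -> host -> device, every frame
cudaMemcpy(mVertices.data(), d_vertices,
           mVertices.size() * sizeof(Vertex),
           cudaMemcpyDeviceToHost);   // pull the results back to the CPU
myMesh.updateBuffer();                // re-upload the same data into the VBO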

How can I use CUDA-OpenGL interoperability in this case?
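From the CUDA samples my rough understanding is that the VBO has to be registered once with cudaGraphicsGLRegisterBuffer() and then mapped every frame so that a kernel can write the vertex data into it directly, something like the sketch below (updateMeshKernel, blocks, threads and numVertices are placeholders), but I don't see how to fit this into the VAO/VBO setup and draw loop shown above:

#include <cuda_gl_interop.h>

cudaGraphicsResource *vboResource = nullptr;

// once, after glBufferData() has allocated storage for mVBO
cudaGraphicsGLRegisterBuffer(&vboResource, mVBO, cudaGraphicsRegisterFlagsWriteDiscard);

// every frame, instead of the cudaMemcpy + re-upload above
cudaGraphicsMapResources(1, &vboResource, 0);
Vertex *d_ptr = nullptr;
size_t numBytes = 0;
cudaGraphicsResourceGetMappedPointer((void **)&d_ptr, &numBytes, vboResource);

updateMeshKernel<<<blocks, threads>>>(d_ptr, numVertices); // kernel fills positions/normals in place

cudaGraphicsUnmapResources(1, &vboResource, 0);
// ... then bind the VAO and draw as usual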