Render Topology with Depth Test and mPool VBO

I am trying to render the topology with a depth test, because right now in the example all topology lines are drawn on top of the original texture.

My idea is to copy numLevel levels of mPool[0] into OpenGL VBOs and use a geometry shader to draw the boxes into an FBO with a depth texture, then compose it with the gvdb texture like in the glDepthMap example. So there should be only numLevel draw calls. Are there any drawbacks to this method, GPU-memory- or speed-wise?

I think I succeeded, but only for a small number of nodes so far. My method is slower than g3dPrint, where the program loops through the node pool on the CPU and draws each box by directly drawing lines on the screen. But I assume my method will achieve better performance when there are a lot of nodes to draw. Also, the whole point of this new method is to hide node boxes that are behind the model.

This is a really neat change, if your changes to the g3dPrint example are small would you mind sharing them? If it was a very involved process don’t stress about it. It looks good though!

I did it in a separate project, but here are some code snippets you can use:

void Simulation::RenderTopology()
	Matrix4F vp = gvdb_.getScene()->getCamera()->getProjMatrix();
	vp *= gvdb_.getScene()->getCamera()->getViewMatrix().data;
	glUniform1f(0, voxel_size_);
	glUniformMatrix4fv(1, 1, GL_FALSE,;

	for (uchar lvl = draw_from_level_; lvl < MAXLEV; lvl++)

		uint64 node_count = gvdb_.mPool->getPoolTotalCnt(0, lvl);

		// Skip unused higher levels
		if (node_count <= 0) break;
		int buffer_idx = lvl - draw_from_level_;
		// Get GPU DataPtr for only index position of nodes on this level
		void *pool_ptr = (void *)gvdb_.mPool->getPoolGPU(0, lvl);
		// Create OpenGL VAO and VBO for node,
		size_t pool_vbo_size = node_count * sizeof(Node);
		// Decode node pool vbo with vao
		glBindBuffer(GL_ARRAY_BUFFER, gl_gvdb_node_vbo_[lvl]);
		glBufferData(GL_ARRAY_BUFFER, pool_vbo_size, 0, GL_DYNAMIC_DRAW);

		struct ALIGN(16) GVDB_API Node {
		public:							//						Size:	Range:
		uchar		mLev;			// Tree Level			1 byte	Max = 0 to 255
		uchar		mFlags;			// Flags				1 byte	true - used, false - discard
		uchar		mPriority;		// Priority				1 byte
		uchar		pad;			//						1 byte
		Vector3DI	mPos;			// Pos in Index-space	12 byte <--------------Here!!!
		Vector3DI	mValue;			// Value in Atlas		12 byte
		Vector3DF	mVRange;		// Value min, max, ave	12 byte
		uint64		mParent;		// Parent ID			8 byte	Pool0 reference
		uint64		mChildList;		// Child List			8 byte	Pool1 reference									?#ifdef USE_BITMASKS
		uint64		mMask;			// Start of BITMASK.	8 byte
		// HEADER TOTAL			64 bytes
		glVertexAttribPointer(0, 3, GL_INT, GL_FALSE, sizeof(Node), (GLvoid *)(4 * sizeof(uchar)));

		// Copy node pool to vbo
		cudaGraphicsResource *pool_vbo_cuda_res;
		void *pool_vbo_cuda_res_mapped_ptr;
		checkCudaErrors(cudaGraphicsGLRegisterBuffer(&pool_vbo_cuda_res, gl_gvdb_node_vbo_[lvl], cudaGraphicsRegisterFlagsWriteDiscard));
		checkCudaErrors(cudaGraphicsMapResources(1, &pool_vbo_cuda_res, 0));
		checkCudaErrors(cudaGraphicsResourceGetMappedPointer(&pool_vbo_cuda_res_mapped_ptr, &pool_vbo_size, pool_vbo_cuda_res));
		checkCudaErrors(cudaMemcpy(pool_vbo_cuda_res_mapped_ptr, pool_ptr, pool_vbo_size, cudaMemcpyDeviceToDevice));
		checkCudaErrors(cudaGraphicsUnmapResources(1, &pool_vbo_cuda_res, 0));

		// Assign uniform
		float cover = gvdb_.getCover(lvl).x;
		glUniform1f(2, cover);
		uchar color_lvl = lvl + 1;
		glUniform3f(3, (float)(color_lvl >> 2 & 1), (float)(color_lvl >> 1 & 1), (float)(color_lvl & 1));
		glDrawArrays(GL_POINTS, 0, node_count);
		glBindBuffer(GL_ARRAY_BUFFER, 0);



Thank you!