How to get a frame from VideoBuffer in DeepStream Service Maker C++

• Hardware Platform (Jetson / GPU) : Jetson AGX Orin
• DeepStream Version : 7.0
• JetPack Version (valid for Jetson only): 6.0
• TensorRT Version : 8.6.2.3-1+cuda12.2 (from the DeepStream 7.0 docker image)
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type( questions, new requirements, bugs)
Can you give an example of getting an image from a VideoBuffer?
I got the idea from here: link
Here is my implementation, but it is not working.

        VideoBuffer video_buffer = buffer;
        auto width = video_buffer.width();
        auto height = video_buffer.height();
        auto format = video_buffer.format();
        std::cout << "Video Buffer: " << " Width = " << width
                  << " Height = " << height << " Format = " << format
                  << std::endl;
                  
        video_buffer.read([&](const void* data, size_t size) -> size_t {
            const unsigned char* p_byte = (const unsigned char*) data;
            for (auto p = p_byte; p < p_byte + size; p++) {
                // take a peek on the data
            }
            // Assumes a tightly packed I420 frame (Y plane, then U plane, then V plane)
            cv::Mat yuv_image(height + height / 2, width, CV_8UC1, (void*)p_byte);
            cv::Mat rgb_image;
            cv::cvtColor(yuv_image, rgb_image, cv::COLOR_YUV2RGB_I420);
            cv::imwrite("output_image.jpg", rgb_image);
            return size;
        });
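One thing worth checking before digging further: the surface produced by nvstreammux is typically NV12 rather than I420, so even if the read succeeds, cv::COLOR_YUV2RGB_I420 would scramble the chroma. Below is a minimal format-aware sketch, under two assumptions that need verifying: that read() hands back a tightly packed CPU copy of the frame, and that format() yields a string such as "NV12".

        video_buffer.read([&](const void* data, size_t size) -> size_t {
            // NV12 and I420 are both 12 bits per pixel, so the Mat shape is
            // identical; only the chroma layout (interleaved vs planar) differs.
            cv::Mat yuv_image(height + height / 2, width, CV_8UC1,
                              const_cast<void*>(data));
            cv::Mat rgb_image;
            if (std::string(format) == "NV12") {   // assumption: format() reports "NV12"
                cv::cvtColor(yuv_image, rgb_image, cv::COLOR_YUV2RGB_NV12);
            } else {                               // otherwise fall back to planar I420
                cv::cvtColor(yuv_image, rgb_image, cv::COLOR_YUV2RGB_I420);
            }
            cv::imwrite("output_image.jpg", rgb_image);
            return size;
        });

Note also that on Jetson the frame may live in pitch-aligned NVMM memory; if size comes back larger than width * height * 3 / 2, each row is padded out to the pitch and would have to be copied out row by row before the conversion above is valid.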

Please let me know if you have any hints on how to solve this issue.
Thanks in advance.

Which sample are you testing? How did you use video_buffer.read?

None of the samples in /opt/nvidia/deepstream/deepstream-7.0/service-maker/sources/apps include an example related to VideoBuffer.
I only found a small example at: Service Maker for C/C++ Developers — DeepStream documentation
Based on that link, I'm pretty sure my code doesn't work because I don't know how to handle p_byte.
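For what it's worth, my current (unverified) mental model of the layout, assuming the callback sees a tightly packed I420 frame:

        // Hypothetical plane layout for a tightly packed I420 frame.
        // NV12 would instead have a single interleaved UV plane after Y.
        const unsigned char* y_plane = p_byte;                    // width * height bytes
        const unsigned char* u_plane = y_plane + width * height;  // (width/2) * (height/2) bytes
        const unsigned char* v_plane = u_plane + (width / 2) * (height / 2);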
Here is my full C++ code:

#include <gst/gstbuffer.h>
#include <spdlog/spdlog.h>

#include <cstdlib>
#include <iostream>
#include <opencv2/core/mat.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/opencv.hpp>
#include <string>

#include "pipeline.hpp"

using namespace deepstream;
#define PGIE_CLASS_ID_HEAD 1
#define PGIE_CLASS_ID_PERSON 0

class SampleBufferObserver : public BufferProbe::IBufferOperator {
   public:
    virtual probeReturn handleBuffer(BufferProbe &probe, Buffer &buffer) {
        VideoBuffer video_buffer = buffer;
        auto width = video_buffer.width();
        auto height = video_buffer.height();
        auto format = video_buffer.format();
        // std::cout << "Video Buffer: " << " Width = " << width
        //           << " Height = " << height << " Format = " << format
        //           << std::endl;
                  
        video_buffer.read([&](const void* data, size_t size) -> size_t {
            const unsigned char* p_byte = (const unsigned char*) data;
            for (auto p = p_byte; p < p_byte + size; p++) {
                // take a peek on the data
            }
            // Assumes a tightly packed I420 frame (Y plane, then U plane, then V plane)
            cv::Mat yuv_image(height + height / 2, width, CV_8UC1, (void*)p_byte);
            cv::Mat rgb_image;
            cv::cvtColor(yuv_image, rgb_image, cv::COLOR_YUV2RGB_I420);
            cv::imwrite("output_image.jpg", rgb_image);
            return size;
        });

        BatchMetadata batch_meta = video_buffer.getBatchMetadata();
        batch_meta.iterate([](const FrameMetadata &frame_data) {
            auto head_count = 0;
            auto person_count = 0;
            auto batch_id = frame_data.padIndex();
            auto frame_num = frame_data.frameNum();
            frame_data.iterate([&](const ObjectMetadata &object_data) {
                auto class_id = object_data.classId();
                if (class_id == PGIE_CLASS_ID_HEAD) {
                    head_count++;
                } else if (class_id == PGIE_CLASS_ID_PERSON) {
                    person_count++;
                }

                // auto local_id = object_data.objectId();
                // auto conf = object_data.confidence();
                // spdlog::info("Local ID : {}", local_id);
                // spdlog::info("Conf : {}", conf);
            });

            // std::cout << "Object Counter: " << " Pad Idx = "
            //           << frame_data.padIndex()
            //           << " Frame Number = " << frame_data.frameNum()
            //           << " Head Count = " << head_count
            //           << " Person Count = " << person_count << std::endl;
        });
        return probeReturn::Probe_Ok;
    }
};


int main(int argc, char *argv[]) {
    // export
    // NVDS_MODULE_PATH="/opt/nvidia/deepstream/deepstream/service-maker/sources/modules"
    if (argc < 2) {
        std::cout << "Usage: " << argv[0] << " <YAML config file>" << std::endl;
        return 0;
    }
    std::string file = argv[1];
    std::string suffix = "yml";
    spdlog::info(" Pipeline running on File: {}", file);
    if (std::equal(suffix.rbegin(), suffix.rend(), file.rbegin())) {
        try {
            Pipeline pipeline("Test pipeline", file);
            pipeline
                .attach("capsfilter",
                        new BufferProbe("counter", new SampleBufferObserver))
                .attach("tracker", "measure_fps_probe", "measure_fps", "src")
                .start()
                .wait();
        } catch (const std::exception &e) {
            std::cerr << e.what() << std::endl;
            return -1;
        }
    }

    return 0;
}

Currently there is no ready-made code. Please refer to the C++ sample /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-image-meta-test/deepstream_image_meta_test.c; nvds_obj_enc_process is used there to save frames.
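For orientation, here is a rough sketch of the pattern that sample uses inside a pad probe; the calls (nvds_obj_enc_create_context, nvds_obj_enc_process, nvds_obj_enc_finish) follow deepstream_image_meta_test.c, but the struct fields and the create-context signature should be verified against the DeepStream 7.0 headers:

#include <gst/gst.h>
#include "gstnvdsmeta.h"
#include "nvbufsurface.h"
#include "nvds_obj_encode.h"

// Sketch after deepstream_image_meta_test.c: JPEG-encode each detected
// object from a pad probe. Verify struct fields against your headers.
static GstPadProbeReturn encode_probe(GstPad *pad, GstPadProbeInfo *info,
                                      gpointer ctx_ptr) {
    NvDsObjEncCtxHandle ctx = (NvDsObjEncCtxHandle) ctx_ptr;
    GstBuffer *buf = GST_BUFFER(info->data);

    // The encoder consumes the underlying NvBufSurface, not mapped CPU bytes
    GstMapInfo inmap;
    if (!gst_buffer_map(buf, &inmap, GST_MAP_READ))
        return GST_PAD_PROBE_OK;
    NvBufSurface *ip_surf = (NvBufSurface *) inmap.data;

    NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
    for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame;
         l_frame = l_frame->next) {
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
        for (NvDsMetaList *l_obj = frame_meta->obj_meta_list; l_obj;
             l_obj = l_obj->next) {
            NvDsObjectMeta *obj_meta = (NvDsObjectMeta *) l_obj->data;
            NvDsObjEncUsrArgs args = {0};
            args.saveImg = TRUE;         // write the JPEG to disk
            args.attachUsrMeta = FALSE;  // or attach the JPEG as user meta
            args.quality = 80;           // JPEG quality
            nvds_obj_enc_process(ctx, &args, ip_surf, obj_meta, frame_meta);
        }
    }
    nvds_obj_enc_finish(ctx);  // flush pending encode jobs for this batch
    gst_buffer_unmap(buf, &inmap);
    return GST_PAD_PROBE_OK;
}

// Context lifecycle around pipeline start/stop (gpu_id 0 assumed):
//   NvDsObjEncCtxHandle ctx = nvds_obj_enc_create_context(0);
//   ...attach encode_probe downstream of the inference element...
//   nvds_obj_enc_destroy_context(ctx);

Since there is no ready-made Service Maker wrapper for this encoder, the sketch stays at the plain GStreamer level.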
