Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) GPU
• DeepStream Version 6.1
• JetPack Version (valid for Jetson only)
• TensorRT Version
• NVIDIA GPU Driver Version (valid for GPU only) 510.47.03
• Issue Type( questions, new requirements, bugs) questions
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
• Requirement details( This is for new requirement. Including the module name-for which plugin or for which sample application, the function description)
I’m trying to develop a GStreamer pipeline with DeepStream plugins, and since the input is an mp4 file, I want to enable infinite looping. The pipeline is as follows:
gst-launch-1.0 uridecodebin uri="file:///home/user/test.mp4" ! nvstreammux ! tee name=tee0 ! queue ! tee name=tee1 ! queue ! nveglglessink tee1. ! queue ! nvvideoconvert ! video/x-raw(memory:NVMM),format=NV12 ! nvv4l2h264enc bitrate=4000000 iframeinterval=30 ! flvmux ! rtmpsink location=rtmp://127.0.0.1:1935/live/test tee0. ! queue ! nvvideoconvert ! video/x-raw(memory:NVMM),format=RGBA ! appsink
I implemented the file-loop trick from /opt/nvidia/deepstream/deepstream-6.1/sources/apps/apps-common/src/deepstream_source_bin.c, like the following:
/**
 * One-shot GLib timeout callback that rewinds the pipeline to the start of
 * the source file so playback loops.  It is scheduled via g_timeout_add()
 * from the decoder sink-pad probe when an EOS event is observed, so the
 * seek runs on the main context rather than in the streaming thread.
 *
 * @param user_data  VideoPipeline* owning the pipeline to rewind.
 * @return G_SOURCE_REMOVE (false) so the timeout fires exactly once.
 */
static gboolean cb_seek_decoded_file(gpointer user_data)
{
    VideoPipeline* vp = static_cast<VideoPipeline*>(user_data);
    LOG_INFO("============================================");
    // NOTE(review): pipeline_id is not declared in this function; presumably
    // a file-scope identifier — confirm it logs the intended pipeline.
    LOG_INFO("cb_seek_decoded_file called({})", pipeline_id);
    LOG_INFO("============================================");
    // Pause before issuing the flushing seek, then resume afterwards.
    gst_element_set_state(vp->m_pipeline, GST_STATE_PAUSED);
    if (!gst_element_seek(vp->m_pipeline, 1.0, GST_FORMAT_TIME,
            (GstSeekFlags)(GST_SEEK_FLAG_KEY_UNIT | GST_SEEK_FLAG_FLUSH),
            GST_SEEK_TYPE_SET, 0, GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE)) {
        // Fixed message typo: "seed" -> "seek".
        LOG_WARN("Failed to seek the source file in pipeline");
    }
    gst_element_set_state(vp->m_pipeline, GST_STATE_PLAYING);
    return G_SOURCE_REMOVE;  // equivalent to FALSE: do not reschedule
}
/**
 * Decoder sink-pad probe implementing seamless file looping (modelled on
 * deepstream_source_bin.c).  On each loop iteration it:
 *  - shifts buffer PTS by the base accumulated over previous iterations so
 *    timestamps stay monotonic downstream, and
 *  - rewrites SEGMENT bases and drops EOS/QOS/SEGMENT/FLUSH events so
 *    downstream elements (notably nvstreammux) never observe the restart.
 *
 * @param pad        probed decoder sink pad (unused).
 * @param info       probe info; carries a GstBuffer or GstEvent in ->data.
 * @param user_data  VideoPipeline* holding the accumulated-base state.
 * @return GST_PAD_PROBE_DROP for loop-related events, GST_PAD_PROBE_OK else.
 */
static GstPadProbeReturn cb_reset_stream_probe(
    GstPad* pad,
    GstPadProbeInfo* info,
    gpointer user_data)
{
    VideoPipeline* vp = static_cast<VideoPipeline*>(user_data);
    if (info->type & GST_PAD_PROBE_TYPE_BUFFER) {
        // Offset this buffer's PTS by the running time of all prior loops.
        GST_BUFFER_PTS(GST_PAD_PROBE_INFO_BUFFER(info)) +=
            vp->m_prev_accumulated_base;
    }
    if (info->type & GST_PAD_PROBE_TYPE_EVENT_BOTH) {
        // Cast to GstEvent only inside the event branch.  The original cast
        // info->data unconditionally, which applied an event cast-check to
        // GstBuffer data whenever the probe fired for a buffer.
        GstEvent* event = GST_PAD_PROBE_INFO_EVENT(info);
        if (GST_EVENT_TYPE(event) == GST_EVENT_EOS) {
            // Defer the rewind out of the streaming thread.
            g_timeout_add(1, cb_seek_decoded_file, vp);
        }
        if (GST_EVENT_TYPE(event) == GST_EVENT_SEGMENT) {
            // Rewrite the segment base so running time keeps increasing
            // across loop iterations.
            GstSegment* segment;
            gst_event_parse_segment(event, (const GstSegment**)&segment);
            segment->base = vp->m_accumulated_base;
            vp->m_prev_accumulated_base = vp->m_accumulated_base;
            vp->m_accumulated_base += segment->stop;
        }
        switch (GST_EVENT_TYPE(event)) {
        case GST_EVENT_EOS:
        case GST_EVENT_QOS:
        case GST_EVENT_SEGMENT:
        case GST_EVENT_FLUSH_START:
        case GST_EVENT_FLUSH_STOP:
            // Swallow restart-related events so downstream never sees them.
            return GST_PAD_PROBE_DROP;
        default:
            break;
        }
    }
    return GST_PAD_PROBE_OK;
}
/**
 * "child-added" callback for the decodebin inside uridecodebin.  When the
 * hardware decoder (nvv4l2decoder) appears it configures its CUDA memory
 * type and — for file sources with looping enabled — installs the
 * cb_reset_stream_probe on its sink pad.  For h264parse/h265parse children
 * it forces SPS/PPS insertion on every IDR (config-interval = -1).
 *
 * @param child_proxy  the decodebin emitting the signal (unused).
 * @param object       the newly added child element.
 * @param name         the child element's name.
 * @param user_data    VideoPipeline* holding source config and probe ids.
 */
static void cb_decodebin_child_added(GstChildProxy* child_proxy, GObject* object,
    gchar* name, gpointer user_data)
{
    VideoPipeline* vp = static_cast<VideoPipeline*>(user_data);
    LOG_INFO("cb_decodebin_child_added called({},'{}' added)", pipeline_id, name);
    if (g_strrstr(name, "nvv4l2decoder") == name) {
        // 2 = unified CUDA memory — confirm against the nvv4l2decoder
        // "cudadec-memtype" enum for this DeepStream version.
        g_object_set(object, "cudadec-memtype", 2, nullptr);
        if (g_strstr_len(vp->m_config.src_uri.c_str(), -1, "file:/") ==
            vp->m_config.src_uri.c_str() && vp->m_config.file_loop) {
            // File source with looping: watch buffers and events on the
            // decoder sink pad to hide EOS/seek restarts from downstream.
            GstPad* gst_pad = gst_element_get_static_pad(GST_ELEMENT(object), "sink");
            vp->m_dec_sink_probe = gst_pad_add_probe(gst_pad, (GstPadProbeType)(
                GST_PAD_PROBE_TYPE_EVENT_BOTH | GST_PAD_PROBE_TYPE_EVENT_FLUSH |
                GST_PAD_PROBE_TYPE_BUFFER), cb_reset_stream_probe, static_cast<void*>(vp), nullptr);
            gst_object_unref(gst_pad);
            vp->m_decoder = GST_ELEMENT(object);
            gst_object_ref(object);  // keep the decoder alive for later use
        } else if (g_strstr_len(vp->m_config.src_uri.c_str(), -1, "rtsp:/") ==
            vp->m_config.src_uri.c_str()) {
            vp->m_decoder = GST_ELEMENT(object);
            gst_object_ref(object);
        }
    } else if ((g_strrstr(name, "h264parse") == name) ||
        (g_strrstr(name, "h265parse") == name)) {
        LOG_INFO("set config-interval of {} to {}", name, -1);
        g_object_set(object, "config-interval", -1, nullptr);
    }
    // Removed the unused "done:" label — nothing in the function jumped to it.
}
But if I use nvstreammux in my pipeline, the file-loop seek event fails.
The problem is that I also need to access the GstBuffer via the GstSample output by the appsink. The only way I know of is to use nvstreammux so that the buffer holds an NvBufSurface structure whose data pointer I can map. The callback function for the appsink’s ‘new-sample’ signal is as follows:
/**
 * "new-sample" signal callback for the appsink: pulls the batched NVMM
 * buffer, maps the NvBufSurface for CPU access, deep-copies each frame into
 * a cv::Mat and pushes it onto the producer queue.
 *
 * Fixes vs. the original:
 *  - gst_buffer_map() return value is now checked, and the error path only
 *    unmaps what was actually mapped;
 *  - NvBufSurfaceMap() was called once per frame on index (0, 0) while
 *    NvBufSurfaceUnMap() ran only once after the loop, and every frame read
 *    surface 0's mapping.  The batch is now mapped once (-1, -1) and each
 *    frame addressed by its batch_id.
 *
 * @param appsink    the appsink element emitting the signal.
 * @param user_data  VideoPipeline* owning the producer queue.
 * @return always GST_FLOW_OK so this sink never tears down the pipeline.
 */
static GstFlowReturn cb_appsink_new_sample(
    GstElement* appsink,
    gpointer user_data)
{
    VideoPipeline* vp = static_cast<VideoPipeline*>(user_data);
    GstSample* sample = nullptr;
    const GstStructure* info = nullptr;
    GstBuffer* buffer = nullptr;
    GstMapInfo map;
    gboolean buffer_mapped = FALSE;   // track what to undo on the error path
    gboolean surface_mapped = FALSE;
    GstCaps* caps = nullptr;
    NvBufSurface* surface = nullptr;
    NvDsMetaList* l_frame = nullptr;
    NvDsBatchMeta* batch_meta = nullptr;
    if (!vp->m_dumped) {
        // Dump the pipeline graph once for debugging.
        GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(vp->m_pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "video-pipeline");
        vp->m_dumped = true;
    }
    g_signal_emit_by_name(appsink, "pull-sample", &sample);
    if (!sample) {
        return GST_FLOW_OK;
    }
    buffer = gst_sample_get_buffer(sample);
    if (buffer == nullptr) {
        LOG_ERROR("Can't get buffer from sample.");
        goto err;
    }
    if (!gst_buffer_map(buffer, &map, GST_MAP_READ)) {
        LOG_ERROR("Can't map buffer from sample.");
        goto err;
    }
    buffer_mapped = TRUE;
    caps = gst_sample_get_caps(sample);
    if (caps == nullptr) {
        LOG_ERROR("Can't get caps from sample.");
        goto err;
    }
    info = gst_caps_get_structure(caps, 0);
    if (info == nullptr) {
        LOG_ERROR("Can't get info from sample.");
        goto err;
    }
    {
        batch_meta = gst_buffer_get_nvds_batch_meta(buffer);
        // For NVMM caps the mapped buffer data is the batched NvBufSurface.
        surface = (NvBufSurface*)map.data;
        // Map every surface in the batch once; unmapped symmetrically below.
        if (NvBufSurfaceMap(surface, -1, -1, NVBUF_MAP_READ_WRITE)) {
            LOG_ERROR("NVMM map failed.");
            goto err;
        }
        surface_mapped = TRUE;
        // NOTE(review): device-memory surfaces typically need
        // NvBufSurfaceSyncForCpu() before reading mappedAddr — confirm for
        // the cudadec-memtype used by this pipeline.
        for (l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
            NvDsFrameMeta* frame_meta = (NvDsFrameMeta*)(l_frame->data);
            guint batch_id = frame_meta->batch_id;
            uint32_t frame_width = surface->surfaceList[batch_id].width;
            uint32_t frame_height = surface->surfaceList[batch_id].height;
            uint32_t frame_pitch = surface->surfaceList[batch_id].pitch;
            // Wrap the mapped RGBA plane without copying, then clone so the
            // queued Mat owns its pixels after the surface is unmapped.
            cv::Mat tmpMat(frame_height, frame_width, CV_8UC4,
                (unsigned char*)surface->surfaceList[batch_id].mappedAddr.addr[0],
                frame_pitch);
            vp->m_productQueue->product(std::make_shared<cv::Mat>(tmpMat.clone()));
        }
    }
err:
    if (surface_mapped) {
        NvBufSurfaceUnMap(surface, -1, -1);
    }
    if (buffer_mapped) {
        gst_buffer_unmap(buffer, &map);
    }
    if (sample) {
        gst_sample_unref(sample);
    }
    return GST_FLOW_OK;
}
So is there anything wrong with my pipeline or code? Or is there another way to access the GstBuffer data output by the appsink?
Thanks.