I’m building an application whitch needs to pull video frames out. I use nvcamerasrc as input and appsink as output of pipeline1. Appsrc as input and xvimagesink as output in pipeline2. when I ran, I got a error like allocater xxxxx failed something like this. Someone told me that nvcamerasrc use memory in NVMM, appsink needs data in common memory. In case of that, i thought nvvidconv is the best choice. I use nvvidconv in gst-launch-1.0, everything works well.
gst-launch-1.0 nvcamerasrc ! 'video/x-raw(memory:NVMM), format=I420' ! nvvidconv ! 'video/x-raw, format=I420' ! xvimagesink
I have tested the same pipeline in application. I created two capsfilters as pipeline above, and connect them as below
gst_element_link_many(nvcamerasrc, capsfilter0, nvvidconv, capsfilter1, xvimagesink)
Strange things happened. Xvimagesink appears, and image captured from nvcamerasrc appears. But there is only one frame. I have checked the log: remove memory specific metadata GstVideoMetaAPI and remove metadata GstVideoMetaAPI. It’s really confused, and can’t find any clues on google.
And then, I tried to connect it to appsink, to run the application without that kind of testpipeline, then I got the error: basetransform gstbasetransform.c:1414:gst_base_transform_setcaps: transform could not transform video/x-raw(memory:NVMM), width=(int)320, height=(int)240, format=(string)I420, framerate=(fraction)2/1 in anything we support
Here’s my source code:
#include <gst/gst.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappsink.h>
#define VIDEO_WIDTH 320
#define VIDEO_HEIGHT 240
#define VIDEO_FORMAT "I420"
#define PIXEL_SIZE 8
GstElement *pipeline, *pipeline2, *appsink, *appsrc, *capsfilter0, *capsfilter1, *nvvidconv, *xvimagesink, *nvcamerasrc, *queue, *encoder, *rtph265pay;
gint count = 0;
GMainLoop *loop;
static GstFlowReturn new_sample (GstElement *appsink) {
GstSample *sample;
GstBuffer *app_buffer, *buffer;
GstElement *source;
GstFlowReturn ret;
/* get the sample from appsink */
sample = gst_app_sink_pull_sample ((GstAppSink*)appsink);
if(sample)
{
count = count + 1;
g_print("get%d\n",count);
}
buffer = gst_sample_get_buffer (sample);
/* make a copy */
app_buffer = gst_buffer_make_writable (buffer);
/* get source an push new buffer */
gst_sample_unref (sample);
source = gst_bin_get_by_name (GST_BIN (pipeline2), "appsrc");
ret = gst_app_src_push_buffer ((GstAppSrc*)source, app_buffer);
gst_object_unref (source);
return ret;
}
static GstPadProbeReturn
have_data (GstPad *pad,
GstPadProbeInfo *info,
gpointer user_data)
{
g_print("appsrc get one\n");
return GST_PAD_PROBE_OK;
}
gint main (gint argc, gchar *argv[])
{
GstPad *pad;
/* init GStreamer */
gst_init (NULL, NULL);
loop = g_main_loop_new (NULL, FALSE);
/* setup pipeline */
pipeline = gst_pipeline_new ("pipeline");
pipeline2 = gst_pipeline_new ("pipeline2");
nvcamerasrc = gst_element_factory_make ("nvcamerasrc", "nvcamerasrc");
queue = gst_element_factory_make ("queue", "queue");
nvvidconv = gst_element_factory_make ("nvvidconv", "nvvidconv");
appsink = gst_element_factory_make ("appsink", "appsink");
appsrc = gst_element_factory_make ("appsrc", "appsrc");
capsfilter0 = gst_element_factory_make ("capsfilter", "capsfilter0");
capsfilter1 = gst_element_factory_make ("capsfilter", "capsfilter1");
xvimagesink = gst_element_factory_make ("xvimagesink", "xvimagesink");
/* setup */
GstCaps *caps = gst_caps_new_simple ("video/x-raw",
"format", G_TYPE_STRING, VIDEO_FORMAT,
"width", G_TYPE_INT, VIDEO_WIDTH,
"height", G_TYPE_INT, VIDEO_HEIGHT,
"framerate", GST_TYPE_FRACTION, 2, 1,
//"interlace-mode", G_TYPE_STRING, "progressive",
//"pixel-aspect-ratio", GST_TYPE_FRACTION,1,1,
NULL);
GstCaps *capsconverted = gst_caps_new_simple ("video/x-raw",
"format", G_TYPE_STRING, VIDEO_FORMAT,
"width", G_TYPE_INT, VIDEO_WIDTH,
"height", G_TYPE_INT, VIDEO_HEIGHT,
"framerate", GST_TYPE_FRACTION, 2, 1,
//"interlace-mode", G_TYPE_STRING, "progressive",
//"pixel-aspect-ratio", GST_TYPE_FRACTION,1,1,
NULL);
GstCapsFeatures *features = gst_caps_features_new("memory:NVMM", NULL);
gst_caps_set_features(caps, 0, features);
g_object_set (capsfilter0, "caps", caps, NULL);
g_object_set (capsfilter1, "caps", capsconverted, NULL);
gst_bin_add_many (GST_BIN (pipeline), nvcamerasrc, queue, nvvidconv, capsfilter0, capsfilter1, appsink, NULL);
gst_bin_add_many (GST_BIN (pipeline2), appsrc, xvimagesink, NULL);
gst_element_link_many (nvcamerasrc, capsfilter0, nvvidconv, capsfilter1, appsink, NULL);
if(!gst_element_link(appsrc, xvimagesink))
{
g_print("bad");
}
/* setup appsink */
g_object_set (appsink, "emit-signals", TRUE, "caps", caps, NULL);
g_object_set (appsrc, "caps", capsconverted, NULL);
g_signal_connect (appsink, "new-sample", G_CALLBACK (new_sample), NULL);
g_object_set (G_OBJECT (appsrc),
"stream-type", 0,
"format", GST_FORMAT_TIME, NULL);
pad = gst_element_get_static_pad (nvcamerasrc, "src");
gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_BUFFER,
(GstPadProbeCallback) have_data, NULL, NULL);
gst_object_unref (pad);
/* play */
gst_element_set_state (pipeline , GST_STATE_PLAYING);
gst_element_set_state (pipeline2, GST_STATE_PLAYING);
//gst_debug_set_default_threshold(GST_LEVEL_WARNING);
g_main_loop_run (loop);
/* clean up */
gst_element_set_state (pipeline , GST_STATE_NULL);
gst_element_set_state (pipeline2, GST_STATE_NULL);
gst_object_unref (GST_OBJECT (pipeline ));
gst_object_unref (GST_OBJECT (pipeline2));
g_main_loop_unref (loop);
return 0;
}
in code, I use a probe to check to where data stream stops. It seems data stream cannot pass from nvcamerasrc to nvvidconv.
- why I can’t get proper results from application in C code while pipeline works well in command line?
- why I don’t identical error when using diiferent sink like appink and xvimagesink?
- what’s the “remove meta gstvideometaAPI” means? what’t wrong with it? Why still can I get the first one frame?
- What’s the reason that nvvidconv can’t transform frames “in anything we support” in appilication? while it does can transform, or to say it can work properly at least before 3. strange error occured.
The part of appsink_src is ok. I have test it on vmware ubuntu, using v4l2 instead of nvcamerasrc. Without the use of nvvidconv, it’s easy to pass frame stream to appsink.
It’s really urgent. Thanks a lot to everyone has a patience to have a look at this question !