nvvidconv works badly in my application

I’m building an application which needs to pull video frames out. I use nvcamerasrc as the input and appsink as the output of pipeline1, and appsrc as the input and xvimagesink as the output of pipeline2. When I ran it, I got an error like "allocator xxxxx failed", something like this. Someone told me that nvcamerasrc uses memory in NVMM while appsink needs data in common memory. Because of that, I thought nvvidconv was the best choice. When I use nvvidconv in gst-launch-1.0, everything works well.

gst-launch-1.0 nvcamerasrc ! 'video/x-raw(memory:NVMM), format=I420' ! nvvidconv ! 'video/x-raw, format=I420' ! xvimagesink

I have tested the same pipeline in application. I created two capsfilters as pipeline above, and connect them as below

gst_element_link_many(nvcamerasrc, capsfilter0, nvvidconv, capsfilter1, xvimagesink)

Strange things happened. Xvimagesink appears, and the image captured from nvcamerasrc appears. But there is only one frame. I have checked the log: "remove memory specific metadata GstVideoMetaAPI" and "remove metadata GstVideoMetaAPI". It’s really confusing, and I can’t find any clues on Google.

And then, I tried to connect it to appsink, to run the application without that kind of testpipeline, then I got the error: basetransform gstbasetransform.c:1414:gst_base_transform_setcaps: transform could not transform video/x-raw(memory:NVMM), width=(int)320, height=(int)240, format=(string)I420, framerate=(fraction)2/1 in anything we support

Here’s my source code:

#include <gst/gst.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappsink.h>

/* Video geometry/format used for both the NVMM caps (camera side) and
 * the system-memory caps (after nvvidconv). */
#define VIDEO_WIDTH   320
#define VIDEO_HEIGHT  240
#define VIDEO_FORMAT  "I420"
#define PIXEL_SIZE    8   /* NOTE(review): not referenced anywhere in this file */
 
/* All elements of both pipelines; kept global so the appsink callback can
 * reach pipeline2.  `encoder` and `rtph265pay` are declared but never
 * created or used in this file. */
GstElement *pipeline, *pipeline2, *appsink, *appsrc, *capsfilter0, *capsfilter1, *nvvidconv, *xvimagesink, *nvcamerasrc, *queue, *encoder, *rtph265pay;

/* Number of samples pulled from appsink so far (progress trace only). */
gint count = 0;

/* Main loop driving both pipelines. */
GMainLoop *loop;

/* "new-sample" callback on appsink: pulls the captured frame out of
 * pipeline1 and forwards a privately-owned buffer to the appsrc of
 * pipeline2.
 *
 * appsink:  the appsink element that emitted the signal.
 * Returns:  GST_FLOW_OK (or whatever gst_app_src_push_buffer() reports),
 *           GST_FLOW_ERROR when no sample is available (EOS/shutdown).
 */
static GstFlowReturn new_sample (GstElement *appsink) {
  GstSample *sample;
  GstBuffer *app_buffer, *buffer;
  GstElement *source;
  GstFlowReturn ret;

  /* get the sample from appsink; may be NULL at EOS or during teardown */
  sample = gst_app_sink_pull_sample ((GstAppSink*)appsink);
  if (!sample)
    return GST_FLOW_ERROR;   /* nothing to forward — do NOT dereference */

  count = count + 1;
  g_print ("get%d\n", count);

  buffer = gst_sample_get_buffer (sample);

  /* The buffer is owned by the sample.  Take our own reference BEFORE
   * gst_buffer_make_writable(): with a refcount of 1 make_writable()
   * returns the very same buffer, and unreffing the sample below would
   * then destroy it while appsrc still holds it (use-after-free).  With
   * the extra ref, make_writable() copies when needed and we end up
   * owning app_buffer outright. */
  app_buffer = gst_buffer_make_writable (gst_buffer_ref (buffer));

  /* done with the sample now that we own an independent buffer ref */
  gst_sample_unref (sample);

  /* look up pipeline2's appsrc and push the buffer into it;
   * gst_app_src_push_buffer() takes ownership of app_buffer */
  source = gst_bin_get_by_name (GST_BIN (pipeline2), "appsrc");
  ret = gst_app_src_push_buffer ((GstAppSrc*)source, app_buffer);
  gst_object_unref (source);

  return ret;
}

/* Buffer probe installed on the nvcamerasrc src pad (see main); it only
 * reports that a buffer went by and lets it continue downstream. */
static GstPadProbeReturn
have_data (GstPad          *pad,
           GstPadProbeInfo *info,
           gpointer         user_data)
{
  (void) pad;        /* unused — probe is purely a trace point */
  (void) info;
  (void) user_data;

  g_print("appsrc get one\n");

  return GST_PAD_PROBE_OK;
}


gint main (gint argc, gchar *argv[])
{ 
    GstPad *pad;

    /* init GStreamer — pass the real argc/argv so standard GStreamer
     * command-line options (e.g. --gst-debug) keep working */
    gst_init (&argc, &argv);
    loop = g_main_loop_new (NULL, FALSE);
 
    /* setup pipelines:
     *   pipeline : nvcamerasrc -> capsfilter0 (NVMM) -> nvvidconv
     *              -> capsfilter1 (system memory) -> appsink
     *   pipeline2: appsrc -> xvimagesink
     */
    pipeline    = gst_pipeline_new ("pipeline");
    pipeline2   = gst_pipeline_new ("pipeline2");

    nvcamerasrc = gst_element_factory_make ("nvcamerasrc",  "nvcamerasrc");
    queue       = gst_element_factory_make ("queue",        "queue");
    nvvidconv   = gst_element_factory_make ("nvvidconv",    "nvvidconv");
    appsink     = gst_element_factory_make ("appsink",      "appsink");
    appsrc      = gst_element_factory_make ("appsrc",       "appsrc");
    capsfilter0 = gst_element_factory_make ("capsfilter",   "capsfilter0");
    capsfilter1 = gst_element_factory_make ("capsfilter",   "capsfilter1");
    xvimagesink = gst_element_factory_make ("xvimagesink",  "xvimagesink");

    /* fail fast instead of crashing later on a NULL element */
    if (!pipeline || !pipeline2 || !nvcamerasrc || !queue || !nvvidconv ||
        !appsink || !appsrc || !capsfilter0 || !capsfilter1 || !xvimagesink) {
        g_printerr ("failed to create a GStreamer element\n");
        return -1;
    }
 
    /* caps on nvvidconv's sink side: camera frames, still in NVMM memory */
    GstCaps *caps = gst_caps_new_simple ("video/x-raw",
				         "format",    G_TYPE_STRING,     VIDEO_FORMAT,
				         "width",     G_TYPE_INT,        VIDEO_WIDTH,
				         "height",    G_TYPE_INT,        VIDEO_HEIGHT,
				         "framerate", GST_TYPE_FRACTION, 2, 1,
				         NULL);

    /* caps on nvvidconv's src side: same video, copied to system memory */
    GstCaps *capsconverted = gst_caps_new_simple ("video/x-raw",
				         "format",    G_TYPE_STRING,     VIDEO_FORMAT,
				         "width",     G_TYPE_INT,        VIDEO_WIDTH,
				         "height",    G_TYPE_INT,        VIDEO_HEIGHT,
				         "framerate", GST_TYPE_FRACTION, 2, 1,
				         NULL);

    /* tag the first caps as NVMM device memory */
    GstCapsFeatures *features = gst_caps_features_new("memory:NVMM", NULL);
    gst_caps_set_features(caps, 0, features);

    g_object_set (capsfilter0, "caps", caps, NULL);
    g_object_set (capsfilter1, "caps", capsconverted, NULL);

    /* NOTE(review): `queue` is added to the bin but never linked into the
     * chain; kept as in the original code, but it is effectively unused */
    gst_bin_add_many (GST_BIN (pipeline),  nvcamerasrc, queue, nvvidconv, capsfilter0, capsfilter1, appsink, NULL);
    gst_bin_add_many (GST_BIN (pipeline2), appsrc, xvimagesink, NULL);

    if (!gst_element_link_many (nvcamerasrc, capsfilter0, nvvidconv, capsfilter1, appsink, NULL)) {
        g_printerr ("failed to link pipeline1 elements\n");
    }
    if (!gst_element_link (appsrc, xvimagesink)) {
        g_print ("bad");
    }
 
    /* setup appsink/appsrc.
     * BUG FIX: appsink sits downstream of capsfilter1, i.e. AFTER
     * nvvidconv has moved the frame into system memory, so its caps must
     * be `capsconverted` (plain video/x-raw).  The original code set the
     * NVMM `caps` here, which made negotiation fail with
     * "could not transform video/x-raw(memory:NVMM) ... in anything we
     * support". */
    g_object_set (appsink, "emit-signals", TRUE, "caps", capsconverted, NULL);
    g_object_set (appsrc, "caps", capsconverted, NULL);
    g_signal_connect (appsink, "new-sample", G_CALLBACK (new_sample), NULL);
    g_object_set (G_OBJECT (appsrc),
        "stream-type", 0,               /* 0 == GST_APP_STREAM_TYPE_STREAM */
        "format", GST_FORMAT_TIME, NULL); 

    /* the capsfilters/appsink/appsrc now hold their own references;
     * drop ours so the caps are not leaked */
    gst_caps_unref (caps);
    gst_caps_unref (capsconverted);

    /* buffer probe on the camera src pad to confirm data is flowing */
    pad = gst_element_get_static_pad (nvcamerasrc, "src");
    gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_BUFFER,
      (GstPadProbeCallback) have_data, NULL, NULL);
    gst_object_unref (pad);

    /* play */
    gst_element_set_state (pipeline , GST_STATE_PLAYING);
    gst_element_set_state (pipeline2, GST_STATE_PLAYING);

    g_main_loop_run (loop);
 
    /* clean up */
    gst_element_set_state (pipeline , GST_STATE_NULL);
    gst_element_set_state (pipeline2, GST_STATE_NULL);
    gst_object_unref (GST_OBJECT (pipeline ));
    gst_object_unref (GST_OBJECT (pipeline2));
    g_main_loop_unref (loop);
 
    return 0;
}

in code, I use a probe to check to where data stream stops. It seems data stream cannot pass from nvcamerasrc to nvvidconv.

  1. Why can’t I get proper results from the application in C code while the pipeline works well on the command line?
  2. Why don’t I get identical errors when using different sinks like appsink and xvimagesink?
  3. What does “remove meta GstVideoMetaAPI” mean? What’s wrong with it? Why can I still get the first frame?
  4. What’s the reason that nvvidconv can’t transform frames “in anything we support” in the application, while it can transform — or at least work properly — before the strange error in question 3 occurred?

The part of appsink_src is ok. I have test it on vmware ubuntu, using v4l2 instead of nvcamerasrc. Without the use of nvvidconv, it’s easy to pass frame stream to appsink.

It’s really urgent. Thanks a lot to everyone has a patience to have a look at this question !

Hi,
For running ‘nvvidconv ! appsink’, you may refer to
https://devtalk.nvidia.com/default/topic/1010111/jetson-tx1/nvmm-memory/post/5158652/#5158652

Also we suggest you use NVIDIA nveglglesink and nvoverlaysink. Please check gstreamer user guide:
https://developer.nvidia.com/embedded/dlc/l4t-accelerated-gstreamer-guide-28-2-ga

Thank you for your reply, but it doesn’t make sense.

  1. I don’t know the difference between that application and mine. The only thing I can figure out is that: He used gst_parse_launch. I used element_factory_make, and link them together with capsfilters defined as same as him. I also apply the callback function. What’s the difference between these two?

  2. nvoverlaysink and nveglglesink are just a window to display the frames, aren’t they? How can they substitute appsink without ability to extract frame data from pipeline?

I’m so grateful for your quick reply, but could you answer to the point I listed in my question? nvvidconv works well in command line but not in application?

For 1: I see some differences, such as appsink_eos call back, but my first question would be : have you tried it ? It is quite simple to transpose your case within this sample. If it fails, post the errors and it may help to figure out what’s going wrong.

For 2: IMHO, main point is that these sinks read frames from NVMM memory, so you wouldn’t have to use nvvidconv.

Hi,
You may adapt the working launch string into your code for a trial:

launch_stream
<< "nvcamerasrc ! "
<< "video/x-raw(memory:NVMM), width="<< w <<", height="<< h <<", framerate=30/1 ! " 
<< "nvvidconv ! "
<< "video/x-raw, format=I420, width="<< w <<", height="<< h <<" ! "
<< "appsink name=mysink ";

Set capsfilter0 to video/x-raw(memory:NVMM),width=1920,height=1080,framerate=30/1
Set capsfilter1 to video/x-raw,format=I420,width=1920,height=1080

Yes, i did it. Thanks for the help !
But I still don’t know what the difference is. The only thing I see is the EOS callback. Can you give more details about the error I’ve got and posted? Why could nvvidconv work on the command line and in the form of “launch_stream”, but not work well with gst_element_factory_make(), gst_bin_add_many(), gst_element_link_many()?

Hi,
It actually is not about nvvidconv but gstreamer coding. Not sure but I guess caps has to be configured precisely in calling the APIs. It looks to have certain auto caps selection in launching a pipeline.

You should get more information about difference between coding and launching a pipeline at
http://gstreamer-devel.966125.n4.nabble.com/

It does help when running that sample. But when I add “top left right bottom” after nvvidconv, the issue came out again: “cannot transform xxxxx in anything we support.”
I have written a lib for nvvidconv to downsample the frame picture to 1/2. It’s expected to get a stream with a size of 320x240 after the nvivafilter plugin, with an input size of 640x480. I need to crop the video frames from 640x480 to 320x240 at the top left corner of the pristine images. But it failed again. It seems like if we change the stream size, the error appears. I had a try at changing the format from NV12 (the only format supported on the sink pad of nvivafilter) to I420, and it succeeded.
I just wonder how to use these features to crop frames in nvvidconv? It’s really important to crop the video to get the proper view. Thanks a lot for your reply, and I wish to get an answer soon.

Is there anyone can help? The error about nvvidconv.

Hi,
We need to reproduce it and then do further check. Please share the problematic pipeline.

launch_stream
<< "nvcamerasrc ! "
<< "video/x-raw(memory:NVMM), width=640, height=480, framerate=30/1 ! " 
<< "nvvidconv top=0 bottom=240 left=0 right=320 ! "
<< "video/x-raw, format=I420, width=320, height=240 ! "
<< "appsink name=mysink ";

xvimagesink, autovideosink, appsink or whatever, the error occurs that " we cannot transform xxxx to anything we support". And the src of the error is nvvidconv plugin.

Hi,
We don’t hit the issue in running the pipeline:

nvidia@tegra-ubuntu:~$ head -1 /etc/nv_tegra_release
# R28 (release), REVISION: 2.0, GCID: 10567845, BOARD: t210ref, EABI: aarch64, DATE: Fri Mar  2 04:58:16 UTC 2018
nvidia@tegra-ubuntu:~$ export DISPLAY=:0
nvidia@tegra-ubuntu:~$ gst-launch-1.0 nvcamerasrc num-buffers=300 ! 'video/x-raw(memory:NVMM),width=640,height=480,format=I420' ! nvvidconv top=0 bottom=240 left=0 right=320  ! 'video/x-raw,format=I420,width=320,height=240' ! xvimagesink
Setting pipeline to PAUSED ...

Available Sensor modes :
2592 x 1944 FR=30.000000 CF=0x1109208a10 SensorModeType=4 CSIPixelBitDepth=10 DynPixelBitDepth=10
2592 x 1458 FR=30.000000 CF=0x1109208a10 SensorModeType=4 CSIPixelBitDepth=10 DynPixelBitDepth=10
1280 x 720 FR=120.000000 CF=0x1109208a10 SensorModeType=4 CSIPixelBitDepth=10 DynPixelBitDepth=10
Pipeline is live and does not need PREROLL ...
Setting pipeline to PLAYING ...
New clock: GstSystemClock

NvCameraSrc: Trying To Set Default Camera Resolution. Selected sensorModeIndex = 1 WxH = 2592x1458 FrameRate = 30.000000 ...

Got EOS from element "pipeline0".
Execution ended after 0:00:10.283179309
Setting pipeline to PAUSED ...
Setting pipeline to READY ...
Setting pipeline to NULL ...
Freeing pipeline ...

Yes. Thanks for your reply, but the situation is different. I have tried it: it works well on the command line, but I can’t make nvvidconv work in the C code application. You had posted a link: https://devtalk.nvidia.com/default/topic/1010111/jetson-tx1/nvmm-memory/post/5158652/#5158652
I changed my code to fit the example, and nvvidconv did move the video stream from NVMM memory to common memory; appsink works well. But when I use it to crop the video, it fails. It seems like any change of size will produce the error. I have also tried videoscale and videoconvert after nvvidconv to scale the size from 640x480 to 320x240, but it failed again, with the same error as before.

Hi,
If gst-launch-1.0 works, it is mostly like an issue in the code. You may try to precisely configure width, height, format, framerate.

Yes, I have done that. But it still fails — even with width, height, format, framerate, and pixel-aspect-ratio all set.

You may add:

handle = g_signal_connect( gst_pipeline, "deep-notify", G_CALLBACK(gst_object_default_deep_notify ), NULL );

after pipeline creation for getting caps as in verbose mode and compare to the caps used in working case with gst-launch-1.0 -v.

Thx a lot for your advice. I will then have a try on it !

Hi,
We are able to run

launch_stream
<< "nvcamerasrc ! "
<< "video/x-raw(memory:NVMM), width=640,height=480,framerate=30/1,format=NV12 ! " 
<< "nvvidconv top=0 bottom=240 left=0 right=320 ! "
<< "video/x-raw,format=I420,width=320,height=240 ! "
<< "appsink name=mysink ";
nvidia@tegra-ubuntu:~$ g++ -Wall -std=c++11  test2.cpp -o test $(pkg-config --cflags --libs gstreamer-app-1.0)
nvidia@tegra-ubuntu:~$ ./test
Using launch string: nvcamerasrc ! video/x-raw(memory:NVMM), width=640,height=480,framerate=30/1,format=NV12 ! nvvidconv top=0 bottom=240 left=0 right=320 ! video/x-raw,format=I420,width=320,height=240 ! appsink name=mysink

Available Sensor modes :
2592 x 1944 FR=30.000000 CF=0x1109208a10 SensorModeType=4 CSIPixelBitDepth=10 DynPixelBitDepth=10
2592 x 1458 FR=30.000000 CF=0x1109208a10 SensorModeType=4 CSIPixelBitDepth=10 DynPixelBitDepth=10
1280 x 720 FR=120.000000 CF=0x1109208a10 SensorModeType=4 CSIPixelBitDepth=10 DynPixelBitDepth=10

NvCameraSrc: Trying To Set Default Camera Resolution. Selected sensorModeIndex = 1 WxH = 2592x1458 FrameRate = 30.000000 ...

map.size = 115200
map.size = 115200
map.size = 115200
map.size = 115200
map.size = 115200
map.size = 115200

FYR.