Hi,
We have the same observation, and it is a constraint of the hardware encoder. 7200x6000 is a very large resolution (the height is even larger than that of standard 8K). The large resolution increases latency.
We observe the behavior by running this test sample:
$ gst-launch-1.0 videotestsrc num-buffers=1 ! video/x-raw,format=NV12,width=7200,height=6000 ! filesink location=/home/nvidia/a.yuv
$ g++ -Wall -std=c++11 a.cpp -o test $(pkg-config --cflags --libs gstreamer-app-1.0)
$ ./test
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <sstream>
#include <thread>
#include <dlfcn.h>
#include <unistd.h>
#include <glib-unix.h>
#include <gst/gst.h>
#include <gst/gstinfo.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappsink.h>
using namespace std;
/* Silence "unused variable/parameter" warnings without changing behavior. */
#define USE(x) ((void)(x))
/* Pipeline built from launch_string in main(). */
static GstPipeline *gst_pipeline = nullptr;
static string launch_string;
/* appsrc element ("mysource") that feed_function() pushes raw NV12 frames into. */
static GstElement *appsrc_;
/* Running PTS (nanoseconds) for pushed buffers; advanced by feed_function(). */
GstClockTime timestamp = 0;
/* Frame geometry; size of one NV12 frame is (w*h*3)/2 bytes. */
static int w = 7200;
static int h = 6000;
/* Heap copy of one raw NV12 frame read from /home/nvidia/a.yuv in main(). */
static void *ptr = nullptr;
/* appsink "eos" callback: log that end-of-stream reached the sink. */
static void appsink_eos(GstAppSink * appsink, gpointer user_data)
{
    USE(appsink);
    USE(user_data);
    printf("app sink receive eos\n");
}
/*
 * appsink "new-sample" callback: pull one encoded sample, report its size.
 *
 * Fixes vs. original:
 *  - a NULL caps result was logged but then still passed to
 *    gst_caps_get_structure(), dereferencing NULL; now we unref the sample
 *    and bail out instead (the structure was never used anyway, so the
 *    gst_caps_get_structure() call is dropped).
 *  - gst_buffer_map() return value is now checked.
 *  - map.size is a gsize; print it with G_GSIZE_FORMAT instead of %lu.
 *
 * Returns GST_FLOW_OK on success, GST_FLOW_ERROR when the sample carries
 * no caps.
 */
static GstFlowReturn new_buffer(GstAppSink *appsink, gpointer user_data)
{
    USE(user_data);
    GstSample *sample = NULL;
    g_signal_emit_by_name (appsink, "pull-sample", &sample, NULL);
    if (!sample)
    {
        g_print ("could not make snapshot\n");
        return GST_FLOW_OK;
    }
    GstCaps *caps = gst_sample_get_caps (sample);
    if (!caps)
    {
        printf("could not get snapshot format\n");
        gst_sample_unref (sample);
        return GST_FLOW_ERROR;
    }
    GstBuffer *buffer = gst_sample_get_buffer (sample);
    GstMapInfo map = {0};
    if (gst_buffer_map (buffer, &map, GST_MAP_READ))
    {
        printf("map.size = %" G_GSIZE_FORMAT "\n", map.size);
        gst_buffer_unmap(buffer, &map);
    }
    gst_sample_unref (sample);
    return GST_FLOW_OK;
}
/*
 * Push one raw NV12 frame (copied from the global `ptr`) into the appsrc.
 *
 * Fixes vs. original:
 *  - the "push-buffer" GstFlowReturn was ignored, so feeding continued
 *    even after the pipeline stopped accepting data; now a non-OK return
 *    stops the source.
 *  - gst_buffer_new_allocate() / gst_buffer_map() results are checked
 *    (a 64.8 MB allocation can legitimately fail).
 *
 * NOTE(review): the PTS step of 66666666 ns (~66.7 ms) corresponds to
 * ~15 fps, while the caps in main() advertise framerate=30/1 — confirm
 * which rate is intended.
 *
 * Returns G_SOURCE_CONTINUE to keep feeding, G_SOURCE_REMOVE on failure.
 */
static gboolean feed_function(gpointer user_data) {
    USE(user_data);
    guint size = (guint)((w * h * 3) / 2);   /* NV12: Y plane + half-res interleaved UV */
    GstBuffer *buffer = gst_buffer_new_allocate (NULL, size, NULL);
    if (!buffer) {
        g_print("failed to allocate buffer\n");
        return G_SOURCE_REMOVE;
    }
    buffer->pts = timestamp;
    GstMapInfo map = {0};
    if (gst_buffer_map (buffer, &map, GST_MAP_WRITE)) {
        memcpy(map.data, ptr, size);
        gst_buffer_unmap(buffer, &map);
    }
    GstFlowReturn ret = GST_FLOW_OK;
    g_signal_emit_by_name (appsrc_, "push-buffer", buffer, &ret);
    gst_buffer_unref(buffer);
    if (ret != GST_FLOW_OK) {
        g_print("push-buffer returned %d, stopping feed\n", (int)ret);
        return G_SOURCE_REMOVE;
    }
    timestamp += 66666666;
    printf("fed one buffer \n");
    return G_SOURCE_CONTINUE;
}
int main(int argc, char** argv) {
USE(argc);
USE(argv);
gst_init (&argc, &argv);
GMainLoop *main_loop;
main_loop = g_main_loop_new (NULL, FALSE);
ostringstream launch_stream;
GstAppSinkCallbacks callbacks = {appsink_eos, NULL, new_buffer};
launch_stream
<< "appsrc name=mysource ! "
<< "video/x-raw,width="<< w <<",height="<< h <<",framerate=30/1,format=NV12 ! "
<< "nvvidconv ! video/x-raw(memory:NVMM),format=NV12 ! "
<< "nvv4l2h265enc maxperf-enable=1 ! appsink name=mysink ";
launch_string = launch_stream.str();
g_print("Using launch string: %s\n", launch_string.c_str());
GError *error = nullptr;
gst_pipeline = (GstPipeline*) gst_parse_launch(launch_string.c_str(), &error);
if (gst_pipeline == nullptr) {
g_print( "Failed to parse launch: %s\n", error->message);
return -1;
}
if(error) g_error_free(error);
appsrc_ = gst_bin_get_by_name(GST_BIN(gst_pipeline), "mysource");
gst_app_src_set_stream_type(GST_APP_SRC(appsrc_), GST_APP_STREAM_TYPE_STREAM);
guint size;
size = (w*h*3)/2;
FILE *fp = fopen ("/home/nvidia/a.yuv", "rb");
ptr = malloc(size);
fread(ptr, size, 1, fp);
fclose(fp);
GstElement *appsink_ = gst_bin_get_by_name(GST_BIN(gst_pipeline), "mysink");
gst_app_sink_set_callbacks (GST_APP_SINK(appsink_), &callbacks, NULL, NULL);
gst_element_set_state((GstElement*)gst_pipeline, GST_STATE_PLAYING);
for (int i=0; i<15; i++) {
feed_function(nullptr);
usleep(66666);
}
gst_element_set_state((GstElement*)gst_pipeline, GST_STATE_NULL);
gst_object_unref(GST_OBJECT(gst_pipeline));
g_main_loop_unref(main_loop);
free(ptr);
g_print("going to exit \n");
return 0;
}