Hello!
I have a Jetson Nano computer with an e-CAM30_CUNANO camera connected over MIPI.
On the Jetson Nano side I run this pipeline:
gst-launch-1.0 v4l2src device=/dev/video0 ! "video/x-raw, format=(string)UYVY, width=(int)640, height=(int)480" ! nvvidconv ! "video/x-raw(memory:NVMM), format=(string)I420" ! omxh264enc qp-range=35,35:35,35:-1,-1 ! mpegtsmux ! udpsink clients=192.168.223.103:5000 sync=false
On the client side I wrote the following code for receiving the video:
JetsonVideoTransmissionV1.h
// Receives an MPEG-TS/H.264 video stream (typically over UDP) from a Jetson
// Nano sender, decodes it with a GStreamer pipeline
// (src -> tsdemux -> queue -> h264parse -> avdec_h264 -> videoconvert -> appsink)
// and exposes decoded frames as cv::Mat through the InputSource interface.
class JetsonVideoTransmissionV1 : public InputSource {
public:
// All element pointers start null; the pipeline is built in configure().
JetsonVideoTransmissionV1() : InputSource(INPUT_KIND_VIDEO, "JetsonVideoTransmissionV1")
{
m_pipeline = nullptr;
m_src = nullptr;
m_demuxer = nullptr;
m_h264parse = nullptr;
m_avdec_h264 = nullptr;
m_videoconvert = nullptr;
m_video_queue = nullptr;
m_video_sink = nullptr;
}
// Pulls the next frame into m_frame and returns the running frame id.
virtual unsigned getData(void *p, size_t size, time_processing_t& time);
// Reads config (ip_address, port, timeout, max_buffers, src_kind,
// video_drop, cam_resolution) and builds/pre-rolls the pipeline.
virtual void configure(Config::Section *sec, bool onfly);
virtual void start();  // pipeline -> PLAYING (mutex-guarded)
virtual void stop();   // pipeline -> PAUSED
virtual ~JetsonVideoTransmissionV1();
// Exposed for the tsdemux "pad-added" callback, which must link the
// dynamic demuxer pad to this queue's sink pad.
GstElement* getVideoQueue(){return m_video_queue;}
// Blocks on the appsink for one decoded frame; returns the buffer PTS
// (nanoseconds) or 0 on failure.
unsigned long long getVideo(cv::Mat &frame);
private:
int m_rotate;
unsigned m_show_video_not_point;
unsigned m_black_point_level;
unsigned m_white_point_level;
int m_cam_resolution;            // index into CAM30_CUNANO_RESOLUTION (0..4)
std::string m_ip_address;        // udpsrc bind address
int m_port;                      // udpsrc port
unsigned long m_timeout;         // udpsrc timeout; udpsrc expects nanoseconds
long m_max_buffer;               // appsink max-buffers
bool m_video_sync;               // NOTE(review): declared but not read in the visible code — confirm intent
bool m_video_async;              // NOTE(review): declared but not read in the visible code — confirm intent
bool m_video_drop;               // appsink drop
GstElement *m_pipeline;
GstStateChangeReturn m_state;    // result of the last set_state call
GstElement *m_src;
GstElement *m_demuxer;
GstElement *m_h264parse;
GstElement *m_avdec_h264;
GstElement *m_videoconvert;
GstElement *m_video_queue;
GstElement *m_video_sink;
ACE_Thread_Mutex m_mutex;        // guards start(); see start()/stop()
//ACE_Condition<ACE_Thread_Mutex> m_condition;
cv::Mat m_frame;                 // last frame pulled by getData()
};
JetsonVideoTransmissionV1.cpp
#include "JetsonVideoTransmissionV1.h"
#include "filter.h"
#include <vector>
#include <algorithm>
#include <string>
#include <map>
// list of most possible data source for gstreamer
// Accepted values for the "src_kind" config key; validated in configure().
const std::vector<std::string> SOURCE_LIST{
"filesrc",
"multifilesrc",
"souphttpsrc",
"giosrc",
"rtspsrc",
"fakesrc",
"v4l2src",
"udpsrc"  // the source this class is normally configured with
};
// https://www.e-consystems.com/ar0330-lowlight-cameramodule.asp
// Supported sensor modes of the e-CAM30_CUNANO camera; the enum value
// matches the "cam_resolution" config integer (0..4).
enum RESOL_CAM30_CUNANO{CAM30_VGA, CAM30_HD, CAM30_FULLHD, CAM30_3_MP, CAM30_3_4_MP};
// Mode -> {width, height} in pixels.
const std::map<RESOL_CAM30_CUNANO, std::pair<int, int>> CAM30_CUNANO_RESOLUTION{
{CAM30_VGA, {640, 480}},
{CAM30_HD, {1280, 720}},
{CAM30_FULLHD, {1920, 1080}},
{CAM30_3_MP, {2304, 1296}},
{CAM30_3_4_MP, {2304, 1536}}
};
// "pad-added" handler for tsdemux: links the dynamically created demuxer
// source pad to the video queue's sink pad.
// @param element  the demuxer emitting the signal (unused)
// @param pad      the new demuxer source pad
// @param data     the owning JetsonVideoTransmissionV1 instance
static void on_pad_added_video(GstElement *element, GstPad *pad, gpointer data)
{
    JetsonVideoTransmissionV1 *jetVideo = (JetsonVideoTransmissionV1 *)data;
    GstElement *video_queue = jetVideo->getVideoQueue();
    LOG_INFO("%s%i: Dynamic pad created, linking tsdemux/video_queue\n", jetVideo->getName(), jetVideo->index());
    GstPad *sinkpad = gst_element_get_static_pad(video_queue, "sink");
    if (sinkpad == nullptr)
    {
        // BUGFIX: the original dereferenced a potentially null pad.
        LOG_ERROR("%s%i: video_queue has no sink pad\n", jetVideo->getName(), jetVideo->index());
        return;
    }
    // BUGFIX: the original ignored the link result, so a failed link
    // (e.g. an audio pad from the demuxer) went unnoticed.
    if (gst_pad_link(pad, sinkpad) != GST_PAD_LINK_OK)
    {
        LOG_ERROR("%s%i: failed to link tsdemux pad to video_queue\n", jetVideo->getName(), jetVideo->index());
    }
    gst_object_unref(sinkpad);
}
// Releases the cached frame and tears down the GStreamer pipeline, if one
// was ever built by configure().
JetsonVideoTransmissionV1::~JetsonVideoTransmissionV1(){
    m_frame.release();
    if (m_pipeline == nullptr)
        return;
    // Drive the pipeline to NULL before dropping our reference so all
    // elements shut down cleanly.
    m_state = gst_element_set_state(GST_ELEMENT(m_pipeline), GST_STATE_NULL);
    gst_object_unref(GST_OBJECT(m_pipeline));
    m_pipeline = nullptr;
}
// Bus watch installed in configure(): prints pipeline errors and
// end-of-stream notifications to stdout. Returning TRUE keeps the watch
// alive so subsequent messages are still delivered.
static gboolean bus_call(GstBus *bus, GstMessage *message, gpointer data)
{
    switch (GST_MESSAGE_TYPE(message))
    {
        case GST_MESSAGE_ERROR:
        {
            GError *err = nullptr;
            gchar *dbg_text = nullptr;
            gst_message_parse_error(message, &err, &dbg_text);
            g_print("Error: %s\n", err->message);
            // The debug string and GError are owned by us after parsing.
            g_free(dbg_text);
            g_error_free(err);
            break;
        }
        case GST_MESSAGE_EOS:
            g_print("End of stream\n");
            break;
        default:
            // Every other message type (state changes, tags, QoS, ...).
            g_print("Something other\n");
            break;
    }
    return TRUE;
}
// Reads the configuration section, builds the receiving pipeline
// (src -> tsdemux -> queue -> h264parse -> avdec_h264 -> videoconvert -> appsink),
// wires the dynamic demuxer pad, tunes the appsink and pre-rolls the pipeline.
// @param sec   configuration section (must be non-null)
// @param onfly unused here; part of the InputSource interface
// @throws InputSourcesException on missing mandatory settings or pipeline
//         construction failure
void JetsonVideoTransmissionV1::configure(Config::Section *sec, bool onfly){
    Assert(sec);
    setIndex(sec);
    // Initialize GStreamer once per process.
    if (!gst_is_initialized())
    {
        GError *err_init = nullptr;
        if (!gst_init_check(nullptr, nullptr, &err_init))
        {
            LOG_ERROR("%s%i: gstreamer is not initialized with message: %s\n", this->getName(), this->index(), err_init->message);
        }
        if (err_init != nullptr)
        {
            g_error_free(err_init);
        }
    }
    // Mandatory network settings.
    m_ip_address = sec->getString("ip_address");
    if(m_ip_address == "")
    {
        InputSourcesException::raise(__FILE__, __LINE__, "IP address are not set!\n");
    }
    m_port = sec->getInt("port");
    if (!m_port)
    {
        InputSourcesException::raise(__FILE__, __LINE__, "Port are not set!\n");
    }
    // Optional settings fall back to 0 (udpsrc: 0 timeout = wait forever;
    // appsink: 0 max-buffers = unlimited).
    m_timeout = (unsigned long)sec->getInt("timeout");
    if (!m_timeout)
    {
        LOG_INFO("%s%i: timeout is not set\n", this->getName(), this->index());
        m_timeout = 0;
    }
    m_max_buffer = sec->getInt("max_buffers");
    if (m_max_buffer < 0)
    {
        LOG_INFO("%s%i: max buffer is not set\n", this->getName(), this->index());
        m_max_buffer = 0;
    }
    std::string src_kind = sec->getString("src_kind"); // expected to be "udpsrc"
    if (std::find(SOURCE_LIST.begin(), SOURCE_LIST.end(), src_kind) == SOURCE_LIST.end())
    {
        InputSourcesException::raise(__FILE__, __LINE__, "Unsupported kind source data: %s\n", src_kind.c_str());
    }
    std::string pipeline_name = std::string(this->getName()) + std::to_string(this->index()) + "_pipeline";
    m_video_drop = sec->getBoolean("video_drop");
    m_pipeline = gst_pipeline_new(pipeline_name.c_str());
    m_src = gst_element_factory_make(src_kind.c_str(), nullptr);
    m_demuxer = gst_element_factory_make("tsdemux", nullptr);
    m_video_queue = gst_element_factory_make("queue", nullptr);
    m_h264parse = gst_element_factory_make("h264parse", nullptr);
    m_avdec_h264 = gst_element_factory_make("avdec_h264", nullptr);
    m_videoconvert = gst_element_factory_make("videoconvert", nullptr);
    m_video_sink = gst_element_factory_make("appsink", nullptr);
    // BUGFIX: the original tested m_avdec_h264 twice and never checked
    // m_h264parse, so a missing h264parse plugin went undetected here.
    if (!m_pipeline || !m_src || !m_demuxer || !m_video_queue || !m_h264parse || !m_avdec_h264 || !m_videoconvert || !m_video_sink)
    {
        InputSourcesException::raise(__FILE__, __LINE__, "Could not construct pipeline %s!\n", gst_element_get_name(m_pipeline));
    }
    else
    {
        LOG_INFO("%s%i: pipeline %s is constructed\n", this->getName(), this->index(), gst_element_get_name(m_pipeline));
    }
    g_object_set(G_OBJECT(m_src), "address", m_ip_address.c_str(), nullptr);
    g_object_set(G_OBJECT(m_src), "port", m_port, nullptr);
    // BUGFIX: udpsrc "timeout" is a guint64 in nanoseconds. Passing an
    // unsigned long through varargs misreads the value where the two types
    // differ in size, so cast explicitly (and read it back as guint64).
    g_object_set(G_OBJECT(m_src), "timeout", (guint64)m_timeout, nullptr);
    gchar *ip;
    g_object_get(G_OBJECT(m_src), "address", &ip, nullptr);
    LOG_INFO("%s%i: udpsrc IP address is set %s\n", this->getName(), this->index(), ip);
    g_free(ip); // BUGFIX: g_object_get returns a copy of string properties — the original leaked it
    gint port;
    g_object_get(G_OBJECT(m_src), "port", &port, nullptr);
    LOG_INFO("%s%i: udpsrc port is set %i\n", this->getName(), this->index(), port);
    guint64 timeout;
    g_object_get(G_OBJECT(m_src), "timeout", &timeout, nullptr);
    LOG_INFO("%s%i: udpsrc timeout is set %llu nsec\n", this->getName(), this->index(), (unsigned long long)timeout);
    gst_bin_add_many(GST_BIN(m_pipeline), m_src, m_demuxer, m_video_queue, m_h264parse, m_avdec_h264, m_videoconvert, m_video_sink, nullptr);
    if (!gst_element_link(m_src, m_demuxer))
    {
        LOG_ERROR("%s%i: udpsrc and tsdemux are not linked\n", this->getName(), this->index());
    }
    if (!gst_element_link_many(m_video_queue, m_h264parse, m_avdec_h264, m_videoconvert, m_video_sink, nullptr))
    {
        LOG_ERROR("%s%i: video_queue, h264parse, avdec_h264, videoconvert, video_sink are not linked\n", this->getName(), this->index());
    }
    // tsdemux creates its source pads only after stream detection, so the
    // video branch is linked from the pad-added callback.
    g_signal_connect(m_demuxer, "pad-added", G_CALLBACK(on_pad_added_video), this);
    // "max-buffers" is a guint property: cast the long member explicitly
    // so the varargs read is well-defined.
    g_object_set(m_video_sink, "emit-signals", TRUE, "max-buffers", (guint)m_max_buffer, nullptr);
    g_object_set(m_video_sink, "drop", (gboolean)m_video_drop, nullptr);
    // LATENCY FIX: appsink synchronizes buffers to the pipeline clock by
    // default, which adds the multi-second delay observed on this stream.
    // Deliver decoded frames as soon as they arrive.
    g_object_set(m_video_sink, "sync", FALSE, nullptr);
    gboolean emit_signal_v;
    g_object_get(G_OBJECT(m_video_sink), "emit-signals", &emit_signal_v, nullptr);
    LOG_INFO("%s%i: appsink video emit-signals is set %s\n", this->getName(), this->index(), (emit_signal_v ? "true" : "false"));
    guint max_buffers_v;
    g_object_get(G_OBJECT(m_video_sink), "max-buffers", &max_buffers_v, nullptr);
    LOG_INFO("%s%i: appsink video max-buffers is set %u\n", this->getName(), this->index(), max_buffers_v);
    // BUGFIX: the original declared a local "gboolean m_video_drop" that
    // shadowed the bool member of the same name; use a distinct name.
    gboolean drop_v;
    g_object_get(G_OBJECT(m_video_sink), "drop", &drop_v, nullptr);
    LOG_INFO("%s%i: appsink video drop is set %s\n", this->getName(), this->index(), (drop_v ? "true" : "false"));
    GstBus *bus = gst_pipeline_get_bus(GST_PIPELINE(m_pipeline));
    // NOTE(review): the watch id is never passed to g_source_remove(), so
    // the watch lives for the lifetime of the process — confirm this is
    // acceptable if configure() can run more than once.
    guint bus_watch_id = gst_bus_add_watch(bus, bus_call, nullptr);
    (void)bus_watch_id;
    gst_object_unref(bus);
    // Camera resolution: index into CAM30_CUNANO_RESOLUTION; clamp invalid
    // values to HD (1).
    m_cam_resolution = sec->getInt("cam_resolution");
    if (m_cam_resolution < CAM30_VGA || m_cam_resolution > CAM30_3_4_MP)
    {
        m_cam_resolution = 1;
        LOG_WARNING("%s%i: error in cam_resolution setting, set default = 1\n", this->getName(), this->index());
    }
    RESOL_CAM30_CUNANO resol = static_cast<RESOL_CAM30_CUNANO>(m_cam_resolution);
    m_vConf.m_width = CAM30_CUNANO_RESOLUTION.at(resol).first;
    m_vConf.m_height = CAM30_CUNANO_RESOLUTION.at(resol).second;
    LOG_INFO("%s%i: width: %i\t height: %i\n", this->getName(), this->index(), m_vConf.m_width, m_vConf.m_height);
    /*
    some code
    */
    // Presumably a pre-roll trick: briefly go PLAYING, then hold in PAUSED
    // until start() is called — confirm this is intentional.
    m_state = gst_element_set_state(GST_ELEMENT(m_pipeline), GST_STATE_PLAYING);
    m_state = gst_element_set_state(GST_ELEMENT(m_pipeline), GST_STATE_PAUSED);
    return;
}
// Moves the pipeline to PLAYING and marks the source as started.
// @throws InputSourcesException if the state change fails outright.
void JetsonVideoTransmissionV1::start(){
    m_mutex.acquire();
    m_state = gst_element_set_state(GST_ELEMENT(m_pipeline), GST_STATE_PLAYING);
    if(m_state == GST_STATE_CHANGE_FAILURE){
        // BUGFIX: raise() leaves this frame without reaching the release
        // below, so the original kept the mutex locked forever and the next
        // start() deadlocked. Release before raising.
        m_mutex.release();
        InputSourcesException::raise(__FILE__, __LINE__, "Can not start %s\n", gst_element_get_name(m_pipeline));
    }
    m_started = true;
    m_mutex.release();
    return;
}
// Moves the pipeline back to PAUSED and marks the source as stopped.
// NOTE(review): unlike start(), this path takes no mutex — confirm stop()
// cannot race with start() in the caller.
void JetsonVideoTransmissionV1::stop(){
    m_state = gst_element_set_state(GST_ELEMENT(m_pipeline), GST_STATE_PAUSED);
    m_started = false;
}
// Pulls the next decoded frame from the pipeline into m_frame and reports
// the processing timestamp. Always increments and returns the frame id,
// even when the read fails (callers see the id advance regardless).
unsigned JetsonVideoTransmissionV1::getData(void *p, size_t size, time_processing_t& time){
cv::Mat gsFrame; // presumably consumed by the elided processing below — confirm
// Buffer PTS in nanoseconds; 0 on failure. Apparently used by the elided
// code — confirm, otherwise it is an unused local.
unsigned long long time_pts = getVideo(m_frame);
if (m_frame.empty())
{
LOG_INFO("%s%i::getData: Failed to read image\n", this->getName(), this->index());
return ++m_frame_id;
}
/*
some code
*/
// Wall-clock seconds since the HW manager start time.
time = (ACE_OS::gettimeofday().get_msec() - HW_MANAGER->getStartTime()) / 1000.0;
return ++m_frame_id;
}
// Blocks on the appsink for one decoded sample and copies it into `frame`
// as an I420 image (height * 3/2 rows of 8-bit data).
// @param frame  receives a deep copy of the decoded frame
// @return buffer PTS in nanoseconds, or 0 on any failure
unsigned long long JetsonVideoTransmissionV1::getVideo(cv::Mat &frame)
{
    GstSample *sample_video = gst_app_sink_pull_sample(GST_APP_SINK(m_video_sink));
    if (!sample_video)
    {
        LOG_ERROR("%s%i::getData: Failed to read image\n", this->getName(), this->index());
        return 0;
    }
    GstCaps *caps_video = gst_sample_get_caps(sample_video);
    if (!caps_video)
    {
        LOG_ERROR("%s%i::getData: could not get snapshot format\n", this->getName(), this->index());
        gst_sample_unref(sample_video); // BUGFIX: original leaked the sample on this path
        return 0;
    }
    // We need the final caps on the buffer to learn the frame size.
    GstStructure *structure_video = gst_caps_get_structure(caps_video, 0);
    int width = 0, height = 0;
    // BUGFIX: the original overwrote the width result with the height
    // result, so a missing "width" field went undetected.
    gboolean res = gst_structure_get_int(structure_video, "width", &width)
                && gst_structure_get_int(structure_video, "height", &height);
    if (!res)
    {
        LOG_ERROR("%s%i::getData: could not get snapshot dimension\n", this->getName(), this->index());
        gst_sample_unref(sample_video); // BUGFIX: original leaked the sample on this path
        return 0;
    }
    GstBuffer *buffer_video = gst_sample_get_buffer(sample_video);
    unsigned long long time_pts = buffer_video->pts;
    GstMapInfo map_info_video;
    if (!gst_buffer_map(buffer_video, &map_info_video, (GstMapFlags)(GST_MAP_READ)))
    {
        LOG_ERROR("%s%i::getData: could not map video buffer\n", this->getName(), this->index());
        gst_sample_unref(sample_video);
        return 0;
    }
    // BUGFIX: the original constructed a cv::Mat that merely WRAPPED the
    // mapped data and then unmapped/unreffed it, leaving `frame` pointing
    // at freed memory. copyTo() makes a deep copy before release.
    cv::Mat(cv::Size(width, height * 3 / 2), CV_8UC1, (char *)map_info_video.data, cv::Mat::AUTO_STEP).copyTo(frame);
    gst_buffer_unmap(buffer_video, &map_info_video);
    gst_sample_unref(sample_video);
    return time_pts;
}
This code works, but the received video has a delay of about 2–3 seconds.
Is it possible to fix the delay using GStreamer tools?
P.S.
gstreamer version 1.14.5
In OpenCV, using cv::VideoCapture with the same pipeline on the Jetson Nano side, the delay is absent…