The camera captures an H.264 video stream and pushes it to a specific UDP port, and VLC (on Windows) plays it. I want to add a time watermark in a corner so the video shows the real time during playback.
Hi,
Not quite sure about your use case. Please refer to the user guide first.
[url]http://developer.nvidia.com/embedded/dlc/l4t-Jetson-TK1-Multimedia-User-Guide-R21-5[/url]
I just want to print the live RTC date and time in a corner of the video when VLC plays it.
I want to start from /home/nvidia/tegra_multimedia_api/samples/10_camera_recording and add a watermark to the demo. The watermark format is (MM/DD/YYYY HH:MM:SS), i.e. the real-time RTC time.
Hi,
The sample is for TX1/TX2. On TK1, you have to use GStreamer.
On TX1, could you give me a detailed description? A demo would be even better. Many thanks.
You can refer to the two posts below:
Launch rtsp server
[url]https://devtalk.nvidia.com/default/topic/1018689/jetson-tx2/vlc-playing-gstreamer-flow/post/5187270/#5187270[/url]
Sample pipeline of using clockoverlay
[url]https://devtalk.nvidia.com/default/topic/1025961/jetson-tx2/adding-overlay-to-the-tegra-camera-api-argus-quot-gstvideoencode-quot-sample/post/5219238/#5219238[/url]
1. https://github.com/GStreamer/gst-rtsp-server/blob/master/examples/test-launch.c — I compiled the code and ran ./test-launch "videotestsrc ! omxh265enc ! rtph265pay name=pay0 pt=96". In VLC I can see the video, but no watermark in the "MM/DD/YYYY HH:MM:SS" format. How should I modify the launch parameters?
2. I also tried the code below, adapted from the gstVideoEncode sample:
#include <Argus/Argus.h>
#include <gst/gst.h>
#include <stdlib.h>
#include <unistd.h>
#include "Error.h"
#include "Options.h"
#include "PreviewConsumer.h"
#include “GLContext.h”
#include <dirent.h>
#include <libgen.h> // assumption: the original header name was lost in the forum formatting; libgen.h declares basename() used in main()
namespace ArgusMesa
{
// Globals
static ArgusSamples::EGLDisplayHolder g_display;
static const Argus::Size2D<uint32_t> PREVIEW_STREAM_SIZE(3840, 2160);
class GstFramework
{
protected:
GstState gst_state;
GstElement *m_pipeline;
public:
GMainLoop *loop;
GstFramework()
: gst_state(GST_STATE_NULL)
, m_pipeline(NULL)
, loop(g_main_loop_new(NULL,FALSE)) {}
~GstFramework()
{
shutdown();
}
/**
* Initialize the GStreamer video encoder pipeline.
* @param[in] eglStream --- The EGLStream to consume frames from.
*/
bool initialize(EGLStreamKHR eglStream)
{
// Initialize GStreamer.
gst_init(NULL, NULL);
GstBus *bus;
guint bus_watch_id;
GstElement *src, *srcfilter, *nvconv;
GstCaps *caps;
/* Create GStreamer Elements */
m_pipeline = gst_pipeline_new(NULL);
src = gst_element_factory_make("nveglstreamsrc", NULL);
srcfilter = gst_element_factory_make("capsfilter", NULL);
nvconv = gst_element_factory_make("nvvidconv", NULL);
GstElement *overlay = gst_element_factory_make("clockoverlay", NULL);
GstElement *enc = gst_element_factory_make("omxh264enc",NULL);
GstElement *filesink = gst_element_factory_make("splitmuxsink", NULL);
if (!m_pipeline || !src || !srcfilter || !nvconv || !overlay || !enc || !filesink) {
g_printerr("One element count not be created.\n");
return false;
}
/* Set up GStreamer Bus Message Handler */
bus = gst_pipeline_get_bus(GST_PIPELINE(m_pipeline));
bus_watch_id = gst_bus_add_watch(bus, bus_callback, loop);
gst_object_unref(bus);
/* Configure GStreamer Element properties */
g_object_set(G_OBJECT(src), "display", g_display.get(), NULL);
g_object_set(G_OBJECT(src), "eglstream", eglStream, NULL);
caps = gst_caps_from_string("video/x-raw(memory:NVMM), width=2592, height=1944, framerate=30/1, format=I420");
g_object_set(srcfilter, "caps", caps, NULL);
gst_caps_unref(caps);
g_object_set(filesink, "location", "%05d.264", NULL);
/* Add all elements to the pipeline */
gst_bin_add_many(GST_BIN(m_pipeline),
src, srcfilter, nvconv, overlay, enc, filesink, NULL);
/* Link elements */
gst_element_link_many(src, srcfilter, nvconv, overlay, enc, filesink, NULL);
printf("GST Intialization done\n");
return true;
}
/**
* Watches for messages on the pipeline bus.
* @param[in] bus --- The GStreamer bus that is being watched.
* @param[in] msg --- The message that was received on the bus.
* @param[in] data --- Any user data to be passed into the callback.
*/
static gboolean bus_callback(GstBus *bus, GstMessage *msg, gpointer data)
{
GMainLoop *lp = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE(msg)) {
case GST_MESSAGE_EOS: {
g_main_loop_quit (lp);
break;
}
case GST_MESSAGE_ERROR: {
gchar *debug;
GError *error;
gst_message_parse_error(msg, &error, &debug);
g_free (debug);
g_printerr("Error: %s\n", error->message);
g_error_free(error);
g_main_loop_quit(lp);
break;
}
default:
break;
}
return TRUE;
}
/**
* Stops the pipeline and frees resources.
*/
void shutdown()
{
if (gst_state == GST_STATE_PLAYING)
stopRecording();
if (m_pipeline)
gst_object_unref(m_pipeline);
m_pipeline = NULL;
}
/**
* Start GStreamer pipeline.
*/
bool startRecording()
{
if (!m_pipeline)
ORIGINATE_ERROR("GStreamer pipeline not initialized");
if (gst_state != GST_STATE_NULL)
ORIGINATE_ERROR("GStreamer pipeline already running");
if (gst_element_set_state(m_pipeline, GST_STATE_PLAYING) == GST_STATE_CHANGE_FAILURE)
ORIGINATE_ERROR("Failed to start pipeline");
gst_state = GST_STATE_PLAYING;
return true;
}
/**
* Stop GStreamer pipeline.
*/
bool stopRecording()
{
if (!m_pipeline)
ORIGINATE_ERROR("GStreamer pipeline not initialized");
if (gst_state != GST_STATE_PLAYING)
ORIGINATE_ERROR("GStreamer pipeline not running");
if (gst_element_set_state(m_pipeline, GST_STATE_NULL) == GST_STATE_CHANGE_FAILURE)
ORIGINATE_ERROR("Failed to stop pipeline");
gst_state = GST_STATE_NULL;
return true;
}
}; // class GstFramework
struct ExecuteOptions
{
uint32_t cameraIndex;
uint32_t captureSeconds;
};
/**
* Executes the GStreamer pipeline with options.
* @param[in] options --- ExecuteOptions struct with the camera index and capture duration.
*/
static bool execute(const ExecuteOptions& options)
{
using namespace Argus;
PROPAGATE_ERROR(g_display.initialize(EGL_DEFAULT_DISPLAY));
/* Create CameraProvider */
UniqueObj<CameraProvider> cameraProvider(CameraProvider::create());
ICameraProvider *iCameraProvider = interface_cast<ICameraProvider>(cameraProvider);
printf("Argus Version: %s\n", iCameraProvider->getVersion().c_str());
if (!iCameraProvider)
ORIGINATE_ERROR("Failed to open CameraProvider");
/* Get/use the first available CameraDevice */
std::vector<CameraDevice*> cameraDevices;
if (iCameraProvider->getCameraDevices(&cameraDevices) != STATUS_OK)
ORIGINATE_ERROR("Failed to get CameraDevices");
if (cameraDevices.size() == 0)
ORIGINATE_ERROR("No CameraDevices available");
if (cameraDevices.size() <= options.cameraIndex)
ORIGINATE_ERROR("Camera %d not available; there are %d cameras",
options.cameraIndex, (unsigned)cameraDevices.size());
CameraDevice *cameraDevice = cameraDevices[options.cameraIndex];
ICameraProperties *iCameraProperties = interface_cast<ICameraProperties>(cameraDevice);
if (!iCameraProperties)
ORIGINATE_ERROR("Failed to get ICameraProperties interface");
/* Create CaptureSession */
UniqueObj<CaptureSession> captureSession(iCameraProvider->createCaptureSession(cameraDevice));
ICaptureSession *iSession = interface_cast<ICaptureSession>(captureSession);
if (!iSession)
ORIGINATE_ERROR("Failed to create CaptureSession");
// Get the sensor mode to determine the video output stream resolution.
std::vector<Argus::SensorMode*> sensorModes;
iCameraProperties->getBasicSensorModes(&sensorModes);
if (sensorModes.size() == 0)
ORIGINATE_ERROR("Failed to get sensor modes");
ISensorMode *iSensorMode = interface_cast<ISensorMode>(sensorModes[0]);
if (!iSensorMode)
ORIGINATE_ERROR("Failed to get sensor mode interface");
printf("Capture Resolution: %dx%d\n", iSensorMode->getResolution().width(), iSensorMode->getResolution().height());
/* Set common output stream settings */
UniqueObj<OutputStreamSettings> streamSettings(iSession->createOutputStreamSettings());
IOutputStreamSettings *iStreamSettings = interface_cast<IOutputStreamSettings>(streamSettings);
if (!iStreamSettings)
ORIGINATE_ERROR("Failed to create OutputStreamSettings");
iStreamSettings->setPixelFormat(PIXEL_FMT_YCbCr_420_888);
iStreamSettings->setEGLDisplay(g_display.get());
/* Create video encoder stream */
iStreamSettings->setResolution(PREVIEW_STREAM_SIZE);
UniqueObj<OutputStream> videoStream(iSession->createOutputStream(streamSettings.get()));
IStream *iVideoStream = interface_cast<IStream>(videoStream);
if (!iVideoStream)
ORIGINATE_ERROR("Failed to create video stream");
/* Create capture Request and enable the streams in the Request */
UniqueObj<Request> request(iSession->createRequest(CAPTURE_INTENT_VIDEO_RECORD));
IRequest *iRequest = interface_cast<IRequest>(request);
if (!iRequest)
ORIGINATE_ERROR("Failed to create Request");
if (iRequest->enableOutputStream(videoStream.get()) != STATUS_OK)
ORIGINATE_ERROR("Failed to enable video stream in Request");
/* Initialize the GStreamer video encoder consumer */
GstFramework gstPipeline;
if (!gstPipeline.initialize(iVideoStream->getEGLStream()))
ORIGINATE_ERROR("Failed to initialize gstPipeline EGLStream consumer");
if (!gstPipeline.startRecording())
ORIGINATE_ERROR("Failed to start video recording");
/* Perform repeat capture requests for requested number of seconds */
if (iSession->repeat(request.get()) != STATUS_OK)
ORIGINATE_ERROR("Failed to start repeat capture requests");
else
g_main_loop_run(gstPipeline.loop);
/* If the GMainLoop stops running, the code below will execute */
iSession->stopRepeat();
/* Wait until all frames have completed before stopping recording. */
/// @todo: Not doing this may cause a deadlock.
iSession->waitForIdle();
/* Stop the pipeline */
if (!gstPipeline.stopRecording())
ORIGINATE_ERROR("Failed to stop pipeline");
gstPipeline.shutdown();
videoStream.reset();
return true;
}
}; // namespace ArgusMesa
int main(int argc, char **argv)
{
printf("Executing: %s\n", basename(argv[0]));
ArgusSamples::Value<uint32_t> cameraIndex(0);
ArgusSamples::Value<uint32_t> captureTime(10);
ArgusMesa::ExecuteOptions executeOptions;
executeOptions.cameraIndex = cameraIndex.get();
executeOptions.captureSeconds = captureTime.get();
if (!ArgusMesa::execute(executeOptions))
return EXIT_FAILURE;
return EXIT_SUCCESS;
}
I copied this code into /home/nvidia/tegra_multimedia_api/argus/samples/gstVideoEncode.
Under that path I ran mkdir build, then cd build and cmake .., and this error appeared:
CMake Error at CMakeLists.txt:33 (find_package):
By not providing "FindArgus.cmake" in CMAKE_MODULE_PATH this project has
asked CMake to find a package configuration file provided by "Argus", but
CMake did not find one.
Could not find a package configuration file provided by "Argus" with any of
the following names:
ArgusConfig.cmake
argus-config.cmake
Add the installation prefix of "Argus" to CMAKE_PREFIX_PATH or set
"Argus_DIR" to a directory containing one of the above files. If "Argus"
provides a separate development package or SDK, be sure it has been
installed.
-- Configuring incomplete, errors occurred!
See also "/home/nvidia/tegra_multimedia_api/argus/samples/gstVideoEncode/build/CMakeFiles/CMakeOutput.log".
How can I fix this?
You should run
gst-launch-1.0 nvcamerasrc ! nvvidconv ! 'video/x-raw,format=I420' ! tee ! clockoverlay ! omxh264enc ! rtph264pay name=pay0 pt=96
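By default clockoverlay prints only the time; to get "MM/DD/YYYY HH:MM:SS" you can set its time-format property (strftime syntax) and its position. For the RTSP case the same pipeline would be passed to test-launch, along these lines (illustrative, not a verified command):
./test-launch "nvcamerasrc ! nvvidconv ! video/x-raw,format=I420 ! clockoverlay time-format=\"%m/%d/%Y %H:%M:%S\" halignment=left valignment=top ! omxh264enc ! rtph264pay name=pay0 pt=96"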
I want to start from /home/nvidia/tegra_multimedia_api/samples/10_camera_recording and make some modifications: a time watermark needs to be added to the captured video, and it has to be implemented in code, not on the command line.
Hi,
In GStreamer there are existing elements we can use, but with MMAPI you have to implement it yourself. We have a demonstration of CPU processing in 10_camera_recording:
$ ./camera_recording -c
You can refer to it and implement the functionality of clockoverlay.
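For the timestamp text itself, the standard C time API is enough; a minimal sketch of producing the "MM/DD/YYYY HH:MM:SS" string (rendering it into the frame is what the -c demonstration illustrates):

#include <time.h>

/* Format the current RTC time as "MM/DD/YYYY HH:MM:SS". */
static void getTimestamp(char *buf, size_t len)
{
    time_t now = time(NULL);
    struct tm tmv;
    localtime_r(&now, &tmv);
    strftime(buf, len, "%m/%d/%Y %H:%M:%S", &tmv);
}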
I ran ./camera_recording -c and got:
./camera_recording: invalid option -- 'c'
Usage: camera_recording [OPTIONS]
Options:
-r Set output resolution WxH [Default 640x480]
-f Set output filename [Default output.h264]
-t Set encoder type H264 or H265 [Default H264]
-d Set capture duration [Default 5 seconds]
-s Enable profiling
-v Enable verbose message
-h Print this help
Only the messages above are shown. Could you tell me which option I can use?
The -c option was added in r28.2.
For r28.1, please refer to https://devtalk.nvidia.com/default/topic/1026106/jetson-tx1/usage-of-nvbuffer-apis/post/5224382/#5224382
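For reference, the CPU-access pattern with nvbuf_utils looks roughly like this; a sketch assuming the r28.x NvBuffer API (check nvbuf_utils.h on your release for the exact signatures):

#include "nvbuf_utils.h"

/* Map the Y (luma) plane of a dmabuf for CPU writing, draw on it, sync back. */
static bool drawOnLumaPlane(int dmabuf_fd)
{
    NvBufferParams params;
    if (NvBufferGetParams(dmabuf_fd, &params) != 0)
        return false;

    void *yPlane = NULL;
    if (NvBufferMemMap(dmabuf_fd, 0, NvBufferMem_Read_Write, &yPlane) != 0)
        return false;
    NvBufferMemSyncForCpu(dmabuf_fd, 0, &yPlane);

    /* ... draw the watermark into (unsigned char *)yPlane here;
       each row is params.pitch[0] bytes apart ... */

    NvBufferMemSyncForDevice(dmabuf_fd, 0, &yPlane);
    NvBufferMemUnMap(dmabuf_fd, 0, &yPlane);
    return true;
}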
Hi Chetan:
I downloaded main.cpp, compiled it, and ran ./camera -d 10 -c. When I play the resulting output.h264, a symbol "N" appears. How do I change the "N" watermark into an RTC-time watermark?
Hi DaneLLL:
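// 8x8 bitmap for the letter 'N': a 1 marks a pixel that is drawn into the frame.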
static const int array_n[8][8] = {
{ 1, 1, 0, 0, 0, 0, 1, 1 },
{ 1, 1, 1, 0, 0, 0, 1, 1 },
{ 1, 1, 1, 1, 0, 0, 1, 1 },
{ 1, 1, 1, 1, 1, 0, 1, 1 },
{ 1, 1, 0, 1, 1, 1, 1, 1 },
{ 1, 1, 0, 0, 1, 1, 1, 1 },
{ 1, 1, 0, 0, 0, 1, 1, 1 },
{ 1, 1, 0, 0, 0, 0, 1, 1 }
};
Could you tell me how to change the watermark into a time pattern? I don't understand the method used to draw the "N". Many thanks.
This is a demonstration of CPU processing. There may be better ways to implement it; other users can share their experience.
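One way to extend that demonstration into a time pattern (a sketch only: array_digit, data and pitch are illustrative names, not part of the sample) is to add 8x8 bitmaps for the characters the timestamp needs and blit them one per character cell:

#include <time.h>

/* Hypothetical 8x8 glyphs for '0'-'9' plus ':' and '/', built like array_n. */
static const int array_digit[12][8][8] = { /* fill in like array_n */ };

/* Blit one 8x8 glyph into the Y (luma) plane at pixel position (x, y). */
static void drawGlyph(unsigned char *data, int pitch, int x, int y,
                      const int glyph[8][8])
{
    for (int row = 0; row < 8; row++)
        for (int col = 0; col < 8; col++)
            if (glyph[row][col])
                data[(y + row) * pitch + (x + col)] = 255; /* white pixel */
}

/* Draw the current RTC time as "MM/DD/YYYY HH:MM:SS" in the top-left corner. */
static void drawTimestamp(unsigned char *data, int pitch)
{
    char ts[32];
    time_t now = time(NULL);
    strftime(ts, sizeof(ts), "%m/%d/%Y %H:%M:%S", localtime(&now));
    for (int i = 0; ts[i]; i++) {
        if (ts[i] == ' ')
            continue; /* leave an empty cell for the space */
        int idx = (ts[i] == ':') ? 10 : (ts[i] == '/') ? 11 : ts[i] - '0';
        drawGlyph(data, pitch, 8 * i, 0, array_digit[idx]);
    }
}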
Hi DaneLLL:
Could you provide a demo that shows 2-3 characters in the upper-left corner?
Many Thanks
Fuhai
Hi DaneLLL:
On r28.1 TX1, under /home/nvidia/tegra_multimedia_api/samples/13_recording,
I compiled the demo, which produces a 2x2 mosaic video. How can I print the "N" watermark on each camera's video?
Many thanks
Please refer to the samples and do integration.