Hi, I’m playing around with libargus for the first time and having a rough time figuring out how to channel camera streams through a ROS node. I believe there is a way to convert frames to OpenCV Mats and then use the cv_bridge package to get ROS Images, but I was looking for a more direct and efficient way to do this.
Thanks!
Hi nekhera:
You can try GStreamer to create camera streams on the Jetson platform with a ROS package; see https://github.com/peter-moran/jetson_csi_cam for reference. Note that this package was only tested on a Jetson TX2 with L4T R27.1, ROS Kinetic, and the Leopard Imaging IMX377CS CSI camera, so you may need to modify the package files to run it on other Jetson platforms.
Hey Jeffli,
Thanks for your answer. Unfortunately, I am bound to using Argus, as I am essentially attempting to transform the syncSensor example into a ROS node. I have tried the OpenCV route, but imshow achieves nowhere near the expected framerate, which is why I was hoping for a more direct solution. I have attached my main ROS node executable here; hopefully someone can spot what I might be doing wrong. (I am using 2x Leopard Imaging IMX274 cameras with a TX2 on L4T 32.2.1 and the LI 3CAM carrier board.)
Thanks!
Hi nekhera:
Did you test the FPS directly? For example, with GStreamer:
gst-launch-1.0 nvcamerasrc ! 'video/x-raw(memory:NVMM),
width=(int)1920, height=(int)1080, format=(string)I420, framerate=(fraction)60/1'
! nvvidconv ! 'video/x-raw(memory:NVMM), format=(string)I420' !
fpsdisplaysink text-overlay=false -v
If this FPS test is OK, then I think we need to analyze your code as the next step.
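(Note: since you are on L4T 32.2.1, nvcamerasrc is replaced there by nvarguscamerasrc, so the equivalent test on your release should be something like:)
gst-launch-1.0 nvarguscamerasrc ! 'video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, format=(string)NV12, framerate=(fraction)60/1' ! nvvidconv ! fpsdisplaysink text-overlay=false -v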
Update: going the OpenCV-to-ROS route I can get accurate framerates (measured through rostopic hz); however, from visual comparison (taking pictures of a stopwatch timer), the images are not adequately synchronized (over 100 ms latency in some cases). Not sure why this is, since I'm fairly certain I was getting sub-10 ms latencies with the syncSensor example in a similar test on the same hardware. I have attached my new code. Any help is appreciated!
bump
Sorry for the late response. I cannot open the site https://pastebin.com/8BG6qt1b. Can you share it some other way?
Not a problem. Appreciate the reply. Here is the code.
#include <iostream>
#include <iomanip>
#include <csignal>
#include <ros/ros.h>
#include <ros/console.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/image_encodings.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp> // needed for cvtColor
#include "nvbuf_utils.h" // NvBufferMemMap / NvBufferMemSyncForCpu / NvBufferDestroy
#include "Argus/Argus.h"
#include "EGLStream/EGLStream.h"
#include "EGLStream/NV/ImageNativeBuffer.h"
#include "ArgusHelpers.h"
#include "CommonOptions.h"
#include "EGLGlobal.h"
#include "Error.h"
#include "Thread.h"
using namespace Argus;
using namespace EGLStream;
ros::Publisher left_img_pub;
ros::Publisher right_img_pub;
namespace ArgusSamples {
EGLDisplayHolder g_display;
ICaptureSession *iCaptureSession;
IEGLOutputStream *iStreamLeft;
IEGLOutputStream *iStreamRight;
UniqueObj<CameraProvider> cameraProvider;
static const Size2D<uint32_t> STREAM_SIZE(1280, 720);
static const uint32_t FRAMERATE = 6;
#define PRODUCER_PRINT(...) printf("PRODUCER: " __VA_ARGS__)
#define CONSUMER_PRINT(...) printf("CONSUMER: " __VA_ARGS__)
class StereoConsumerThread : public Thread {
public:
explicit StereoConsumerThread(OutputStream *leftStream, OutputStream *rightStream)
: m_leftStream(leftStream), m_rightStream(rightStream) {}
~StereoConsumerThread() {}
private:
virtual bool threadInitialize();
virtual bool threadExecute();
virtual bool threadShutdown();
OutputStream *m_leftStream;
OutputStream *m_rightStream;
UniqueObj<FrameConsumer> m_leftConsumer;
UniqueObj<FrameConsumer> m_rightConsumer;
};
bool StereoConsumerThread::threadInitialize() {
m_leftConsumer = UniqueObj<FrameConsumer>(FrameConsumer::create(m_leftStream));
m_rightConsumer = UniqueObj<FrameConsumer>(FrameConsumer::create(m_rightStream));
if (!m_leftConsumer || !m_rightConsumer) {
ORIGINATE_ERROR("Failed to create FrameConsumers");
}
return true;
}
bool StereoConsumerThread::threadExecute() {
IEGLOutputStream *leftIStream = interface_cast<IEGLOutputStream>(m_leftStream);
IEGLOutputStream *rightIStream = interface_cast<IEGLOutputStream>(m_rightStream);
IFrameConsumer *leftIFrameConsumer = interface_cast<IFrameConsumer>(m_leftConsumer);
IFrameConsumer *rightIFrameConsumer = interface_cast<IFrameConsumer>(m_rightConsumer);
CONSUMER_PRINT("Waiting until producer is connected...\n");
if (leftIStream->waitUntilConnected() != STATUS_OK ||
rightIStream->waitUntilConnected() != STATUS_OK) {
ORIGINATE_ERROR("Stream failed to connect.");
}
CONSUMER_PRINT("Streams connected, processing frames.\n");
int frameCount = 0;
while (true) {
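// Main loop: check that both EGL streams are still connected, then acquire,
// convert, and publish a stereo pair.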
EGLint streamState = EGL_STREAM_STATE_CONNECTING_KHR;
if (!eglQueryStreamKHR(leftIStream->getEGLDisplay(), leftIStream->getEGLStream(),
EGL_STREAM_STATE_KHR, &streamState) ||
(streamState == EGL_STREAM_STATE_DISCONNECTED_KHR)) {
CONSUMER_PRINT("EGL_STREAM_STATE_DISCONNECTED_KHR received\n");
break;
}
if (!eglQueryStreamKHR(rightIStream->getEGLDisplay(), rightIStream->getEGLStream(),
EGL_STREAM_STATE_KHR, &streamState) ||
(streamState == EGL_STREAM_STATE_DISCONNECTED_KHR)) {
CONSUMER_PRINT("EGL_STREAM_STATE_DISCONNECTED_KHR received\n");
break;
}
UniqueObj<Frame> left_frame(leftIFrameConsumer->acquireFrame());
UniqueObj<Frame> right_frame(rightIFrameConsumer->acquireFrame());
if (!left_frame || !right_frame) {
break;
}
IFrame *left_iframe = interface_cast<IFrame>(left_frame);
IFrame *right_iframe = interface_cast<IFrame>(right_frame);
if (!left_iframe || !right_iframe) {
break;
}
Image *left_image = left_iframe->getImage();
Image *right_image = right_iframe->getImage();
if (!left_image || !right_image) {
break;
}
NV::IImageNativeBuffer *left_inative_buffer = interface_cast<NV::IImageNativeBuffer>(left_image);
NV::IImageNativeBuffer *right_inative_buffer = interface_cast<NV::IImageNativeBuffer>(right_image);
if (!left_inative_buffer || !right_inative_buffer) {
break;
}
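// createNvBuffer allocates a new RGBA pitch-linear buffer and converts the
// captured frame into it -- this happens on every iteration.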
int left_fd = left_inative_buffer->createNvBuffer(STREAM_SIZE, NvBufferColorFormat_ABGR32, NvBufferLayout_Pitch);
int right_fd = right_inative_buffer->createNvBuffer(STREAM_SIZE, NvBufferColorFormat_ABGR32, NvBufferLayout_Pitch);
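// Map both buffers into CPU address space and sync caches before reading.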
void *left_pdata = NULL;
void *right_pdata = NULL;
NvBufferMemMap(left_fd, 0, NvBufferMem_Read, &left_pdata);
NvBufferMemMap(right_fd, 0, NvBufferMem_Read, &right_pdata);
NvBufferMemSyncForCpu(left_fd, 0, &left_pdata);
NvBufferMemSyncForCpu(right_fd, 0, &right_pdata);
cv::Mat left_imgbuf = cv::Mat(STREAM_SIZE.height(), STREAM_SIZE.width(), CV_8UC4, left_pdata);
cv::Mat right_imgbuf = cv::Mat(STREAM_SIZE.height(), STREAM_SIZE.width(), CV_8UC4, right_pdata);
cv::Mat left_display_img;
cv::Mat right_display_img;
cvtColor(left_imgbuf, left_display_img, CV_RGBA2BGR);
cvtColor(right_imgbuf, right_display_img, CV_RGBA2BGR);
NvBufferMemUnMap(left_fd, 0, &left_pdata);
NvBufferMemUnMap(right_fd, 0, &right_pdata);
cv_bridge::CvImage left_out_msg;
cv_bridge::CvImage right_out_msg;
left_out_msg.encoding = sensor_msgs::image_encodings::BGR8;
left_out_msg.image = left_display_img;
right_out_msg.encoding = sensor_msgs::image_encodings::BGR8;
right_out_msg.image = right_display_img;
left_img_pub.publish(left_out_msg.toImageMsg());
right_img_pub.publish(right_out_msg.toImageMsg());
int ret = NvBufferDestroy(left_fd);
if (ret < 0) CONSUMER_PRINT("Failed to destroy left buffer\n");
ret = NvBufferDestroy(right_fd);
if (ret < 0) CONSUMER_PRINT("Failed to destroy right buffer\n");
}
CONSUMER_PRINT("No more frames. Cleaning up.\n");
PROPAGATE_ERROR(requestShutdown());
return true;
}
bool StereoConsumerThread::threadShutdown() {
CONSUMER_PRINT("Done.\n");
return true;
}
std::unique_ptr<StereoConsumerThread> stereoConsumer;
static void argusSigintHandler(int sig) {
iCaptureSession->stopRepeat();
iCaptureSession->waitForIdle();
PRODUCER_PRINT("Captures complete, disconnecting producer.\n");
iStreamLeft->disconnect();
iStreamRight->disconnect();
if (!stereoConsumer->shutdown()) {
PRODUCER_PRINT("Error. Consumer shutdown failed");
}
cameraProvider.reset();
if (!g_display.cleanup()) {
PRODUCER_PRINT("Error. Display cleanup failed");
}
PRODUCER_PRINT("Done -- exiting.\n");
ros::shutdown();
}
static bool execute() {
PROPAGATE_ERROR(g_display.initialize());
cameraProvider = UniqueObj<CameraProvider>(CameraProvider::create());
ICameraProvider *iCameraProvider = interface_cast<ICameraProvider>(cameraProvider);
if (!iCameraProvider) {
ORIGINATE_ERROR("Failed to get ICameraProvider interface");
}
printf("Argus Version: %s\n", iCameraProvider->getVersion().c_str());
std::vector<CameraDevice *> cameraDevices;
iCameraProvider->getCameraDevices(&cameraDevices);
if (cameraDevices.size() < 2) {
ORIGINATE_ERROR("Insufficient number of sensors available");
}
std::vector<CameraDevice *> lrCameras;
lrCameras.push_back(cameraDevices[0]);
lrCameras.push_back(cameraDevices[1]);
UniqueObj<CaptureSession> captureSession(iCameraProvider->createCaptureSession(lrCameras));
iCaptureSession = interface_cast<ICaptureSession>(captureSession);
if (!iCaptureSession) {
ORIGINATE_ERROR("Failed to get capture session interface");
}
UniqueObj<OutputStreamSettings> streamSettings(
iCaptureSession->createOutputStreamSettings(STREAM_TYPE_EGL));
IOutputStreamSettings *iStreamSettings =
interface_cast<IOutputStreamSettings>(streamSettings);
IEGLOutputStreamSettings *iEGLStreamSettings =
interface_cast<IEGLOutputStreamSettings>(streamSettings);
if (!iStreamSettings || !iEGLStreamSettings) {
ORIGINATE_ERROR("Failed to create OutputStreamSettings");
}
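// Capture YUV420 frames; the consumer converts each frame to RGBA via createNvBuffer.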
iEGLStreamSettings->setPixelFormat(PIXEL_FMT_YCbCr_420_888);
iEGLStreamSettings->setResolution(STREAM_SIZE);
iEGLStreamSettings->setEGLDisplay(g_display.get());
iEGLStreamSettings->setMetadataEnable(true);
PRODUCER_PRINT("Creating left stream.\n");
iStreamSettings->setCameraDevice(lrCameras[0]);
UniqueObj<OutputStream> streamLeft(
iCaptureSession->createOutputStream(streamSettings.get()));
iStreamLeft = interface_cast<IEGLOutputStream>(streamLeft);
if (!iStreamLeft) {
ORIGINATE_ERROR("Failed to create left stream");
}
PRODUCER_PRINT("Creating right stream.\n");
iStreamSettings->setCameraDevice(lrCameras[1]);
UniqueObj<OutputStream> streamRight(
iCaptureSession->createOutputStream(streamSettings.get()));
iStreamRight = interface_cast<IEGLOutputStream>(streamRight);
if (!iStreamRight) {
ORIGINATE_ERROR("Failed to create right stream");
}
PRODUCER_PRINT("Launching stereo consumer.\n");
stereoConsumer = std::make_unique<StereoConsumerThread>(streamLeft.get(), streamRight.get());
PROPAGATE_ERROR(stereoConsumer->initialize());
PROPAGATE_ERROR(stereoConsumer->waitRunning());
UniqueObj<Request> request(iCaptureSession->createRequest());
IRequest *iRequest = interface_cast<IRequest>(request);
if (!iRequest) {
ORIGINATE_ERROR("Failed to create Request");
}
ISourceSettings *iSourceSettings =
interface_cast<ISourceSettings>(iRequest->getSourceSettings());
iSourceSettings->setFrameDurationRange(Range<uint64_t>(1e9 / FRAMERATE));
iRequest->enableOutputStream(streamLeft.get());
iRequest->enableOutputStream(streamRight.get());
PRODUCER_PRINT("Starting repeat capture requests.\n");
if (iCaptureSession->repeat(request.get()) != STATUS_OK) {
ORIGINATE_ERROR("Failed to start repeat capture request for preview");
}
ros::spin();
return true;
}
} // namespace ArgusSamples
int main(int argc, char *argv[]) {
ros::init(argc, argv, "argus_stereo_sync_node");
ros::NodeHandle nh;
signal(SIGINT, ArgusSamples::argusSigintHandler);
left_img_pub = nh.advertise<sensor_msgs::Image>("/camera/left/image_raw", 1);
right_img_pub = nh.advertise<sensor_msgs::Image>("/camera/right/image_raw", 1);
if (!ArgusSamples::execute()) {
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
Hi nekhera:
I reviewed your code. Yes, it is almost the same as the samples; only the OpenCV parts differ.
These may be the reasons for the unexpected framerate:
1) The cvtColor function is slow.
2) All the variables are defined locally in the thread, so objects like cv::Mat left_display_img are constructed and destructed on every iteration.
My suggestion: we should first find where the bottleneck is, so:
1) Could you comment out all of the OpenCV code, i.e. these lines:
cv::Mat left_imgbuf = cv::Mat(STREAM_SIZE.height(), STREAM_SIZE.width(), CV_8UC4, left_pdata);
cv::Mat right_imgbuf = cv::Mat(STREAM_SIZE.height(), STREAM_SIZE.width(), CV_8UC4, right_pdata);
cv::Mat left_display_img;
cv::Mat right_display_img;
cvtColor(left_imgbuf, left_display_img, CV_RGBA2BGR);
cvtColor(right_imgbuf, right_display_img, CV_RGBA2BGR);
2) Then, do not declare these two variables locally in the thread; make them thread members like m_leftConsumer, initialize them once (left_out_msg.image = cv image), and reuse them across frames (see the sketch below):
cv_bridge::CvImage left_out_msg;
cv_bridge::CvImage right_out_msg;
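For example, something like this (an untested sketch, for illustration only):
class StereoConsumerThread : public Thread {
    ...
private:
    // Constructed once with the thread, reused for every frame.
    cv_bridge::CvImage m_leftOutMsg;
    cv_bridge::CvImage m_rightOutMsg;
};
bool StereoConsumerThread::threadInitialize() {
    // The encoding never changes, so set it once here instead of per frame.
    m_leftOutMsg.encoding = sensor_msgs::image_encodings::BGR8;
    m_rightOutMsg.encoding = sensor_msgs::image_encodings::BGR8;
    ...
}
// Then in threadExecute(), per frame:
//   m_leftOutMsg.image = left_display_img;
//   left_img_pub.publish(m_leftOutMsg.toImageMsg());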
If the framerate is OK after these modifications, then uncomment the code step by step. Once we find which code is the bottleneck, we can think about how to handle it.
Hi Jeffli,
apologies for the late reply. I have tried your suggestions, but they do not fix the framerate problem. In a separate test I noted that if the resolution is lowered to 480x270, the framerate is 60 fps as expected. I also found a method here (Syncsensor - multiple camera, jpeg Consumer - #3 by sathish.nitt76) that uses CUDA streams to convert to GpuMats - could this be of use?
Appreciate the help!
Through further testing I've determined that the lines:
int left_fd = left_inative_buffer->createNvBuffer(STREAM_SIZE, NvBufferColorFormat_ABGR32, NvBufferLayout_Pitch);
int right_fd = right_inative_buffer->createNvBuffer(STREAM_SIZE, NvBufferColorFormat_ABGR32, NvBufferLayout_Pitch);
are the bottleneck, taking about 30 ms on each iteration. To achieve 60 fps, each iteration cannot take more than 1000/60 = 16.67 ms.
Further testing with the cameras set to 3840x2160 resolution and 60 fps gives the following results:
No changes: 8 fps
Moving object creation to the constructor: 10 fps
Commenting out cvtColor: 48 fps
Commenting out all OpenCV code: 55 fps
Hi nekhera:
Obviously we should remove cvtColor, which is the main bottleneck. So drop those two conversions, keep the 4-channel RGBA data as-is, and convert it directly to a ROS message like this:
sensor_msgs::ImagePtr left_out_msg = cv_bridge::CvImage(std_msgs::Header(), "rgba8", left_imgbuf).toImageMsg();
left_img_pub.publish(left_out_msg);
Maybe that will be faster.
Thanks again for the reply. I will try this soon. In the meantime I am attempting a somewhat different approach: using the cudaBayerDemosaic sample and then the cuMemcpyDtoH function to copy the CUdeviceptr containing the demosaiced data into the ROS image data. This works well and I can get a clean 60 fps output using a single camera. Next I attempted to merge this code with the syncSensor sample to use both cameras. However, if I create the captureSession using a CameraDevice* vector of two cameras (as opposed to one), I suddenly get no output whatsoever. Any ideas?
#include <ros/ros.h>
#include <ros/console.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/image_encodings.h>
#include <sensor_msgs/fill_image.h>
#include <cstring> // memset
#include <unistd.h> // sleep
#include <Argus/Argus.h>
#include "ArgusHelpers.h"
#include "CommonOptions.h"
#include "CUDAHelper.h"
#include "EGLGlobal.h"
#include "Error.h"
#include "Thread.h"
#include "CudaBayerDemosaicKernel.h"
using namespace Argus;
ros::Publisher left_image_pub;
ros::Publisher right_image_pub;
namespace ArgusSamples {
EGLDisplayHolder g_display;
static const Size2D<uint32_t> STREAM_SIZE(3840, 2160);
static const uint32_t FRAMERATE = 60;
#define PRODUCER_PRINT(...) printf("PRODUCER: " __VA_ARGS__)
#define CONSUMER_PRINT(...) printf("CONSUMER: " __VA_ARGS__)
class StereoConsumer : public Thread {
public:
explicit StereoConsumer(EGLDisplay display, EGLStreamKHR stream, Size2D<uint32_t> size);
~StereoConsumer();
private:
virtual bool threadInitialize();
virtual bool threadExecute();
virtual bool threadShutdown();
EGLDisplay m_eglDisplay;
EGLStreamKHR m_inputStream;
Size2D<uint32_t> m_bayerSize;
Size2D<uint32_t> m_outputSize;
CUcontext m_cudaContext;
CUeglStreamConnection m_cudaStreamConnection;
CUdeviceptr m_rgbaBuffer;
uint8_t* img_buffer;
};
StereoConsumer::StereoConsumer(EGLDisplay display, EGLStreamKHR stream, Size2D<uint32_t> size)
: m_eglDisplay(display), m_inputStream(stream), m_bayerSize(size) {
m_outputSize.width() = m_bayerSize.width() / 2;
m_outputSize.height() = m_bayerSize.height() / 2;
}
StereoConsumer::~StereoConsumer() {
shutdown();
}
bool StereoConsumer::threadInitialize() {
PROPAGATE_ERROR(initCUDA(&m_cudaContext));
CUresult cuResult = cuEGLStreamConsumerConnect(&m_cudaStreamConnection, m_inputStream);
if (cuResult != CUDA_SUCCESS) {
ORIGINATE_ERROR("Unable to connect as a consumer (%s)", getCudaErrorString(cuResult));
}
size_t bufferSize = m_outputSize.width() * m_outputSize.height() * sizeof(uint32_t);
cuResult = cuMemAlloc(&m_rgbaBuffer, bufferSize);
if (cuResult != CUDA_SUCCESS) {
ORIGINATE_ERROR("Failed to allocate CUDA buffer");
}
img_buffer = new uint8_t[4 * m_outputSize.width() * m_outputSize.height()];
return true;
}
bool StereoConsumer::threadExecute() {
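// Busy-wait until the producer is connected and the first frame is available.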
EGLint state = EGL_STREAM_STATE_CONNECTING_KHR;
while (state != EGL_STREAM_STATE_NEW_FRAME_AVAILABLE_KHR) {
if (!eglQueryStreamKHR(m_eglDisplay, m_inputStream, EGL_STREAM_STATE_KHR, &state)) {
ORIGINATE_ERROR("Failed to query stream state (possible producer failure).");
}
}
while (true) {
CUgraphicsResource bayerResource = 0;
CUresult cuResult = cuEGLStreamConsumerAcquireFrame(&m_cudaStreamConnection, &bayerResource, NULL, -1);
if (cuResult != CUDA_SUCCESS) {
ORIGINATE_ERROR("Unable to acquire image (%s)", getCudaErrorString(cuResult));
}
CUeglFrame bayerEglFrame;
memset(&bayerEglFrame, 0, sizeof(bayerEglFrame));
cuResult = cuGraphicsResourceGetMappedEglFrame(&bayerEglFrame, bayerResource, 0, 0);
if (cuResult != CUDA_SUCCESS) {
ORIGINATE_ERROR("Unable to get the CUDA frame (%s)", getCudaErrorString(cuResult));
}
if ((bayerEglFrame.cuFormat != CU_AD_FORMAT_SIGNED_INT16) ||
((bayerEglFrame.eglColorFormat != CU_EGL_COLOR_FORMAT_BAYER_RGGB) &&
(bayerEglFrame.eglColorFormat != CU_EGL_COLOR_FORMAT_BAYER_BGGR) &&
(bayerEglFrame.eglColorFormat != CU_EGL_COLOR_FORMAT_BAYER_GRBG) &&
(bayerEglFrame.eglColorFormat != CU_EGL_COLOR_FORMAT_BAYER_GBRG))) {
ORIGINATE_ERROR("Only 16bit signed Bayer color formats are supported");
}
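// Run the CUDA demosaic kernel: Bayer input -> half-resolution RGBA output in m_rgbaBuffer.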
cudaBayerDemosaic((CUdeviceptr)(bayerEglFrame.frame.pPitch[0]), bayerEglFrame.width, bayerEglFrame.height,
bayerEglFrame.pitch, bayerEglFrame.eglColorFormat, (CUdeviceptr)((void*)m_rgbaBuffer));
sensor_msgs::Image output_image;
output_image.header.stamp = ros::Time::now();
output_image.is_bigendian = true;
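// Copy the demosaiced RGBA frame from device memory into the host-side ROS buffer.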
cuMemcpyDtoH(img_buffer, (CUdeviceptr)((void*)m_rgbaBuffer), 4 * m_outputSize.width() * m_outputSize.height());
sensor_msgs::fillImage(output_image, sensor_msgs::image_encodings::BGRA8, m_outputSize.height(),
m_outputSize.width(), 4 * m_outputSize.width(), (void*) img_buffer);
left_image_pub.publish(output_image);
cuResult = cuEGLStreamConsumerReleaseFrame(&m_cudaStreamConnection, bayerResource, NULL);
if (cuResult != CUDA_SUCCESS) {
ORIGINATE_ERROR("Unable to release frame (%s).", getCudaErrorString(cuResult));
}
}
printf("CUDA CONSUMER: No more frames. Cleaning up\n");
printf("CUDA CONSUMER: Done\n");
return true;
}
bool StereoConsumer::threadShutdown() {
CUresult cuResult = cuEGLStreamConsumerDisconnect(&m_cudaStreamConnection);
if (cuResult != CUDA_SUCCESS) {
ORIGINATE_ERROR("Unable to disconnect CUDA (%s)", getCudaErrorString(cuResult));
}
cuMemFree(m_rgbaBuffer);
delete[] img_buffer;
PROPAGATE_ERROR(cleanupCUDA(&m_cudaContext));
return true;
}
static bool execute() {
PROPAGATE_ERROR(g_display.initialize());
UniqueObj<CameraProvider> cameraProvider(CameraProvider::create());
ICameraProvider *iCameraProvider = interface_cast<ICameraProvider>(cameraProvider);
if (!iCameraProvider) {
ORIGINATE_ERROR("Failed to create CameraProvider");
}
printf("Argus Version: %s\n", iCameraProvider->getVersion().c_str());
std::vector<CameraDevice*> cameraDevices;
iCameraProvider->getCameraDevices(&cameraDevices);
if (cameraDevices.size() < 2) {
ORIGINATE_ERROR("Insufficient number of sensor available");
}
std::vector<CameraDevice*> lrCameras;
lrCameras.push_back(cameraDevices[0]);
lrCameras.push_back(cameraDevices[1]);
ArgusHelpers::printCameraDeviceInfo(cameraDevices[0], "");
ArgusHelpers::printCameraDeviceInfo(cameraDevices[1], "");
SensorMode* sensorMode = ArgusHelpers::getSensorMode(lrCameras[0], 0);
ISensorMode *iSensorMode = interface_cast<ISensorMode>(sensorMode);
if (!iSensorMode) {
ORIGINATE_ERROR("Selected sensor mode not available");
}
UniqueObj<CaptureSession> captureSession(iCameraProvider->createCaptureSession(lrCameras));
ICaptureSession *iCaptureSession = interface_cast<ICaptureSession>(captureSession);
if (!iCaptureSession) {
ORIGINATE_ERROR("Failed to create CaptureSession");
}
UniqueObj<OutputStreamSettings> streamSettings(
iCaptureSession->createOutputStreamSettings(STREAM_TYPE_EGL));
IOutputStreamSettings* iStreamSettings =
interface_cast<IOutputStreamSettings>(streamSettings);
IEGLOutputStreamSettings* iEGLStreamSettings =
interface_cast<IEGLOutputStreamSettings>(streamSettings);
if (!iEGLStreamSettings) {
ORIGINATE_ERROR("Failed to create OutputStreamSettings");
}
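// Request RAW16 Bayer output straight from the sensor, in FIFO mode so frames
// are queued for the consumer rather than dropped.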
iEGLStreamSettings->setEGLDisplay(g_display.get());
iEGLStreamSettings->setPixelFormat(PIXEL_FMT_RAW16);
iEGLStreamSettings->setResolution(iSensorMode->getResolution());
iEGLStreamSettings->setMode(EGL_STREAM_MODE_FIFO);
PRODUCER_PRINT("Creating left stream.\n");
iStreamSettings->setCameraDevice(lrCameras[0]);
UniqueObj<OutputStream> leftStream(iCaptureSession->createOutputStream(streamSettings.get()));
IEGLOutputStream *iLeftStream = interface_cast<IEGLOutputStream>(leftStream);
if (!iLeftStream) {
ORIGINATE_ERROR("Failed to create EGLOutputStream");
}
/*PRODUCER_PRINT("Creating right stream.\n");
iStreamSettings->setCameraDevice(lrCameras[1]);
UniqueObj<OutputStream> rightStream(iCaptureSession->createOutputStream(streamSettings.get()));
IEGLOutputStream *iRightStream = interface_cast<IEGLOutputStream>(rightStream);
if (!iRightStream) {
ORIGINATE_ERROR("Failed to create EGLOutputStream");
}*/
PRODUCER_PRINT("Launching stereo consumer.\n");
StereoConsumer stereoConsumer(
iLeftStream->getEGLDisplay(), iLeftStream->getEGLStream(), iEGLStreamSettings->getResolution());
PROPAGATE_ERROR(stereoConsumer.initialize());
PROPAGATE_ERROR(stereoConsumer.waitRunning());
UniqueObj<Request> request(iCaptureSession->createRequest());
IRequest *iRequest = interface_cast<IRequest>(request);
if (!iRequest) {
ORIGINATE_ERROR("Failed to create Request");
}
iRequest->enableOutputStream(leftStream.get());
ISourceSettings *iSourceSettings = interface_cast<ISourceSettings>(request);
if (!iSourceSettings) {
ORIGINATE_ERROR("Failed to get source settings request interface");
}
//iSourceSettings->setSensorMode(sensorMode);
iSourceSettings->setFrameDurationRange(Range<uint64_t>(1e9 / FRAMERATE));
PRODUCER_PRINT("Starting repeat capture requests.\n");
if (iCaptureSession->repeat(request.get()) != STATUS_OK)
ORIGINATE_ERROR("Failed to start repeat capture request for preview");
sleep(20);
iCaptureSession->stopRepeat();
iCaptureSession->waitForIdle();
PRODUCER_PRINT("Captures complete, disconnecting producer.\n");
iLeftStream->disconnect();
//iRightStream->disconnect();
PROPAGATE_ERROR(stereoConsumer.shutdown());
cameraProvider.reset();
PROPAGATE_ERROR(g_display.cleanup());
ros::shutdown();
PRODUCER_PRINT("Done -- exiting.\n");
return true;
}
} // namespace ArgusSamples
int main(int argc, char** argv) {
ros::init(argc, argv, "argus_stereo_node");
ros::NodeHandle nh;
left_image_pub = nh.advertise<sensor_msgs::Image>("/camera/image", 1);
if (!ArgusSamples::execute()) {
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}