#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "Error.h"
#include "Thread.h"

#include <Argus/Argus.h>
#include <EGLStream/EGLStream.h>
#include <EGLStream/NV/ImageNativeBuffer.h>
#include <nvbuf_utils.h>
//#include "NvJpegEncoder.h" // only needed if the JPEG encoder below is re-enabled

#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>
//#include <camera_info_manager/camera_info_manager.h> // add the camerainfo
#include <nv2ros/nv2ros.h> // assumed path for the header declaring nv2ros::Publisher

#include "pipeline.h"

#include <vector>
#include <memory>
#include <time.h>

using namespace Argus;
using namespace EGLStream;

/* Constants */
static const uint32_t MAX_CAMERA_NUM = 6;
static const uint32_t DEFAULT_FRAME_COUNT = 100;
static const uint32_t DEFAULT_FPS = 24;
static bool DO_STAT = false;
static bool DO_ROS = true;

uint32_t frame_duration = 1e9 / DEFAULT_FPS;
int selected_mode = 0;
int ae_top = -1;
int ae_bottom = -1;
int ae_left = -1;
int ae_right = -1;
float exposure_compensation = -1.f;
int exposure_time_min = -1;
int exposure_time_max = -1;
float isp_digital_gain_min = -1.f;
float isp_digital_gain_max = -1.f;
float gain_min = -1.f;
float gain_max = -1.f;
int denoise_mode = -1;
float denoise_strength = -1.f;
int edge_enhance_mode = -1;
float edge_enhance_strength = -1.f;
int image_width = -1;
int image_height = -1;
bool print_time_debug = false;

//static const Size2D<uint32_t> STREAM_SIZE(1280, 960);
//static const Size2D<uint32_t> STREAM_SIZE(3440, 1440);
//static const Size2D<uint32_t> STREAM_SIZE(2448, 2058);
//static const Size2D<uint32_t> CAPTURE_SIZE(2448, 2058);
static Size2D<uint32_t> STREAM_SIZE(1280, 960); //2448, 2058);
// static const Size2D<uint32_t> STREAM_SIZE(640, 480); //2448, 2058);
// static const Size2D<uint32_t> STREAM_SIZE(816, 686); //2448, 2058);
static Size2D<uint32_t> CAPTURE_SIZE(1280, 960); //2448, 2058);
// static const Size2D<uint32_t> CAPTURE_SIZE(640, 480); //2448, 2058);
// static const Size2D<uint32_t> CAPTURE_SIZE(816, 686); //2448, 2058);

//Don't use these numbers...but others might work well
//static const Range<uint64_t> EXPOSURE_TIME_RANGE(44000, 1000000);

/* Globals */
UniqueObj<CameraProvider> g_cameraProvider;
uint32_t g_stream_num = MAX_CAMERA_NUM;
uint32_t g_frame_count = DEFAULT_FRAME_COUNT;
bool g_renderscreen = false;
struct timespec systemtime1, systemtime2, monotonic, monotonic_raw;
uint32_t g_frame_number = 0;
cv_bridge::CvImage img_bridge;
std::vector<ros::Publisher> img_pub;
nv2ros::Publisher* publisher;
std::vector<uint32_t> camera_index_vector;
std::vector<bool> camera_lookup_index_vector(MAX_CAMERA_NUM, false);
std::vector<bool> camera_index_to_ros_vector(MAX_CAMERA_NUM, false);
std::vector<bool> camera_index_to_udp_vector(MAX_CAMERA_NUM, false);
std::vector<std::shared_ptr<Pipeline>> gst_pipeline_vector; // element type assumed: one GStreamer pipeline (from pipeline.h) per camera

// adding here---------------------
std::vector<ros::Publisher> camerainfo_pub;
// ros::Publisher debug_pub;
//camera_info_manager::CameraInfoManager camera_info_manager_;
// ending here------------------------

/* Debug print macros */
#define PRODUCER_PRINT(...) printf("PRODUCER: " __VA_ARGS__)
#define CONSUMER_PRINT(...) printf("CONSUMER: " __VA_ARGS__)
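/*
 * Illustrative sketch (hypothetical helper, not used elsewhere in this file):
 * Argus expresses frame durations in nanoseconds, which is why frame_duration
 * above is computed as 1e9 / fps; at the default 24 fps that works out to
 * 1000000000 / 24 = 41666666 ns per frame.
 */
static inline uint64_t fpsToFrameDurationNs(uint32_t fps)
{
    return 1000000000ULL / fps; /* nanoseconds per frame at the given rate */
}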
printf("CONSUMER: " __VA_ARGS__) #define JPEG_BUFFER_SIZE (CAPTURE_SIZE.area() * 3 / 2) namespace ArgusSamples { /* An utility class to hold all resources of one capture session */ class CaptureHolder : public Destructable { public: explicit CaptureHolder(); virtual ~CaptureHolder(); bool initialize(CameraDevice *device); CaptureSession *getSession() const { return m_captureSession.get(); } OutputStream *getStream() const { return m_outputStream.get(); } Request *getRequest() const { return m_request.get(); } virtual void destroy() { delete this; } private: UniqueObj m_captureSession; UniqueObj m_outputStream; UniqueObj m_request; }; CaptureHolder::CaptureHolder() { } CaptureHolder::~CaptureHolder() { /* Destroy the output stream */ m_outputStream.reset(); } bool CaptureHolder::initialize(CameraDevice *device) { ICameraProperties* props = interface_cast(device); std::vector modes; props->getAllSensorModes(&modes); for(int i = 0; i < modes.size(); i++){ ISensorMode* sensor_mode = interface_cast(modes[i]); ROS_INFO_STREAM("sensor mode " << i << ":"); ROS_INFO_STREAM("\tresolution: " << sensor_mode->getResolution().width() << " x " << sensor_mode->getResolution().height()); ROS_INFO_STREAM("\texposure time range: " << sensor_mode->getExposureTimeRange().min() << ", " << sensor_mode->getExposureTimeRange().max()); ROS_INFO_STREAM("\thdr ratio range: " << sensor_mode->getHdrRatioRange().min() << ", " << sensor_mode->getHdrRatioRange().max()); ROS_INFO_STREAM("\tframe duration range: " << sensor_mode->getFrameDurationRange().min() << ", " << sensor_mode->getFrameDurationRange().max()); ROS_INFO_STREAM("\tanalog gain range: " << sensor_mode->getAnalogGainRange().min() << ", " << sensor_mode->getAnalogGainRange().max()); ROS_INFO_STREAM("\tinput bit depth: " << sensor_mode->getInputBitDepth()); ROS_INFO_STREAM("\toutput bit depth: " << sensor_mode->getOutputBitDepth()); //ROS_INFO_STREAM("\tsensor mode type: " << sensor_mode->getSensorModeType()); //ROS_INFO_STREAM("\tbayer phase: " << sensor_mode->getBayerPhase()); if(i == selected_mode){ STREAM_SIZE = sensor_mode->getResolution(); CAPTURE_SIZE = sensor_mode->getResolution(); } } if(image_width > 0 && image_height > 0){ STREAM_SIZE = Size2D(image_width, image_height); CAPTURE_SIZE = Size2D(image_width, image_height); } ICameraProvider *iCameraProvider = interface_cast(g_cameraProvider); if (!iCameraProvider) ORIGINATE_ERROR("Failed to get ICameraProvider interface"); /* Create the capture session using the first device and get the core interface */ m_captureSession.reset(iCameraProvider->createCaptureSession(device)); ICaptureSession *iCaptureSession = interface_cast(m_captureSession); IEventProvider *iEventProvider = interface_cast(m_captureSession); if (!iCaptureSession || !iEventProvider) ORIGINATE_ERROR("Failed to create CaptureSession"); /* Create the OutputStream */ UniqueObj streamSettings( iCaptureSession->createOutputStreamSettings(STREAM_TYPE_EGL)); IEGLOutputStreamSettings *iEglStreamSettings = interface_cast(streamSettings); if (!iEglStreamSettings) ORIGINATE_ERROR("Failed to create EglOutputStreamSettings"); iEglStreamSettings->setPixelFormat(PIXEL_FMT_YCbCr_420_888); //iEglStreamSettings->setEGLDisplay(g_renderer->getEGLDisplay()); iEglStreamSettings->setResolution(STREAM_SIZE); iEglStreamSettings->setMode(Argus::EGL_STREAM_MODE_MAILBOX); iEglStreamSettings->setMetadataEnable(true); m_outputStream.reset(iCaptureSession->createOutputStream(streamSettings.get())); /* Create capture request and enable the output stream */ 
    /* Create capture request and enable the output stream */
    m_request.reset(iCaptureSession->createRequest());
    IRequest *iRequest = interface_cast<IRequest>(m_request);
    if (!iRequest)
        ORIGINATE_ERROR("Failed to create Request");
    iRequest->enableOutputStream(m_outputStream.get());

    ISourceSettings *iSourceSettings =
        interface_cast<ISourceSettings>(iRequest->getSourceSettings());
    if (!iSourceSettings)
        ORIGINATE_ERROR("Failed to get ISourceSettings interface");
    if (selected_mode >= 0)
        iSourceSettings->setSensorMode(modes[selected_mode]);
    if (frame_duration > 0)
        iSourceSettings->setFrameDurationRange(Range<uint64_t>(frame_duration));
    if (gain_min >= 0.f && gain_max >= 0.f)
        iSourceSettings->setGainRange(Range<float>(gain_min, gain_max));
    if (exposure_time_min >= 0 && exposure_time_max >= 0)
        iSourceSettings->setExposureTimeRange(
            Range<uint64_t>(exposure_time_min, exposure_time_max));
    //iSourceSettings->setExposureTimeRange(EXPOSURE_TIME_RANGE);

    /* Denoise settings */
    IDenoiseSettings *denoiseSettings = interface_cast<IDenoiseSettings>(m_request);
    if (!denoiseSettings)
        ORIGINATE_ERROR("Failed to get DenoiseSettings interface");
    if (denoise_mode == 0)
        denoiseSettings->setDenoiseMode(DENOISE_MODE_OFF);
    else if (denoise_mode == 1)
        denoiseSettings->setDenoiseMode(DENOISE_MODE_FAST);
    else if (denoise_mode == 2)
        denoiseSettings->setDenoiseMode(DENOISE_MODE_HIGH_QUALITY);
    if (denoise_strength >= 0.f)
        denoiseSettings->setDenoiseStrength(denoise_strength);

    /* Auto control settings */
    IAutoControlSettings *autoControlSettings =
        interface_cast<IAutoControlSettings>(iRequest->getAutoControlSettings());
    if (!autoControlSettings)
        ORIGINATE_ERROR("Failed to get AutoControlSettings interface");
    if (ae_top >= 0 && ae_bottom >= 0 && ae_left >= 0 && ae_right >= 0)
    {
        std::vector<AcRegion> regions;
        AcRegion region(ae_left, ae_top, ae_right, ae_bottom, 1.f);
        regions.push_back(region);
        autoControlSettings->setAeRegions(regions);
    }
    if (exposure_compensation >= 0.f)
        autoControlSettings->setExposureCompensation(exposure_compensation);
    if (isp_digital_gain_min >= 0.f && isp_digital_gain_max >= 0.f)
        autoControlSettings->setIspDigitalGainRange(
            Range<float>(isp_digital_gain_min, isp_digital_gain_max));

    /* Edge enhancement settings */
    IEdgeEnhanceSettings *edgeEnhanceSettings = interface_cast<IEdgeEnhanceSettings>(m_request);
    if (!edgeEnhanceSettings)
        ORIGINATE_ERROR("Failed to get EdgeEnhanceSettings interface");
    if (edge_enhance_mode == 0)
        edgeEnhanceSettings->setEdgeEnhanceMode(EDGE_ENHANCE_MODE_OFF);
    else if (edge_enhance_mode == 1)
        edgeEnhanceSettings->setEdgeEnhanceMode(EDGE_ENHANCE_MODE_FAST);
    else if (edge_enhance_mode == 2)
        edgeEnhanceSettings->setEdgeEnhanceMode(EDGE_ENHANCE_MODE_HIGH_QUALITY);
    if (edge_enhance_strength >= 0.f)
        edgeEnhanceSettings->setEdgeEnhanceStrength(edge_enhance_strength);

    return true;
}
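/*
 * Hedged usage sketch: one CaptureHolder is expected per camera device, each
 * owning its own session, stream, and request. Assuming a main() in the style
 * of the Argus multi-camera sample (not shown in this section), the holders
 * would be wired to the consumer roughly like this; cameraDevices is a
 * hypothetical list obtained from ICameraProvider::getCameraDevices():
 *
 *   UniqueObj<CaptureHolder> holders[MAX_CAMERA_NUM];
 *   std::vector<OutputStream*> streams;
 *   for (uint32_t i = 0; i < camera_index_vector.size(); i++)
 *   {
 *       holders[i].reset(new CaptureHolder);
 *       if (!holders[i].get()->initialize(cameraDevices[camera_index_vector[i]]))
 *           ORIGINATE_ERROR("Failed to initialize CaptureHolder");
 *       streams.push_back(holders[i].get()->getStream());
 *   }
 *   ConsumerThread consumer(streams); // defined below
 */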
/*
 * Argus Consumer Thread:
 * This thread acquires buffers from each stream and composites them into
 * one frame. Finally, it renders the composited frame through the EGLRenderer.
 */
class ConsumerThread : public Thread
{
public:
    explicit ConsumerThread(std::vector<OutputStream*> &streams) :
        m_streams(streams),
        m_framesRemaining(g_frame_count),
        m_compositedFrame(0),
        //m_JpegEncoder(NULL),
        m_OutputBuffer(NULL)
    {
    }
    virtual ~ConsumerThread();

protected:
    /** @name Thread methods */
    /**@{*/
    virtual bool threadInitialize();
    virtual bool threadExecute();
    virtual bool threadShutdown();
    /**@}*/

    std::vector<OutputStream*> &m_streams;
    uint32_t m_framesRemaining;
    UniqueObj<FrameConsumer> m_consumers[MAX_CAMERA_NUM];
    int m_dmabufs[MAX_CAMERA_NUM];
    NvBufferCompositeParams m_compositeParam;
    int m_compositedFrame;

    bool processV4L2Fd(int32_t fd, uint64_t frameNumber, uint64_t camNumber);
    bool processOpenCV(int32_t fd, uint64_t frameNumber, uint64_t camNumber,
                       const ros::Time &camera_time);

    //NvJPEGEncoder *m_JpegEncoder;
    unsigned char *m_OutputBuffer;
};

ConsumerThread::~ConsumerThread()
{
    if (m_compositedFrame)
        NvBufferDestroy(m_compositedFrame);
    /*if (m_JpegEncoder)
        delete m_JpegEncoder;*/
    if (m_OutputBuffer)
        delete[] m_OutputBuffer;
    for (uint32_t i = 0; i < m_streams.size(); i++)
        if (m_dmabufs[i])
            NvBufferDestroy(m_dmabufs[i]);
}

bool ConsumerThread::threadInitialize()
{
    /* Initialize buffer handles; the buffers themselves are created by the FrameConsumer */
    memset(m_dmabufs, 0, sizeof(m_dmabufs));

    /* Create a FrameConsumer for each stream */
    for (uint32_t i = 0; i < m_streams.size(); i++)
    {
        m_consumers[i].reset(FrameConsumer::create(m_streams[i]));
    }

    m_OutputBuffer = new unsigned char[JPEG_BUFFER_SIZE];
    if (!m_OutputBuffer)
        return false;

    /*m_JpegEncoder = NvJPEGEncoder::createJPEGEncoder("jpenenc");
    if (!m_JpegEncoder)
        ORIGINATE_ERROR("Failed to create JPEGEncoder.");
    if (DO_STAT)
        m_JpegEncoder->enableProfiling();*/

    return true;
}

bool ConsumerThread::threadExecute()
{
    IEGLOutputStream *iEglOutputStreams[MAX_CAMERA_NUM];
    IFrameConsumer *iFrameConsumers[MAX_CAMERA_NUM];

    for (uint32_t i = 0; i < m_streams.size(); i++)
    {
        iEglOutputStreams[i] = interface_cast<IEGLOutputStream>(m_streams[i]);
        iFrameConsumers[i] = interface_cast<IFrameConsumer>(m_consumers[i]);
        if (!iFrameConsumers[i])
            ORIGINATE_ERROR("Failed to get IFrameConsumer interface");

        /* Wait until the producer has connected to the stream */
        CONSUMER_PRINT("Waiting until producer is connected...\n");
        if (iEglOutputStreams[i]->waitUntilConnected() != STATUS_OK)
            ORIGINATE_ERROR("Stream failed to connect.");
        CONSUMER_PRINT("Producer has connected; continuing.\n");
    }

    //while (m_framesRemaining--)
    NV::IImageNativeBuffer *image_buffer[MAX_CAMERA_NUM];
    UniqueObj<Frame> frame_buffer[MAX_CAMERA_NUM];
    auto stream_size = m_streams.size();
    ros::Rate r(50.0);
    uint64_t system_nano, system_nano2, mono_raw_nano, argus_offset_ns;
    int64_t offset_ros_to_monoraw;
    while (ros::ok())
    {
        //ROS_INFO_STREAM("print: " << print_time_debug);
        r.sleep();
        g_frame_number += 1;
        //printf("Start of frame %u\n", g_frame_number);
        for (uint32_t i = 0; i < camera_index_vector.size(); i++)
        {
            //Status status;

            /* Acquire a frame */
            frame_buffer[i] = UniqueObj<Frame>(iFrameConsumers[i]->acquireFrame());
            IFrame *iFrame = interface_cast<IFrame>