Hi,
I made a camera class based on the Argus/SyncSensor example.
The design is as below because of the existing other implementations.
In the input stage of the video pipeline, it uses the camera class.
The camera class has an initialization method and a read method.
- Initialization of the two cameras
- Frame reading
→ Read Thread 1
→ Read Thread 2
In step 1 (initialization of the two cameras), it creates the capture session, calls initCUDA, connects the CUDA EGL streams, etc.
ex) in SyncSensor - ArgusSamples>execute
...
// Initialize EGL.
PROPAGATE_ERROR(g_display.initialize(NULL));
// Initialize the Argus camera provider.
UniqueObj<CameraProvider> cameraProvider(CameraProvider::create());
// Get the ICameraProvider interface from the global CameraProvider.
ICameraProvider *iCameraProvider = interface_cast<ICameraProvider>(cameraProvider);
if (!iCameraProvider)
ORIGINATE_ERROR("Failed to get ICameraProvider interface");
printf("Argus Version: %s\n", iCameraProvider->getVersion().c_str());
// Get the camera devices.
std::vector<CameraDevice*> cameraDevices;
iCameraProvider->getCameraDevices(&cameraDevices);
if (cameraDevices.size() < 2)
ORIGINATE_ERROR("Must have at least 2 sensors available");
std::vector <CameraDevice*> lrCameras;
lrCameras.push_back(cameraDevices[0]); // Left Camera (the 1st camera will be used for AC)
lrCameras.push_back(cameraDevices[1]); // Right Camera
// Create the capture session, AutoControl will be based on what the 1st device sees.
UniqueObj<CaptureSession> captureSession(iCameraProvider->createCaptureSession(lrCameras));
ICaptureSession *iCaptureSession = interface_cast<ICaptureSession>(captureSession);
if (!iCaptureSession)
ORIGINATE_ERROR("Failed to get capture session interface");
// Create stream settings object and set settings common to both streams.
UniqueObj<OutputStreamSettings> streamSettings(
iCaptureSession->createOutputStreamSettings(STREAM_TYPE_EGL));
IOutputStreamSettings *iStreamSettings = interface_cast<IOutputStreamSettings>(streamSettings);
IEGLOutputStreamSettings *iEGLStreamSettings =
interface_cast<IEGLOutputStreamSettings>(streamSettings);
if (!iStreamSettings || !iEGLStreamSettings)
ORIGINATE_ERROR("Failed to create OutputStreamSettings");
iEGLStreamSettings->setPixelFormat(PIXEL_FMT_YCbCr_420_888);
iEGLStreamSettings->setResolution(STREAM_SIZE);
iEGLStreamSettings->setEGLDisplay(g_display.get());
// Create egl streams
PRODUCER_PRINT("Creating left stream.\n");
iStreamSettings->setCameraDevice(lrCameras[0]);
UniqueObj<OutputStream> streamLeft(iCaptureSession->createOutputStream(streamSettings.get()));
IEGLOutputStream *iStreamLeft = interface_cast<IEGLOutputStream>(streamLeft);
if (!iStreamLeft)
ORIGINATE_ERROR("Failed to create left stream");
PRODUCER_PRINT("Creating right stream.\n");
iStreamSettings->setCameraDevice(lrCameras[1]);
UniqueObj<OutputStream> streamRight(iCaptureSession->createOutputStream(streamSettings.get()));
IEGLOutputStream *iStreamRight = interface_cast<IEGLOutputStream>(streamRight);
if (!iStreamRight)
ORIGINATE_ERROR("Failed to create right stream");
PRODUCER_PRINT("Launching disparity checking consumer\n");
StereoDisparityConsumerThread disparityConsumer(iStreamLeft, iStreamRight);
PROPAGATE_ERROR(disparityConsumer.initialize());
PROPAGATE_ERROR(disparityConsumer.waitRunning());
// Create a request
UniqueObj<Request> request(iCaptureSession->createRequest());
IRequest *iRequest = interface_cast<IRequest>(request);
if (!iRequest)
ORIGINATE_ERROR("Failed to create Request");
PRODUCER_PRINT("Launching disparity checking consumer\n");
////////here I use initCUDA before the stream connection///////////
CUcontext g_cudaContext = 0;
PROPAGATE_ERROR(initCUDA(&g_cudaContext));
// and then, connect the stream using cuEGLStreamConsumerConnect
//
...
In step 2 (frame reading), it spawns two threads, each of which loops the frame grabbing as below.
for (int i = 0; i < m_camera_count; i++)
m_input_thread[i] = std::thread(&CameraStage::read, this, i);
The read method loops the frame-grabbing function of the camera object.
///t_index is the camera index; 0 or 1
CUresult cuResult;
// printf("Acquiring an image from the EGLStream\n");
CUgraphicsResource cudaResource = 0;
CUstream cudaStream = 0;
CUresult status;
cudaFree(0);
cuResult = cuEGLStreamConsumerAcquireFrame(&m_cudaConnections[t_index], &cudaResource,
&cudaStream, -1);
if (cuResult != CUDA_SUCCESS) {
REPORT_ERROR("Unable to acquire an image frame from the EGLStream with CUDA as a "
"consumer (CUresult %s)",
getCudaErrorString(cuResult));
}
// Get the CUDA EGL frame.
CUeglFrame cudaEGLFrame;
cuResult = cuGraphicsResourceGetMappedEglFrame(&cudaEGLFrame, cudaResource, 0, 0);
if (cuResult != CUDA_SUCCESS) {
REPORT_ERROR("Unable to get the CUDA EGL frame (CUresult %s).",
getCudaErrorString(cuResult));
}
BUT, when it calls cuEGLStreamConsumerAcquireFrame from these two looping threads, it fails with the error below.
invalid device context, 201
When I run only a single frame-reading loop, the frames are read properly.
Should I use a separate CUcontext (g_cudaContext) and CUstream (cudaStream) per thread, or make the single context current in each thread?
Thank you.