Hi Jerry,
I looked over syncStereo, but did not see a major difference in how the capture session is being created.
Would you mind looking at our implementation to see if something has been done incorrectly?
Here is a snippet of how we are setting up our capture session currently:
auto *camera_provider = GetCameraProvider();
if (!camera_provider)
{
std::fprintf(stderr, "Failed to get ICameraProvider interface\n");
return false;
}
std::vector<Argus::CameraDevice *> camera_devices;
camera_provider->getCameraDevices(&camera_devices);
auto max_camera_index = *std::max_element(indices.begin(), indices.end());
if (camera_devices.size() <= max_camera_index)
{
std::fprintf(stderr, "Found %zu cameras, which is not enough for the specified camera index %u\n", camera_devices.size(), max_camera_index);
return false;
}
std::vector<Argus::CameraDevice *> devices;
std::vector<std::pair<Argus::CameraDevice *, uint32_t>> device_index_pairs;
for (auto camera_index : indices)
{
auto *device = camera_devices[camera_index];
devices.push_back(device);
device_index_pairs.emplace_back(device, camera_index);
}
// Begin a capture session for the selected devices.
Argus::Status capture_status = {};
Argus::UniqueObj<Argus::CaptureSession> capture_session(camera_provider->createCaptureSession(devices, &capture_status));
switch (capture_status)
{
case Argus::STATUS_OK:
break;
case Argus::STATUS_UNAVAILABLE:
std::fprintf(stderr, "A is camera already in use\n");
return false;
default:
std::fprintf(stderr, "Could not capture cameras: %u\n", (uint32_t)capture_status);
return false;
}
Argus::ICaptureSession *i_capture_session = Argus::interface_cast<Argus::ICaptureSession>(capture_session);
if (!i_capture_session)
{
std::fprintf(stderr, "Failed to get ICaptureSession interface\n");
return false;
}
auto stream_settings = i_capture_session->createOutputStreamSettings(Argus::STREAM_TYPE_BUFFER);
auto i_stream_settings = Argus::interface_cast<Argus::IOutputStreamSettings>(stream_settings);
auto buffer_stream_settings = Argus::interface_cast<Argus::IBufferOutputStreamSettings>(stream_settings);
if (!stream_settings || !i_stream_settings || !buffer_stream_settings)
{
std::fprintf(stderr, "Failed to create output stream settings\n");
return false;
}
buffer_stream_settings->setBufferType(Argus::BUFFER_TYPE_EGL_IMAGE);
buffer_stream_settings->setMetadataEnable(true);
for (auto [device, index] : device_index_pairs)
{
auto *camera_properties = Argus::interface_cast<Argus::ICameraProperties>(device);
if (!camera_properties)
{
std::fprintf(stderr, "Failed to get properties of camera %u\n", index);
return false;
}
std::vector<Argus::SensorMode *> modes;
if (camera_properties->getAllSensorModes(&modes) || modes.empty())
{
std::fprintf(stderr, "Failed to get supported modes of camera %u\n", index);
return false;
}
auto *default_mode = Argus::interface_cast<Argus::ISensorMode>(modes[0]);
if (!default_mode)
{
std::fprintf(stderr, "Failed to get default mode of camera %u\n", index);
return false;
}
auto camera = std::make_unique<Camera::Impl>();
camera->resolution = default_mode->getResolution();
camera->output_stream_settings.reset(stream_settings);
i_stream_settings->setCameraDevice(device);
camera->output_stream.reset(i_capture_session->createOutputStream(camera->output_stream_settings.get()));
auto output_stream = Argus::interface_cast<Argus::IBufferOutputStream>(camera->output_stream);
if (!output_stream)
{
std::fprintf(stderr, "Failed to create output stream for camera %u\n", index);
return false;
}
auto egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
Argus::UniqueObj<Argus::BufferSettings> buffer_settings_owned(output_stream->createBufferSettings());
auto *buffer_settings = Argus::interface_cast<Argus::IEGLImageBufferSettings>(buffer_settings_owned);
if (!buffer_settings)
{
std::fprintf(stderr, "Failed to create buffer settings for camera %u\n", index);
return false;
}
camera->frames.reserve(kCaptureQueueDepth);
for (int i = 0; i < kCaptureQueueDepth; i++)
{
auto maybe_dmabuf = DmaBuffer::create(camera->resolution);
if (!maybe_dmabuf)
{
std::fprintf(stderr, "Failed to allocate dmabuf %d for camera %u\n", i, index);
return false;
}
auto maybe_egl_image = EglImage::create(egl_display, *maybe_dmabuf);
if (!maybe_egl_image)
{
std::fprintf(stderr, "Failed to allocate egl image %d for camera %u\n", i, index);
return false;
}
buffer_settings->setEGLImage(maybe_egl_image->image());
buffer_settings->setEGLDisplay(egl_display);
Argus::UniqueObj<Argus::Buffer> argus_buffer_owned(output_stream->createBuffer(buffer_settings_owned.get()));
auto frame_impl = std::make_unique<Frame::Impl>(
std::move(*maybe_dmabuf), std::move(*maybe_egl_image), std::move(argus_buffer_owned));
camera->frames.emplace_back(std::move(frame_impl));
auto &frame = camera->frames.back();
auto *buffer = Argus::interface_cast<Argus::IBuffer>(frame.argus_buffer());
buffer->setClientData(&frame);
if (output_stream->releaseBuffer(frame.argus_buffer()))
{
std::fprintf(stderr, "Allocated buffer was not accepted for capture on camera %u\n", index);
return false;
}
}
camera->capture_request.reset(i_capture_session->createRequest());
auto *request = Argus::interface_cast<Argus::IRequest>(camera->capture_request);
if (!request)
{
std::fprintf(stderr, "Failed to create capture request for camera %u\n", index);
return false;
}
auto *source_settings = Argus::interface_cast<Argus::ISourceSettings>(request->getSourceSettings());
if (!source_settings)
{
std::fprintf(stderr, "Failed to access source settings for camera %u\n", index);
return false;
}
source_settings->setFrameDurationRange(Argus::Range<uint64_t>(static_cast<uint64_t>(1e9 / settings.framerate_fps)));
source_settings->setExposureTimeRange({settings.min_exposure_time_ns, settings.max_exposure_time_ns});
request->enableOutputStream(camera->output_stream.get());
if (i_capture_session->repeat(camera->capture_request.get()) != Argus::STATUS_OK)
{
std::fprintf(stderr, "Failed to start capture on camera %u\n", camera->camera_index);
return false;
}
I can’t include all of the source code, unfortunately. For context, camera is a Camera::Impl, the type we use to store capture session info for each device:
struct Camera::Impl final
{
Argus::UniqueObj<Argus::CaptureSession> capture_session{};
Argus::UniqueObj<Argus::OutputStreamSettings> output_stream_settings{};
Argus::UniqueObj<Argus::OutputStream> output_stream{};
Argus::UniqueObj<Argus::Request> capture_request{};
std::vector<Frame> frames{};
Argus::Size2D<uint32_t> resolution{};
uint32_t camera_index = 0;
};
There are some issues with this implementation related to ownership of the capture_session now that there is only one (we previously used two), but the code compiles and runs. It encounters the errors mentioned in the first post.
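In case it is relevant, here is roughly how we are planning to restructure that ownership. This is only a sketch (the CameraSystem::Impl name is hypothetical and not in our code yet): the single CaptureSession would be owned in one place, and each Camera::Impl would keep a non-owning ICaptureSession pointer instead of its own UniqueObj:

#include <memory>
#include <vector>
#include <Argus/Argus.h>

// Sketch only. Camera, CameraSystem, and Frame are declared elsewhere, as in
// our existing code; CameraSystem::Impl is a hypothetical owner type.
struct CameraSystem::Impl final
{
    // Sole owner of the one capture session shared by all devices.
    Argus::UniqueObj<Argus::CaptureSession> capture_session{};
    // Per-camera state; destroyed before capture_session (reverse declaration order).
    std::vector<std::unique_ptr<Camera::Impl>> cameras{};
};

struct Camera::Impl final
{
    // Borrowed from CameraSystem::Impl; valid for the lifetime of the owner.
    Argus::ICaptureSession *i_capture_session = nullptr;
    Argus::UniqueObj<Argus::OutputStreamSettings> output_stream_settings{};
    Argus::UniqueObj<Argus::OutputStream> output_stream{};
    Argus::UniqueObj<Argus::Request> capture_request{};
    std::vector<Frame> frames{};
    Argus::Size2D<uint32_t> resolution{};
    uint32_t camera_index = 0;
};

That way the per-camera streams and requests are released before the session that created them, rather than each Camera::Impl holding its own UniqueObj to the same session.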