Jetson shuts down when running VPI stereo disparity

Hello everyone, I’ve been trying to use the VPI stereo disparity algorithm with a Luxonis OAK-D Lite. I’m using the camera’s SDK and OpenCV to access the stereo output.

When executing the code, the disparity, left, and right frames are shown for 3-5 seconds and then the Jetson (Xavier NX with a custom carrier board) shuts down completely. The example on VPI’s site, however, works just fine.

This is my first time using VPI, so any help regarding this is appreciated :)

I’ve attached the code below:

#include <opencv2/core/version.hpp>
#include <depthai/depthai.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vpi/OpenCVInterop.hpp>

#include <vpi/Image.h>
#include <vpi/Status.h>
#include <vpi/Stream.h>
#include <vpi/algo/ConvertImageFormat.h>
#include <vpi/algo/Rescale.h>
#include <vpi/algo/StereoDisparity.h>

#include <cstring>
#include <iostream>
#include <sstream>

//Error Handling 
#define CHECK_STATUS(STMT)                                    \
    do                                                        \
    {                                                         \
        VPIStatus status = (STMT);                            \
        if (status != VPI_SUCCESS)                            \
        {                                                     \
            char buffer[VPI_MAX_STATUS_MESSAGE_LENGTH];       \
            vpiGetLastStatusMessage(buffer, sizeof(buffer));  \
            std::ostringstream ss;                            \
            ss << vpiStatusGetName(status) << ": " << buffer; \
            throw std::runtime_error(ss.str());               \
        }                                                     \
    } while (0)

int main(int, char **)
{   
    cv::Mat frameLeft, frameRight;
    dai::Pipeline pipeline;

    VPIImage inFrameLeft = NULL;
    VPIImage inFrameRight = NULL;
    VPIImage tmpLeft = NULL;
    VPIImage tmpRight = NULL;
    VPIImage stereoLeft = NULL;
    VPIImage stereoRight = NULL;
    VPIImage disparity = NULL;
    VPIImage confidenceMap = NULL;
    VPIStream stream = NULL;
    VPIPayload stereo = NULL;

    //configure depthai 
    auto monoLeft = pipeline.create<dai::node::MonoCamera>();
    auto monoRight = pipeline.create<dai::node::MonoCamera>();

    auto xLinkOutLeft = pipeline.create<dai::node::XLinkOut>();
    auto xLinkOutRight = pipeline.create<dai::node::XLinkOut>();

    xLinkOutLeft->setStreamName("left");
    xLinkOutRight->setStreamName("right");

    monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_480_P);
    monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT);

    monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_480_P);
    monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT);

    monoLeft->out.link(xLinkOutLeft->input);
    monoRight->out.link(xLinkOutRight->input);

    dai::Device device(pipeline);

    auto leftQueue = device.getOutputQueue("left", 8, false);
    auto rightQueue = device.getOutputQueue("right", 8, false);

    //proc backend
    uint64_t backend = VPI_BACKEND_CUDA;
    
    //input frame dimensions
    int32_t inputWidth = 640;
    int32_t inputHeight = 480;

    //init vpi stream
    CHECK_STATUS(vpiStreamCreate(0, &stream));
    
    //conversion params
    VPIConvertImageFormatParams convParams;
    CHECK_STATUS(vpiInitConvertImageFormatParams(&convParams));    

    //disparity params
    VPIStereoDisparityEstimatorCreationParams stereoParams;
    CHECK_STATUS(vpiInitStereoDisparityEstimatorCreationParams(&stereoParams));
    stereoParams.maxDisparity = 64;

    //in-out format
    VPIImageFormat stereoFormat = VPI_IMAGE_FORMAT_Y16_ER;
    VPIImageFormat disparityFormat = VPI_IMAGE_FORMAT_U16;

    int stereoWidth = inputWidth;
    int stereoHeight = inputHeight;
    int outputWidth = inputWidth;
    int outputHeight = inputHeight;

    CHECK_STATUS(vpiCreateStereoDisparityEstimator(VPI_BACKEND_CUDA, stereoWidth, stereoHeight, stereoFormat, &stereoParams,
                                                        &stereo));

    CHECK_STATUS(vpiImageCreate(outputWidth, outputHeight, disparityFormat, 0, &disparity));

    CHECK_STATUS(vpiImageCreate(inputWidth, inputHeight, VPI_IMAGE_FORMAT_U16, 0, &confidenceMap));


    while (true)
    {
        //read left cam frames
        auto inLeft = leftQueue->get<dai::ImgFrame>();
        frameLeft = inLeft->getFrame();
        cv::imshow("Left", frameLeft);

        //read right cam frames
        auto inRight = rightQueue->get<dai::ImgFrame>();
        frameRight = inRight->getFrame();
        cv::imshow("Right", frameRight);

        //vpi frame wrap
        CHECK_STATUS(vpiImageCreateOpenCVMatWrapper(frameLeft, 0, &inFrameLeft));
        CHECK_STATUS(vpiImageCreateOpenCVMatWrapper(frameRight, 0, &inFrameRight));

        //input stereo images
        CHECK_STATUS(vpiImageCreate(stereoWidth, stereoHeight, stereoFormat, 0, &stereoLeft));
        CHECK_STATUS(vpiImageCreate(stereoWidth, stereoHeight, stereoFormat, 0, &stereoRight));

        CHECK_STATUS(vpiSubmitConvertImageFormat(stream, VPI_BACKEND_CUDA, inFrameLeft, stereoLeft, &convParams));
        CHECK_STATUS(vpiSubmitConvertImageFormat(stream, VPI_BACKEND_CUDA, inFrameRight, stereoRight, &convParams));

        //submit input and output
        CHECK_STATUS(vpiSubmitStereoDisparityEstimator(stream, VPI_BACKEND_CUDA, stereo, stereoLeft, stereoRight, disparity,
                                                    confidenceMap, NULL));

        // Wait until the algorithm finishes processing
        CHECK_STATUS(vpiStreamSync(stream));

        VPIImageData data;
        CHECK_STATUS(vpiImageLock(disparity, VPI_LOCK_READ, &data));

        //opencv matrix
        cv::Mat cvDisparity;
        CHECK_STATUS(vpiImageDataExportOpenCVMat(data, &cvDisparity));

        //disparity values are Q10.5 fixed point (disparity * 32); scale them to 0-255 for display
        cvDisparity.convertTo(cvDisparity, CV_8UC1, 255.0 / (32 * stereoParams.maxDisparity), 0);

        //colormap
        cv::Mat cvDisparityColor;
        applyColorMap(cvDisparity, cvDisparityColor, cv::COLORMAP_JET);

        CHECK_STATUS(vpiImageUnlock(disparity));

        cv::imshow("Disparity", cvDisparityColor);

        int key = cv::waitKey(1);

        if (key == 'q' || key == 'Q')
        {
            break;
        }

        //destroy the per-frame VPI images so they are not leaked across iterations
        vpiImageDestroy(inFrameLeft);
        vpiImageDestroy(inFrameRight);
        vpiImageDestroy(stereoLeft);
        vpiImageDestroy(stereoRight);
    }
    vpiStreamDestroy(stream);
    vpiImageDestroy(inFrameLeft);
    vpiImageDestroy(inFrameRight);
    vpiImageDestroy(tmpLeft);
    vpiImageDestroy(tmpRight);
    vpiImageDestroy(stereoLeft);
    vpiImageDestroy(stereoRight);
    vpiImageDestroy(confidenceMap);
    vpiImageDestroy(disparity);
    vpiPayloadDestroy(stereo);

    return 0;
}

Hi,

Could you check the output of the following command?

$ cat /sys/kernel/pmc/tegra_reset_reason

Thanks.

Thanks, I ran the command and the output is “TEGRA_POWER_ON_RESET”

I think the shutdown is due to the current consumption, because the code works just fine on my desktop. I’m using a 12 V 2 A power supply. After some calculations I’ve found that the Xavier consumes 1.6 A and the camera consumes around 0.8 to 1 A, so it seems the power supply is the bottleneck.
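Filling in that arithmetic (assuming both currents are drawn at the 12 V input):

1.6 A + (0.8 to 1) A ≈ 2.4 to 2.6 A, i.e. roughly 29 to 31 W requested from a supply rated for 2 A / 24 W.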

Hi,

An alternative is to set nvpmodel to a more power-efficient mode.
That should lower the power consumption and avoid the shutdown.

Supported Modes and Power Efficiency
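
For reference, the current mode can be queried and changed from the command line; the available mode IDs depend on the JetPack release and are listed in /etc/nvpmodel.conf:

$ sudo nvpmodel -q
$ sudo nvpmodel -m <mode-id>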

Thanks.

I set the power mode to 10 W 4-core and the code is working fine without any shutdowns, but the algorithm is still stuttering. I think that’s due to the power bottleneck. I’ve ordered a new power supply; I’ll let you know once I test it.
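
For anyone checking the same theory, the board’s power draw can be watched while the algorithm runs (rail names vary by module):

$ sudo tegrastats

It prints CPU/GPU utilization and the on-module power rail readings about once per second, which should show whether the total draw is running up against the supply.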

I’m closing this topic since there has been no update from you for a while, assuming the issue was resolved.
If you still need support, please open a new topic. Thanks

Is this still an issue that needs support? Are there any results that can be shared? Thanks