Overlaying MLX90640 (thermal camera) and Pi Camera Module V2 (color camera) on Jetson Nano

I am using the following repository.

While overlaying both cameras, I get the following error:

Traceback (most recent call last):
  File "2_cam_overlay_mlx.py", line 89, in <module>
    cv2.addWeighted(frame, alpha, tframe, 1-alpha, 0, frame)
cv2.error: OpenCV(4.5.5) /io/opencv/modules/core/src/arithm.cpp:647: error: (-209:Sizes of input arguments do not match) The operation is neither 'array op array' (where arrays have the same size and the same number of channels), nor 'array op scalar', nor 'scalar op array' in function 'arithm_op'

Exiting…
Cleaning up pins

The overlay code is:

# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
from multiprocessing import Process
from multiprocessing import Queue
import numpy as np
import argparse
import imutils
import time
import cv2

import time,board,busio
import adafruit_mlx90640
import datetime as dt

def gstreamer_pipeline(
    #capture_width=3280,
    #capture_height=2464,
    capture_width=1640,
    capture_height=1232,
    display_width=640,
    display_height=480,
    framerate=29.999999,
    flip_method=6,
):
    return (
        "nvarguscamerasrc ! "
        "video/x-raw(memory:NVMM), "
        "width=(int)%d, height=(int)%d, "
        "format=(string)NV12, framerate=(fraction)%d/1 ! "
        "nvvidconv flip-method=%d ! "
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink"
        % (
            capture_width,
            capture_height,
            framerate,
            flip_method,
            display_width,
            display_height,
        )
    )

i2c = busio.I2C(board.SCL, board.SDA, frequency=400000) # setup I2C
mlx = adafruit_mlx90640.MLX90640(i2c) # begin MLX90640 with I2C comm
mlx.refresh_rate = adafruit_mlx90640.RefreshRate.REFRESH_8_HZ # 16Hz max

mlx_shape = (24,32)
tdata = np.zeros((24*32,))
alpha = 0.5
tframe = np.reshape(np.zeros((480*640,)), (480,640))

def td_to_img(f,tmax,tmin):
    norm = np.uint8((f - tmin)*255/(tmax-tmin))
    return norm

def tframe2Que(outputQueue):
    while True:
        mlx.getFrame(tdata) # read MLX temperatures into frame var
        t_img = (np.reshape(tdata,mlx_shape)) # reshape to 24x32
        tmax = tdata.max()
        tmin = tdata.min()
        ta_img = td_to_img(t_img, tmax, tmin)
        # Image processing
        img = cv2.applyColorMap(ta_img, cv2.COLORMAP_JET) # cv2.COLORMAP_HSV
        img = cv2.resize(img, (640,480), interpolation = cv2.INTER_CUBIC)
        # img = cv2.flip(img, 1)
        outputQueue.put(img)

print("[INFO] starting process...")
outputQueue = Queue(maxsize=1)
p = Process(target=tframe2Que, args=(outputQueue,))
p.daemon = True
p.start()

print("[INFO] starting video stream...")
cap = cv2.VideoCapture(gstreamer_pipeline(), cv2.CAP_GSTREAMER)
time.sleep(2.0) # allow the camera sensor to warm up for 2 seconds
fps = FPS().start() # start the FPS counter reported at shutdown

# loop over the frames from the video stream
while True:
    ret, frame = cap.read()
    # if the output queue *is not* empty, grab the detections
    if not outputQueue.empty():
        tframe = outputQueue.get()
    if len(tframe.shape) < 3: continue
    # show the output frame
    cv2.addWeighted(frame, alpha, tframe, 1-alpha, 0, frame)
    cv2.imshow('FaceTemp', frame)
    fps.update() # count this frame towards the FPS summary
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# do a bit of cleanup
cap.release()
cv2.destroyAllWindows()
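
For reference on that traceback: the -209 size-mismatch error from cv2.addWeighted means frame and tframe do not have the same shape and channel count. Since tframe coming off the queue is already a 640x480 BGR image, the usual culprit is cap.read() returning no image (frame is None) because the GStreamer pipeline never actually opened. A minimal guard around the display loop, reusing cap, outputQueue, tframe, and alpha from the script above (a sketch only, not part of the original code), would make that failure mode explicit:

# Sketch only: assumes cap, outputQueue, tframe, and alpha from the script above.
if not cap.isOpened():
    raise RuntimeError("cv2.VideoCapture could not open the GStreamer pipeline")

while True:
    ret, frame = cap.read()
    if not ret or frame is None:
        continue  # no color frame delivered; do not pass None into addWeighted
    if not outputQueue.empty():
        tframe = outputQueue.get()
    if tframe.ndim < 3:
        continue  # thermal frame is still the flat placeholder array
    if frame.shape != tframe.shape:
        # force both images to identical width/height before blending
        tframe = cv2.resize(tframe, (frame.shape[1], frame.shape[0]))
    cv2.addWeighted(frame, alpha, tframe, 1 - alpha, 0, frame)
    cv2.imshow('FaceTemp', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break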

To run the thermal camera on its own:

#!/usr/bin/python3
##################################

import time,board,busio
import numpy as np
import adafruit_mlx90640
import datetime as dt
import cv2

i2c = busio.I2C(board.SCL, board.SDA, frequency=400000) # setup I2C
mlx = adafruit_mlx90640.MLX90640(i2c) # begin MLX90640 with I2C comm
mlx.refresh_rate = adafruit_mlx90640.RefreshRate.REFRESH_8_HZ # 16Hz is noisy

mlx_shape = (24,32)
tframe = np.zeros((24*32,)) # setup array for storing all 768 temperatures

def td_to_img(f,tmax,tmin):
    norm = np.uint8((f - tmin)*255/(tmax-tmin))
    return norm

time.sleep(2)
t0 = time.time()

try:
    while True:
        # waiting for data frame
        mlx.getFrame(tframe) # read MLX temperatures into frame var
        t_img = (np.reshape(tframe,mlx_shape)) # reshape to 24x32
        tmax = tframe.max()
        tmin = tframe.min()
        ta_img = td_to_img(t_img, tmax, tmin)
        # np.fliplr(ta_img)

        # Image processing
        img = cv2.applyColorMap(ta_img, cv2.COLORMAP_JET)
        img = cv2.resize(img, (640,480), interpolation = cv2.INTER_CUBIC)
        # img = cv2.flip(img, 1)

        text = 'Tmin = {:+.1f} Tmax = {:+.1f} FPS = {:.2f}'.format(tmin, tmax, 1/(time.time() - t0))
        cv2.putText(img, text, (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 0), 1)
        cv2.imshow('Thermal Image', img)

        # if 's' is pressed - saving of picture
        key = cv2.waitKey(1) & 0xFF
        if key == ord("s"):
            fname = 'pic_' + dt.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + '.jpg'
            cv2.imwrite(fname, img)
            print('Saving image ', fname)
        if key == ord("q"):
            break
        t0 = time.time()

except KeyboardInterrupt:
    # to terminate the cycle
    cv2.destroyAllWindows()
    print(' Stopped')

cv2.destroyAllWindows()
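
A side note on td_to_img: the manual (f - tmin)*255/(tmax - tmin) scaling divides by zero when every pixel in the scene has the same temperature. An equivalent min-max scaling that lets OpenCV do the arithmetic (my suggestion, not what the repository ships; the call sites would then drop the tmax/tmin arguments) looks like this:

def td_to_img(f):
    # Map the 24x32 float temperature array onto 0..255 for applyColorMap.
    # cv2.normalize performs the same min-max scaling as the manual formula
    # and sidesteps the explicit division.
    norm = cv2.normalize(f, None, 0, 255, cv2.NORM_MINMAX)
    return norm.astype(np.uint8)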

I am using Python 3.8 on JetPack 4.6.1 on a Jetson Nano 2GB developer kit.

hello waseemsofficial,

could you please check the developer guide, Approaches for Validating and Testing the V4L2 Driver.
Please verify the basic camera stream: are you able to access the camera stream with the standard v4l2 ioctl utility?

Thanks for the reply. Please bear with me; there is an electricity issue at my university's lab. I shall update ASAP. Please do not close this thread.
Thanks in anticipation.

waseemsofficial@Waseems-AI:~$ gst-launch-1.0 nvarguscamerasrc num-buffers=200 ! 'video/x-raw(memory:NVMM),width=1920, height=1080, framerate=30/1, format=NV12' ! omxh264enc ! qtmux ! filesink location=test.mp4 -e
Setting pipeline to PAUSED ...
Pipeline is live and does not need PREROLL ...
Setting pipeline to PLAYING ...
New clock: GstSystemClock
Framerate set to : 30 at NvxVideoEncoderSetParameterNvMMLiteOpen : Block : BlockType = 4 
===== NVMEDIA: NVENC =====
NvMMLiteBlockCreate : Block : BlockType = 4 
H264: Profile = 66, Level = 40 
GST_ARGUS: Creating output stream
CONSUMER: Waiting until producer is connected...
GST_ARGUS: Available Sensor modes :
GST_ARGUS: 3264 x 2464 FR = 21.000000 fps Duration = 47619048 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 3264 x 1848 FR = 28.000001 fps Duration = 35714284 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1920 x 1080 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1640 x 1232 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1280 x 720 FR = 59.999999 fps Duration = 16666667 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1280 x 720 FR = 120.000005 fps Duration = 8333333 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: Running with following settings:
   Camera index = 0 
   Camera mode  = 2 
   Output Stream W = 1920 H = 1080 
   seconds to Run    = 0 
   Frame Rate = 29.999999 
GST_ARGUS: Setup Complete, Starting captures for 0 seconds
GST_ARGUS: Starting repeat capture requests.
CONSUMER: Producer has connected; continuing.
Got EOS from element "pipeline0".
Execution ended after 0:00:07.436499740
Setting pipeline to PAUSED ...
Setting pipeline to READY ...
GST_ARGUS: Cleaning up
CONSUMER: Done Success
GST_ARGUS: Done Success
Setting pipeline to NULL ...
Freeing pipeline ...
waseemsofficial@Waseems-AI:~$ gst-launch-1.0 nvarguscamerasrc num-buffers=200 ! 'video/x-raw(memory:NVMM),width=1920, height=1080, framerate=30/1, format=NV12' ! omxh264enc ! qtmux ! filesink location=test.mp4 -e
Setting pipeline to PAUSED ...
Pipeline is live and does not need PREROLL ...
Setting pipeline to PLAYING ...
New clock: GstSystemClock
Framerate set to : 30 at NvxVideoEncoderSetParameterNvMMLiteOpen : Block : BlockType = 4 
===== NVMEDIA: NVENC =====
NvMMLiteBlockCreate : Block : BlockType = 4 
H264: Profile = 66, Level = 40 
GST_ARGUS: Creating output stream
CONSUMER: Waiting until producer is connected...
GST_ARGUS: Available Sensor modes :
GST_ARGUS: 3264 x 2464 FR = 21.000000 fps Duration = 47619048 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 3264 x 1848 FR = 28.000001 fps Duration = 35714284 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1920 x 1080 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1640 x 1232 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1280 x 720 FR = 59.999999 fps Duration = 16666667 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1280 x 720 FR = 120.000005 fps Duration = 8333333 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: Running with following settings:
   Camera index = 0 
   Camera mode  = 2 
   Output Stream W = 1920 H = 1080 
   seconds to Run    = 0 
   Frame Rate = 29.999999 
GST_ARGUS: Setup Complete, Starting captures for 0 seconds
GST_ARGUS: Starting repeat capture requests.
CONSUMER: Producer has connected; continuing.
Got EOS from element "pipeline0".
Execution ended after 0:00:07.009904900
Setting pipeline to PAUSED ...
Setting pipeline to READY ...
GST_ARGUS: Cleaning up
CONSUMER: Done Success
GST_ARGUS: Done Success
Setting pipeline to NULL ...
Freeing pipeline ...
waseemsofficial@Waseems-AI:~$ gst-launch-1.0 nvarguscamerasrc ! ‘video/x-raw(memory:NVMM), width=1920, height=1080, format=(string)NV12, framerate=(fraction)30/1' ! nvoverlaysink -e
bash: syntax error near unexpected token `('
waseemsofficial@Waseems-AI:~$ gst-launch-1.0 v4l2src num-buffers=200 device=/dev/video0 ! 'video/x-raw, format=YUY2, width=640, height=480, framerate=30/1' ! videoconvert ! omxh264enc ! qtmux ! filesink location=test.mp4 -ev
Setting pipeline to PAUSED ...
Pipeline is live and does not need PREROLL ...
Setting pipeline to PLAYING ...
New clock: GstSystemClock
ERROR: from element /GstPipeline:pipeline0/GstV4l2Src:v4l2src0: Internal data stream error.
Additional debug info:
gstbasesrc.c(3055): gst_base_src_loop (): /GstPipeline:pipeline0/GstV4l2Src:v4l2src0:
streaming stopped, reason not-negotiated (-4)
EOS on shutdown enabled -- waiting for EOS after Error
Waiting for EOS...
/GstPipeline:pipeline0/GstQTMux:qtmux0.GstPad:src: caps = video/quicktime, variant=(string)apple
/GstPipeline:pipeline0/GstFileSink:filesink0.GstPad:sink: caps = video/quicktime, variant=(string)apple
^Chandling interrupt.
Interrupt: Stopping pipeline ...
Interrupt while waiting for EOS - stopping pipeline...
Execution ended after 0:00:28.674247620
Setting pipeline to PAUSED ...
Setting pipeline to READY ...
Setting pipeline to NULL ...
Freeing pipeline ...
waseemsofficial@Waseems-AI:~$ v4l2-ctl --set-fmt-video=width=1920,height=1080,pixelformat=RG10 --stream-mmap --stream-count=1 -d /dev/video0 --stream-to=ov5693.raw
bash: v4l2-ctl: command not found

I tried all the commands to verify the V4L2 driver, but my camera didn't start.
Another thing: when I use the python command to run the RPi camera, it starts. I am using the following repository: GitHub - JetsonHacksNano/CSI-Camera: Simple example of using a CSI-Camera (like the Raspberry Pi Version 2 camera) with the NVIDIA Jetson Developer Kit

But when I run the python3 command, it says it is unable to open the camera.

hello waseemsofficial,

please download the v4l2 ioctl utility via the apt server, i.e. $ sudo apt-get install v4l-utils

sudo python3 2_cam_overlay_mlx.py
[INFO] starting process...
[INFO] starting video stream...
Traceback (most recent call last):
  File "2_cam_overlay_mlx.py", line 89, in <module>
    cv2.addWeighted(frame, alpha, tframe, 1-alpha, 0, frame)
cv2.error: OpenCV(4.5.5) /io/opencv/modules/core/src/arithm.cpp:647: error: (-209:Sizes of input arguments do not match) The operation is neither 'array op array' (where arrays have the same size and the same number of channels), nor 'array op scalar', nor 'scalar op array' in function 'arithm_op'

Exiting... 
Cleaning up pins

After installing it, I still get the same error (shown above) while doing the overlay.

The CSI camera works with the python command:

waseemsofficial@Waseems-AI:~/CSI-Camera$ python simple_camera.py
nvarguscamerasrc sensor-id=0 !video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, framerate=(fraction)30/1 ! nvvidconv flip-method=0 ! video/x-raw, width=(int)960, height=(int)540, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink
GST_ARGUS: Creating output stream
CONSUMER: Waiting until producer is connected...
GST_ARGUS: Available Sensor modes :
GST_ARGUS: 3264 x 2464 FR = 21.000000 fps Duration = 47619048 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 3264 x 1848 FR = 28.000001 fps Duration = 35714284 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1920 x 1080 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1640 x 1232 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1280 x 720 FR = 59.999999 fps Duration = 16666667 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: 1280 x 720 FR = 120.000005 fps Duration = 8333333 ; Analog Gain range min 1.000000, max 10.625000; Exposure Range min 13000, max 683709000;

GST_ARGUS: Running with following settings:
   Camera index = 0 
   Camera mode  = 2 
   Output Stream W = 1920 H = 1080 
   seconds to Run    = 0 
   Frame Rate = 29.999999 
GST_ARGUS: Setup Complete, Starting captures for 0 seconds
GST_ARGUS: Starting repeat capture requests.
CONSUMER: Producer has connected; continuing.
[ WARN:0] global /home/nvidia/host/build_opencv/nv_opencv/modules/videoio/src/cap_gstreamer.cpp (933) open OpenCV | GStreamer warning: Cannot query video position: status=0, value=-1, duration=-1
^CGST_ARGUS: Cleaning up
CONSUMER: Done Success
GST_ARGUS: Done Success
Traceback (most recent call last):
  File "simple_camera.py", line 75, in <module>
    show_camera()
  File "simple_camera.py", line 63, in show_camera
    keyCode = cv2.waitKey(10) & 0xFF
KeyboardInterrupt

With python3 it doesn't work:

waseemsofficial@Waseems-AI:~/CSI-Camera$ python3 simple_camera.py
nvarguscamerasrc sensor-id=0 !video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, framerate=(fraction)30/1 ! nvvidconv flip-method=0 ! video/x-raw, width=(int)960, height=(int)540, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink
Error: Unable to open camera
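
One thing worth checking here (my suggestion, not something raised in the thread so far): "Unable to open camera" under python3 while the same pipeline works under python usually means the two interpreters load different OpenCV builds. The /io/opencv/ path in the earlier traceback points at a pip wheel of opencv-python, and those wheels are normally built without GStreamer support, so cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER) cannot open the nvarguscamerasrc string. This can be confirmed from each interpreter:

import cv2

print(cv2.__version__)
# In the "Video I/O" section of the output, look for "GStreamer: YES".
# A build that reports "NO" cannot open the nvarguscamerasrc pipeline string.
print(cv2.getBuildInformation())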

Please narrow down the issue by using the general v4l2 ioctl utility; you should check whether you've got correct sensor streaming.

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.