Hi,
I’m trying the latest 4.6 release with VPI.
The stereo depth estimation sample code runs without issue.
However, I’m having trouble running the stereo estimation in a thread.
I simply put the Python sample code into a thread function, like this:
import cv2
import vpi
import numpy as np
import threading

from PIL import Image


class StereoDepth():
    def __init__(self):
        self.process = threading.Thread(target=self.depth_estimate, args=())
        self.neglect_range = 0
        self.process.start()
        self.process.join()

    def depth_estimate(self):
        backend = 'cuda'  # cpu, cuda, pva, pva-nvenc-vic
        left = 'chair_stereo_left.png'
        right = 'chair_stereo_right.png'

        # Scaling factor applied when loading the input
        scale = 1

        backend = vpi.Backend.CUDA

        # --------------------------------------------------------------
        # Load the inputs into vpi.Images and convert them to grayscale, 16bpp
        with vpi.Backend.CUDA:
            left = vpi.asimage(np.asarray(Image.open(left))).convert(vpi.Format.Y16_ER, scale=scale)
            right = vpi.asimage(np.asarray(Image.open(right))).convert(vpi.Format.Y16_ER, scale=scale)

        # --------------------------------------------------------------
        # Estimate the stereo disparity
        maxDisparity = 64
        confidenceMap = vpi.Image(left.size, vpi.Format.U16)
        disparity = vpi.stereodisp(left, right, out_confmap=confidenceMap, backend=backend,
                                   window=5, maxdisp=maxDisparity)

        # --------------------------------------------------------------
        # Postprocess the results prior to saving them to disk
        with vpi.Backend.CUDA:
            # Scale the disparity and confidence map so that values lie between 0 and 255.
            # Disparities are in Q10.5 format, so to map them to float they get
            # divided by 32. The resulting disparity range, from 0 to
            # maxDisparity, is then mapped to 0-255 for proper output.
            disparity = disparity.convert(vpi.Format.U8, scale=255.0 / (32 * maxDisparity))
            disparityColor = cv2.applyColorMap(disparity.cpu(), cv2.COLORMAP_JET)

            # Convert to RGB for output with PIL
            # disparityColor = cv2.cvtColor(disparityColor, cv2.COLOR_BGR2RGB)

            if confidenceMap:
                confidenceMap = confidenceMap.convert(vpi.Format.U8, scale=255.0 / 65535)

                # When a pixel's confidence is 0, its color in the disparity
                # output is black.
                mask = cv2.threshold(confidenceMap.cpu(), 1, 255, cv2.THRESH_BINARY)[1]
                mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
                disparityColor = cv2.bitwise_and(disparityColor, mask)
        return


a = StereoDepth()
And it gives me the following error:
line 48, in depth_estimate
disparityColor = cv2.applyColorMap(disparity.cpu(), cv2.COLORMAP_JET)
RuntimeError: VPI_ERROR_INVALID_OPERATION: Can't use object created in one context with stream created in a different context
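For reference, the same sequence of calls finishes without error when I run it directly on the main thread. Here is a minimal sketch of the non-threaded version I mean, using the same images and parameters as above:

import cv2
import numpy as np
import vpi
from PIL import Image

maxDisparity = 64

# Load the inputs and convert them to grayscale, 16bpp
with vpi.Backend.CUDA:
    left = vpi.asimage(np.asarray(Image.open('chair_stereo_left.png'))).convert(vpi.Format.Y16_ER, scale=1)
    right = vpi.asimage(np.asarray(Image.open('chair_stereo_right.png'))).convert(vpi.Format.Y16_ER, scale=1)

# Estimate the disparity with the CUDA backend
confidenceMap = vpi.Image(left.size, vpi.Format.U16)
disparity = vpi.stereodisp(left, right, out_confmap=confidenceMap,
                           backend=vpi.Backend.CUDA, window=5, maxdisp=maxDisparity)

# Postprocess exactly as in the threaded version; this is the line
# that fails when it runs inside the thread.
with vpi.Backend.CUDA:
    disparity = disparity.convert(vpi.Format.U8, scale=255.0 / (32 * maxDisparity))
    disparityColor = cv2.applyColorMap(disparity.cpu(), cv2.COLORMAP_JET)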
Could you please give me feedback on this? Is there a context operation that I missed?