I am attempting to implement a system that uses both a model I have trained and the stock MobileNet, since I have a custom object detector but also need to detect the objects that MobileNet can already detect.
Anyway, I wrote the code below, but I cannot quite figure out the command-line syntax for running two networks on two different inputs.
On a side note, my ultimate goal is a GUI with two background workers, one detecting on one data set and the other detecting on a second data set pulled from cameras… so the first step is to work out how to run two nets in the same program :)
Thanks in advance, code below:
import jetson.inference
import jetson.utils
import argparse
import sys
parser = argparse.ArgumentParser(description="Run a primary as well as a secondary detection network.",
                                 formatter_class=argparse.RawTextHelpFormatter,
                                 epilog=jetson.inference.detectNet.Usage() + jetson.utils.videoSource.Usage() +
                                        jetson.utils.videoOutput.Usage() + jetson.utils.logUsage())
parser.add_argument("primary_input_URI", type=str, default="", nargs='?', help="URI of the primary input stream")
parser.add_argument("primary_output_URI", type=str, default="", nargs='?', help="URI of the primary output stream")
parser.add_argument("--primarynet", type=str, default="ssd-mobilenet-v2", help="pre-trained model to load (see below for options)")
parser.add_argument("--primarythreshold", type=float, default=0.3, help="minimum detection threshold to use")
parser.add_argument("--primarylabels", type=str, default="primary_labels.txt", help=" primary labels txt file")
parser.add_argument("secondary_input_URI", type=str, default="", nargs='?', help="URI of the secondary input stream")
parser.add_argument("secondary_output_URI", type=str, default="", nargs='?', help="URI of the secondary output stream")
parser.add_argument("--secondarynet", type=str, default="ssd-mobilenet-v2", help="pre-trained model to load (see below for options)")
parser.add_argument("--secondarythreshold", type=float, default=0.8, help="minimum detection threshold to use")
parser.add_argument("--secondarylabels", type=str, default="secondary_labels.txt", help=" secondary labels txt file")
parser.add_argument("--overlay", type=str, default="box,labels,conf", help="detection overlay flags (e.g. --overlay=box,labels,conf)\nvalid combinations are: 'box', 'labels', 'conf', 'none'")
# parse the command line
try:
    opt = parser.parse_known_args()[0]
except:
    print("")
    parser.print_help()
    sys.exit(0)
# load the object detection networks
primary_net = jetson.inference.detectNet(opt.primarynet, sys.argv)
secondary_net = jetson.inference.detectNet(opt.secondarynet, sys.argv)
# create video sources & outputs
primary_input = jetson.utils.videoSource(opt.primary_input_URI, argv=sys.argv)
primary_output = jetson.utils.videoOutput(opt.primary_output_URI, argv=sys.argv)
secondary_input = jetson.utils.videoSource(opt.secondary_input_URI, argv=sys.argv)
secondary_output = jetson.utils.videoOutput(opt.secondary_output_URI, argv=sys.argv)
def detect_primary():
    # capture the next image from the primary source
    img = primary_input.Capture()
    # detect objects in the image (with overlay)
    primary_detections = primary_net.Detect(img, overlay=opt.overlay)
    # print the detections
    print("detected {:d} primary objects in image".format(len(primary_detections)))
    for detection in primary_detections:
        print(detection)
    # render the image
    primary_output.Render(img)
    # update the title bar
    primary_output.SetStatus("{:s} | Network {:.0f} FPS".format(opt.primarynet, primary_net.GetNetworkFPS()))
    # print out performance info
    primary_net.PrintProfilerTimes()
def detect_secondary():
    # capture the next image from the secondary source
    img = secondary_input.Capture()
    # detect objects in the image (with overlay)
    secondary_detections = secondary_net.Detect(img, overlay=opt.overlay)
    # print the detections
    print("detected {:d} secondary objects in image".format(len(secondary_detections)))
    for detection in secondary_detections:
        print(detection)
    # render the image
    secondary_output.Render(img)
    # update the title bar
    secondary_output.SetStatus("{:s} | Network {:.0f} FPS".format(opt.secondarynet, secondary_net.GetNetworkFPS()))
    # print out performance info
    secondary_net.PrintProfilerTimes()
# process frames until the user exits
while True:
    detect_primary()
    detect_secondary()
    # exit when any input/output stream reaches EOS
    if (not primary_input.IsStreaming() or not primary_output.IsStreaming() or
            not secondary_input.IsStreaming() or not secondary_output.IsStreaming()):
        break
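In case it makes clearer what I am aiming for, here is a rough, untested sketch of how I imagine each network and stream could be given its own explicit arguments instead of both of them sharing sys.argv. The argv-style constructor usage and the flag names are my assumptions based on the detectnet.py sample, so this may well not be the right way to load a custom ONNX model:

import jetson.inference
import jetson.utils

# assumption: detectNet accepts an explicit argv list, so each network can get
# its own --model/--labels/--threshold without both of them parsing sys.argv
primary_net = jetson.inference.detectNet(argv=[
    "--model=models/detection/Primary_Custom.onnx",     # my custom ONNX model
    "--labels=models/detection/primary_labels.txt",
    "--input-blob=input_0", "--output-cvg=scores", "--output-bbox=boxes",
    "--threshold=0.3"])

# the secondary network is just the stock model
secondary_net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.8)

# each stream gets its own source/output URI directly
primary_input    = jetson.utils.videoSource("file://primary_test_images/image*.jpg")
primary_output   = jetson.utils.videoOutput("file://primary_test_images/results/result_%i.jpg")
secondary_input  = jetson.utils.videoSource("file://secondary_test_images/*.jpg")
secondary_output = jetson.utils.videoOutput("file://secondary_test_images/results/result_%i.jpg")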
Command line I am attempting to run:
python3 main.py --firenet=models/detection/Primary_Custom.onnx --primary_labels=models/detection/primary_labels.txt --primary_input_URI="file://primary_test_images/image*.jpg" --primary_output_URI="file://primary_test_images/results/result_%i.jpg" --secondary_input_URI="file://secondary_test_images/*.jpg" --secodnary_output_URI="file://secondary_test_images/results/result_%i.jpg" --secondary_labels=secondary_labels.txt --input-blob=input_0 --output-cvg=scores --output-bbox=boxes
Output (which tells me my command is not correct):
[gstreamer] gstreamer message stream-start ==> pipeline0
Error generated. /dvs/git/dirty/git-master_linux/multimedia/nvgstreamer/gst-nvarguscamera/gstnvarguscamerasrc.cpp, execute:645 No cameras available
[gstreamer] gstCamera -- end of stream (EOS)
(python3:11829): GStreamer-CRITICAL **: 22:46:27.014: gst_mini_object_set_qdata: assertion 'object != NULL' failed
I had a look at the C files and I am wondering whether what I am attempting is even possible?
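For what it's worth, this is the rough shape of the two-background-worker setup I had in mind for later, just standard Python threads wrapped around the two detect functions above; I have not verified that two detectNet instances can actually run safely from separate threads:

import threading

def primary_worker():
    # keep detecting on the primary stream until it ends
    while primary_input.IsStreaming() and primary_output.IsStreaming():
        detect_primary()

def secondary_worker():
    # keep detecting on the secondary stream until it ends
    while secondary_input.IsStreaming() and secondary_output.IsStreaming():
        detect_secondary()

primary_thread = threading.Thread(target=primary_worker, daemon=True)
secondary_thread = threading.Thread(target=secondary_worker, daemon=True)
primary_thread.start()
secondary_thread.start()
primary_thread.join()
secondary_thread.join()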