import numpy as np
import sys
import argparse
import jetson.inference
import jetson.utils
import torch
import os
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torchvision.datasets import ImageFolder
from torchvision import models
from pathlib import Path
from PIL import Image
from jetson.inference import detectNet
from jetson.utils import videoSource, videoOutput, logUsage
parser = argparse.ArgumentParser(description="Locate objects in a live camera stream using an object detection DNN.",
                                 formatter_class=argparse.RawTextHelpFormatter,
                                 epilog=detectNet.Usage() + videoSource.Usage() + videoOutput.Usage() + logUsage())

parser.add_argument("input_URI", type=str, default="", nargs='?', help="URI of the input stream")
parser.add_argument("output_URI", type=str, default="", nargs='?', help="URI of the output stream")
parser.add_argument("--network", type=str, default="ssd-mobilenet-v2", help="pre-trained model to load (see below for options)")
parser.add_argument("--overlay", type=str, default="box,labels,conf", help="detection overlay flags (e.g. --overlay=box,labels,conf)\nvalid combinations are: 'box', 'labels', 'conf', 'none'")
parser.add_argument("--threshold", type=float, default=0.5, help="minimum detection threshold to use")

is_headless = ["--headless"] if sys.argv[0].find('console.py') != -1 else [""]

try:
    args = parser.parse_known_args()[0]
except:
    print("")
    parser.print_help()
    sys.exit(0)
# create the video source and output stream
input = videoSource(args.input_URI, argv=sys.argv)
output = videoOutput(args.output_URI, argv=sys.argv+is_headless)
# load the custom SSD-MobileNet ONNX detection model and its label file
net = detectNet(model="/home/jetson/jetson-inference/python/training/detection/ssd/models/One_Label/ssd-mobilenet.onnx",
                labels="/home/jetson/jetson-inference/python/training/detection/ssd/models/One_Label/labels.txt",
                input_blob="input_0", output_cvg="scores", output_bbox="boxes", threshold=0.3)

# gather the unlabelled images and set the folder the pseudo-labelled copies are written to
image_path = "/home/jetson/jetson-inference/python/training/detection/ssd/data/Unlabelled_dataset/JPEGImages/"
images = os.listdir(image_path)
output_path = "/home/jetson/jetson-inference/python/training/detection/ssd/data/Pseudo_Label_image"
# pseudo_labels = []
for image in images:
    img = Image.open(os.path.join(image_path, image)).convert('RGB')
    # image = np.array(img)
    batch_pseudo_labels = []

    # convert the PIL image to a cudaImage
    cuda_image = jetson.utils.cudaFromNumpy(np.asarray(img))

    # run the detection model
    detections = net.Detect(cuda_image)

    # print the class ID, label and confidence score for each detection
    for detection in detections:
        class_id = detection.ClassID
        class_label = net.GetClassDesc(class_id)
        print(f"Class ID: {class_id}, Class Label: {class_label}, Confidence: {detection.Confidence}")
        batch_pseudo_labels.append(class_label)

    print(f"Pseudo-labels for {image}: {batch_pseudo_labels}")
    # pseudo_labels.extend(batch_pseudo_labels)

    # save a copy of the image named after its pseudo-labels
    output_name = os.path.splitext(image)[0] + "_" + "_".join(batch_pseudo_labels) + ".jpg"
    img.save(os.path.join(output_path, output_name))

print('The pseudo-labelled images were saved successfully')
In the above code I have a set of unlabelled images captured from the Jetson Nano and stored in the folder Unlabelled_dataset. I convert each image into a cudaImage and then run the detectNet model on it to generate pseudo-labels for the unlabelled images, and I print the label and the confidence with which the model detects it. But how can I verify that the label detected on an unlabelled image is actually correct? Printing the confidence alone does not tell me whether the model has detected the label correctly or not.
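One way I am considering checking this is to burn the detection overlay (boxes, labels and confidences) into a copy of each image and then spot-check those copies visually. Below is a minimal sketch of that idea, not part of the script above; the Pseudo_Label_review folder and the 0.7 confidence cut-off are hypothetical choices of mine, and it assumes jetson.utils.loadImage() and jetson.utils.saveImage() are available in my jetson-inference build:

import os
import jetson.utils

# hypothetical folder for images that need a visual spot-check
review_path = "/home/jetson/jetson-inference/python/training/detection/ssd/data/Pseudo_Label_review"
os.makedirs(review_path, exist_ok=True)

# `net`, `image_path` and `images` are assumed to be defined as in the script above
for image in images:
    cuda_image = jetson.utils.loadImage(os.path.join(image_path, image))

    # ask detectNet to draw the boxes, labels and confidences onto the image itself
    detections = net.Detect(cuda_image, overlay="box,labels,conf")

    # keep only images with at least one reasonably confident detection for review
    if any(d.Confidence >= 0.7 for d in detections):
        jetson.utils.saveImage(os.path.join(review_path, image), cuda_image)

This would not replace a properly hand-labelled validation set, but it would make it quick to see whether the boxes and labels actually line up with the objects in the unlabelled images. Is that a reasonable way to verify the pseudo-labels, or is there a better approach?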