Cannot register existing type 'GstVideoAggregator'

I’m trying to create a drowsiness detection program where specific detected behaviors trigger actions such as playing an alarm or displaying a GIF.

What I’ve tried

rm .cache/gstreamer-1.0/registry.aarch64.bin

Updated gst-inspect-1.0 to version 1.16.3 from the original version, which didn’t fix anything.

gdb --args python3 script.py

(gdb) run

(gst-plugin-scanner:7308): GLib-GObject-WARNING **: 16:10:18.108: cannot register existing type 'GstVideoAggregator'

(gst-plugin-scanner:7308): GLib-GObject-CRITICAL **: 16:10:18.110: g_type_add_interface_static: assertion 'G_TYPE_IS_INSTANTIATABLE (instance_type)' failed

(gst-plugin-scanner:7308): GLib-CRITICAL **: 16:10:18.110: g_once_init_leave: assertion 'result != 0' failed

(gst-plugin-scanner:7308): GLib-GObject-CRITICAL **: 16:10:18.110: g_type_register_static: assertion 'parent_type > 0' failed

(gst-plugin-scanner:7308): GLib-CRITICAL **: 16:10:18.110: g_once_init_leave: assertion 'result != 0' failed

(gst-plugin-scanner:7308): GStreamer-CRITICAL **: 16:10:18.110: gst_element_register: assertion 'g_type_is_a (type, GST_TYPE_ELEMENT)' failed
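
To confirm the warnings come from the plugin scanner rather than from my script, a minimal init-only snippet reproduces them (this assumes the python3-gi GStreamer bindings are installed):

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

# Gst.init() triggers gst-plugin-scanner on a cold registry cache,
# which is where the "cannot register existing type" warnings appear
Gst.init(None)
print(Gst.version_string())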

Full debug log

gst-inspect-1.0 -b
Blacklisted files:
  libgstnvcompositor.so

Total count: 1 blacklisted file
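
The blacklist can also be checked from Python. This is a sketch assuming the gi bindings are available:

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)
# A plugin that failed to load (e.g. the GstVideoAggregator clash)
# gets blacklisted and no longer appears in the registry
plugin = Gst.Registry.get().find_plugin('nvcompositor')
if plugin is None:
    print('nvcompositor missing from registry (blacklisted?)')
else:
    print('nvcompositor loaded from', plugin.get_filename())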

Source Code

import os
import sys
import time

import jetson.utils
import jetson.inference

from threading import Thread
from playsound import playsound
from PIL import Image, ImageSequence


GIF_TIME = 3  # seconds
CLOSE_EYE_TIME = 3  # seconds

CAMERA_PATH = "/dev/video1"
DISPLAY_PATH = "display://0"

FILE_DIR = os.path.dirname(os.path.realpath(__file__))
MODEL_DIR = os.path.join(FILE_DIR, 'model')
AUDIO_DIR = os.path.join(FILE_DIR, 'audio')
GIF_DIR = os.path.join(FILE_DIR, 'gif')

output = jetson.utils.videoOutput(DISPLAY_PATH)


def play_alarm():
    playsound(os.path.join(AUDIO_DIR, "alarm.wav"), block=False)


class DrawGif(Thread):
    def __init__(self) -> None:
        super().__init__()

        self.counter = 0
        self.current_state = "default"
        self.states = {
            "default": self.get_frames("HappyDefault.png"),
            "smile": self.get_frames("smile.gif"),
            "mad": self.get_frames("mad.gif"),
        }

    def set_state(self, state):
        self.current_state = state
        self.counter = 0

    def get_frames(self, gif):
        path = os.path.join(GIF_DIR, gif)
        frames = []
        for img in ImageSequence.Iterator(Image.open(path)):
            # copy() is required: the iterator yields the same underlying
            # Image object seeked to each frame, so storing it directly
            # would leave every list entry pointing at the last frame
            frames.append(img.copy())
        return frames

    def run(self):
        while True:
            if self.counter == len(self.states[self.current_state]):
                self.counter = 0
            try:
                output.Render(self.states[self.current_state][self.counter])
            except Exception:
                # set_state() can reset current_state mid-iteration, leaving
                # a stale frame index; restart the animation in that case
                self.counter = 0
            self.counter += 1
            time.sleep(20/1000)


gif_thread = DrawGif()


def main():
    net = jetson.inference.detectNet('googlenet', [
        '--model={}'.format(os.path.join(MODEL_DIR, 'ssd-mobilenet.onnx')),
        '--labels={}'.format(os.path.join(MODEL_DIR, 'labels.txt')),
        '--input-blob=input_0', '--output-cvg=scores', '--output-bbox=boxes'
    ])

    item = 0
    speed = 0.0
    camera = jetson.utils.videoSource(CAMERA_PATH)
    info = jetson.utils.cudaFont()
    
    dt = time.time()
    close_state = False
    timer = time.time()
    try:
        while True:

            img = camera.Capture()
            detections = net.Detect(img)
            # Define new list items
            items = []
            for detection in detections:
                # turns ClassID into string
                item = net.GetClassDesc(detection.ClassID)
                # Add detected item to items list
                items.append(item)
                info.OverlayText(img, 5, 5, "speed:{:.2f}".format(speed), int(
                    detection.Left)+5, int(detection.Top)+35, info.White, info.Gray40)

            # If animation show for while
            if gif_thread.current_state != "default":
                if (time.time() - timer) > GIF_TIME:
                    gif_thread.set_state("default")

            if close_state:
                if (time.time() - dt) > CLOSE_EYE_TIME:
                    gif_thread.set_state("mad")
                    play_alarm()

            if "ClosedEyes" in items or "LookingDown" in items:
                if not close_state:
                    close_state = True
                    dt = time.time()

            elif "OpenEyes" in items:
                close_state = False
                gif_thread.set_state("default")

            elif "Phone" in items:
                gif_thread.set_state("mad")
                timer = time.time()
                print("Wow really going on your phone huh")

            output.SetStatus(
                "Object Detection | Network {:.0f} FPS".format(
                    net.GetNetworkFPS()
                )
            )

            # print out performance info
            # net.PrintProfilerTimes()

    except KeyboardInterrupt:
        sys.exit(0)
    except Exception as e:
        print(e)
    finally:
        sys.exit(0)


if __name__ == '__main__':
    main_thread = Thread(target=main, args=())
    gif_thread.start()
    main_thread.start()

Hi,
A possible solution is to rebuild the nvcompositor plugin. Please refer to this post:
How to make videomix-plugin not block when one or more datasource stop? - #5 by DaneLLL

Does this mean I have to downgrade to JetPack 4.5 to rebuild nvcompositor? I’m currently on 4.6.

Hi,
The source code has been public since JetPack 4.5.1. Please download the package that matches your version.
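
After replacing the rebuilt libgstnvcompositor.so, remove the cached registry again so the plugin gets rescanned. For example, from Python (the cache path is an assumption based on a default JetPack install; adjust it if GST_REGISTRY is set):

import os

# Assumed default registry cache location on Jetson (aarch64)
registry = os.path.expanduser('~/.cache/gstreamer-1.0/registry.aarch64.bin')
if os.path.exists(registry):
    os.remove(registry)  # rebuilt on the next Gst.init() / plugin scan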
