Holoscan problem using VideoStreamRecorderOp for saving segmentation output

Hi, I'm having some issues using VideoStreamRecorderOp (a Holoscan operator) to save TensorRT segmentation output. Although it creates a GXF file, when I convert the GXF to AVI or MP4 the output file is just a black video. The warning says "no serializer found for component". Below I've included the warning and my code.

2024-02-28 02:12:08.447 WARN /workspace/holoscan-sdk/gxf_extensions/stream_playback/video_stream_serializer.cpp@188: No serializer found for component '' with type ID 0x7982aeac37f141beade86f00b4b5d47c
YAML config file

replayer:  # VideoStreamReplayer
  basename: "arthroscopic_segmentation"  
  frame_rate: 0 # as specified in timestamps
  repeat: true # default: false
  realtime: true # default: true
  count: 0 # default: 0 (no frame count restriction)

aja:  # AJASourceOp
  width: 1920
  height: 1080
  rdma: false
  enable_overlay: false

drop_alpha_channel:  # FormatConverter
  in_dtype: "rgba8888"
  in_tensor_name: source_video
  out_dtype: "rgb888"
  
preprocessor:  # FormatConverter
  in_tensor_name: "input_tensor"
  out_tensor_name: "output_tensor"
  out_dtype: "float32"
  resize_width: 1024
  resize_height: 1024
  # scale_min: 0.0 scale_max: 255.0
  # out_channel_order: [2,1,0]  # BGR to RGB

inference:  # Inference
  backend: "trt"
  pre_processor_map: 
    "AJA_arthrosegmentation": ["source_video"]
  inference_map: 
    "AJA_arthrosegmentation": ["output"]
  # dla_core: 0  
  # To use the DLA - I found information that the Holoscan 0.4.0 SDK supports it - does Holoscan 1.0.3 not support it?

postprocessor:  # SegmentationPostprocessor
  in_tensor_name: output
  network_output_type: softmax
  data_format: nchw

viz:  # Holoviz
  width: 1920
  height: 1080
  color_lut: [
    [0.65, 0.81, 0.89, 0.1],
    [0.2, 0.63, 0.17, 0.7]
    ]
    

Python

import os
from argparse import ArgumentParser

from holoscan.core import Application
from holoscan.operators import (
    AJASourceOp,
    FormatConverterOp,
    HolovizOp,
    InferenceOp,
    InferenceProcessorOp,  # check this one
    SegmentationPostprocessorOp,
    VideoStreamRecorderOp,
    VideoStreamReplayerOp,
)
from holoscan.resources import BlockMemoryPool, CudaStreamPool, MemoryStorageType, UnboundedAllocator


class AJA_arthrosegmentation(Application):
    def __init__(self, data, source="aja"):
        """Initialize the arthrosegmentation application

        Parameters
        ----------
        data : Location to the data
        """
        super().__init__()

        # set name
        self.name = "AJA_arthrosegmentation"

        # Optional parameters affecting the graph created by compose.
        self.source = source

        if data == "none":
            data = os.environ.get("HOLOSCAN_INPUT_PATH", "../data")

        self.sample_data_path = data

        self.model_path = os.path.join(self.sample_data_path, "arthroscopic_segmentation", "model")
        self.model_path_map = {
            "AJA_arthrosegmentation": os.path.join(self.model_path, "unet_1024x1024_nhwc.onnx"),
        }

        self.video_dir = os.path.join(self.sample_data_path, "arthroscopic_segmentation", "video")

        if not os.path.exists(self.video_dir):
            raise ValueError(f"Could not find video data: {self.video_dir=}")

    def compose(self):
        n_channels = 4  # 4 channels
        bpp = 4  # 4 bytes per pixel

        cuda_stream_pool = CudaStreamPool( # Create a pool of CUDA streams
            self,
            name="cuda_stream",
            dev_id=0,
            stream_flags=0,
            stream_priority=0,
            reserved_size=1,
            max_size=5,
        )

        is_aja = self.source.lower() == "aja" # True if source is AJA
        if is_aja:
            source = AJASourceOp(self, name="aja", **self.kwargs("aja")) # Create an AJA source operator
            host_allocator = UnboundedAllocator(self, name="host_allocator") # Use UnboundedAllocator instead of BlockMemoryPool

            drop_alpha_channel = FormatConverterOp( # Create a format converter operator
                self,
                name="drop_alpha_channel",
                pool=host_allocator, # Use host_allocator instead of BlockMemoryPool
                cuda_stream_pool=cuda_stream_pool,
                **self.kwargs("drop_alpha_channel"),
            ) # The format converter operator will drop the alpha channel
        else:
            host_allocator = UnboundedAllocator(self, name="host_allocator")
            video_dir = os.path.join(self.sample_data_path, "arthroscopic_segmentation", "video") # Set the video directory  
            if not os.path.exists(video_dir): # Check if the video directory exists
                raise ValueError(f"Could not find video data: {video_dir=}")
            
            source = VideoStreamReplayerOp( # Create a video stream replayer operator
                self, name="replayer", directory=self.video_dir, **self.kwargs("replayer")
            )

        preprocessor = FormatConverterOp(
            self,
            name="preprocessor",
            pool=host_allocator,  # Use host_allocator instead of BlockMemoryPool
            cuda_stream_pool=cuda_stream_pool,
            **self.kwargs("preprocessor"),
        )

        # Inference
        inference = InferenceOp(
            self,
            name="inference",
            allocator=host_allocator,
            model_path_map=self.model_path_map,
            **self.kwargs("inference"),
        )

        # Post processor

        postprocessor = SegmentationPostprocessorOp(
            self, name="postprocessor", allocator=host_allocator, **self.kwargs("postprocessor")
        )

        # Recorder
        recorder = VideoStreamRecorderOp(
            self, 
            directory=self.video_dir, 
            basename="segmentation", 
            flush_on_tick=True, 
            name="recorder"
        )        
        
        viz = HolovizOp(self, name="viz", **self.kwargs("viz"))

        # Define the workflow
        if is_aja:
            self.add_flow(source, viz, {("video_buffer_output", "receivers")})
            self.add_flow(source, drop_alpha_channel, {("video_buffer_output", "")})
            self.add_flow(drop_alpha_channel, preprocessor)
        else:
            self.add_flow(source, viz, {("output", "receivers")})
            self.add_flow(source, preprocessor, {("output", "source_video")})   # why source_video?
        self.add_flow(preprocessor, inference, {("tensor", "receivers")})       # why tensor?
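        # Note on the port names above: "source_video" and "tensor" are FormatConverterOp's
        # built-in input and output ports, and "receivers"/"transmitter" are InferenceOp's ports.
        # They are defined by the operators themselves and are independent of the
        # in_tensor_name/out_tensor_name values configured in the YAML file.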
        self.add_flow(inference, postprocessor, {("transmitter", "in_tensor")})
        self.add_flow(postprocessor, recorder, {("out_tensor", "input")})
        self.add_flow(postprocessor, viz, {("out_tensor", "receivers")})

if __name__ == "__main__":
    parser = ArgumentParser(description="AJA_arthrosegmentation demo application.")
    parser.add_argument(
        "-s",
        "--source",
        choices=["replayer", "aja"],
        default="replayer",
        help=(
            "If 'replayer', replay a prerecorded video. If 'aja' use an AJA "
            "capture card as the source (default: %(default)s)."
        ),
    )
    parser.add_argument(
        "-d",
        "--data",
        default="none",
        help=("Set the data path"),
    )

    args = parser.parse_args()

    config_file = os.path.join(os.path.dirname(__file__), "AJA_arthrosegmentation.yaml")

    app = AJA_arthrosegmentation(data=args.data, source=args.source)
    app.config(config_file)
    app.run()

Hi there, the warning you're seeing may not be the root cause. Could you please create a minimal reproducible sample we can run for this issue of the saved video being blank? We'd be happy to take a look into it.
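
For example, something roughly like this would isolate the recorder from the inference pipeline (an untested sketch; the directory and basename are just placeholders based on the data layout in your post):

import os

from holoscan.core import Application
from holoscan.operators import VideoStreamRecorderOp, VideoStreamReplayerOp


class MinimalRecorderApp(Application):
    """Replay the prerecorded GXF video and immediately re-record it."""

    def compose(self):
        data_dir = os.environ.get("HOLOSCAN_INPUT_PATH", "../data")
        video_dir = os.path.join(data_dir, "arthroscopic_segmentation", "video")

        # Source: replay the existing GXF entities
        # (arthroscopic_segmentation.gxf_entities / .gxf_index)
        replayer = VideoStreamReplayerOp(
            self,
            name="replayer",
            directory=video_dir,
            basename="arthroscopic_segmentation",
            frame_rate=0,
            repeat=False,
            realtime=True,
            count=0,
        )

        # Sink: serialize the replayed frames back out under a new basename
        recorder = VideoStreamRecorderOp(
            self,
            name="recorder",
            directory=video_dir,
            basename="recorder_test",
        )

        self.add_flow(replayer, recorder, {("output", "input")})


if __name__ == "__main__":
    MinimalRecorderApp().run()

If the file written by this minimal app converts to a normal video, the recorder itself is fine and the issue is in what your pipeline feeds it.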

Hi, problem solved. I still get the warning, but you were right: it is not directly linked to the problem I was having. My problem was that I was not using VideoStreamRecorderOp the right way.

Thank you for your help!
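
For reference, the SDK's endoscopy_tool_tracking example records its incoming video by placing a FormatConverterOp directly in front of VideoStreamRecorderOp, so the recorder receives an rgb888 (uint8) tensor rather than the segmentation post-processor output. A rough sketch of that wiring (the output directory and basename below are placeholders, and the AJA settings mirror the ones in my YAML):

import os

from holoscan.core import Application
from holoscan.operators import AJASourceOp, FormatConverterOp, VideoStreamRecorderOp
from holoscan.resources import UnboundedAllocator


class RecordAJAInput(Application):
    """Record the raw AJA capture after converting it to an rgb888 tensor."""

    def compose(self):
        source = AJASourceOp(self, name="aja", width=1920, height=1080, rdma=False)

        # Convert the rgba8888 video buffer from the capture card into an
        # rgb888 (uint8) tensor before handing it to the recorder.
        recorder_format_converter = FormatConverterOp(
            self,
            name="recorder_format_converter",
            pool=UnboundedAllocator(self, name="recorder_pool"),
            in_dtype="rgba8888",
            out_dtype="rgb888",
        )

        recorder = VideoStreamRecorderOp(
            self,
            name="recorder",
            directory="/tmp",        # placeholder output directory
            basename="aja_capture",  # placeholder basename
        )

        self.add_flow(source, recorder_format_converter, {("video_buffer_output", "source_video")})
        self.add_flow(recorder_format_converter, recorder, {("tensor", "input")})


if __name__ == "__main__":
    RecordAJAInput().run()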
