Hi, I am facing an issue. Following the endoscopy_tool_tracking example, I changed my code to do the overlay buffer flow between the AJA source and the visualizer, but I am getting an error related to the buffering loop between AJA and Holoviz:
```
[info] [gxf_executor.cpp:210] Creating context
DEBUGGING: is_aja_overlay_enabled is True
[info] [gxf_executor.cpp:1595] Loading extensions from configs...
[error] [gxf_executor.cpp:1245] Worklist is empty, but not all nodes have been visited. There is a cycle.
[error] [gxf_executor.cpp:1247] Application is being aborted.
[error] [gxf_executor.cpp:1621] Failed to initialize fragment
[error] [gxf_executor.cpp:292] Failed to initialize GXF graph
```
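As far as I can tell, the cycle the initializer rejects boils down to just these two flows, which I copied from endoscopy_tool_tracking (a minimal sketch; `source` is my AJASourceOp and `viz` my HolovizOp):

```python
# Overlay loop between the AJA source and Holoviz, as in the
# endoscopy_tool_tracking example: Holoviz renders into a buffer that
# the AJA card mixes back onto its output, forming a cycle in the graph.
self.add_flow(source, viz, {("overlay_buffer_output", "render_buffer_input")})
self.add_flow(viz, source, {("render_buffer_output", "overlay_buffer_input")})
```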
Attached below are my workflow as well as my YAML configuration file.
```python
if is_aja:  # If the source is AJA
    # self.add_flow(source, viz, {("video_buffer_output", "receivers")})
    self.add_flow(source, drop_alpha_channel, {("video_buffer_output", "")})
    # self.add_flow(drop_alpha_channel, preprocessor)
    if do_preprocessing:
        self.add_flow(drop_alpha_channel, ImageProcessing, {("", "input_tensor")})
        self.add_flow(ImageProcessing, preprocessor, {("output_tensor", "source_video")})
    # self.add_flow(drop_alpha_channel, ImageProcessing, {("", "input_tensor")})
    # self.add_flow(ImageProcessing, viz, {("output_tensor", "receivers")})
    # self.add_flow(ImageProcessing, preprocessor, {("output_tensor", "source_video")})
else:  # If the source is not AJA (video file)
    if do_preprocessing:  # If preprocessing is enabled
        self.add_flow(source, ImageProcessing, {("output", "input_tensor")})
        self.add_flow(source, viz, {("video_buffer_output" if is_aja else "output", "receivers")})
        # self.add_flow(ImageProcessing, Debugging, {("output_tensor", "input_tensor")})
        # self.add_flow(Debugging, preprocessor, {("output_tensor", "source_video")})
        self.add_flow(ImageProcessing, preprocessor, {("output_tensor", "source_video")})
        # self.add_flow(ImageProcessing, MyPreprocessor, {("output_tensor", "input_tensor")})
    else:  # If preprocessing is disabled
        self.add_flow(source, preprocessor, {("output", "source_video")})

# self.add_flow(preprocessor, DebuggingPreprocessor, {("tensor", "input_tensor")})
# self.add_flow(DebuggingPreprocessor, inference, {("output_tensor", "receivers")})
self.add_flow(preprocessor, inference, {("tensor", "receivers")})
# self.add_flow(MyPreprocessor, inference, {("output_tensor", "receivers")})
# self.add_flow(inference, DebuggingInference, {("transmitter", "input_tensor")})
# self.add_flow(DebuggingInference, postprocessor, {("output_tensor", "in_tensor")})
self.add_flow(inference, postprocessor, {("transmitter", "in_tensor")})
self.add_flow(postprocessor, PostImageProcessing, {("out_tensor", "input_tensor")})
self.add_flow(PostImageProcessing, viz, {("out_tensor", "receivers")})

if is_aja_overlay_enabled:
    print("DEBUGGING: is_aja_overlay_enabled is True")
    # Overlay loop: Holoviz renders into a buffer that the AJA card
    # mixes back onto its output.
    self.add_flow(source, viz, {("overlay_buffer_output", "render_buffer_input")})
    self.add_flow(viz, source, {("render_buffer_output", "overlay_buffer_input")})
if do_record_output:
    self.add_flow(viz, recorder_format_converter, {("render_buffer_output", "source_video")})
    self.add_flow(recorder_format_converter, recorder)
```
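In case it is relevant, the visualizer is built with the render-buffer ports enabled so the two overlay flows above have ports to connect to, roughly like this (a sketch: the BlockMemoryPool name and sizes are placeholders, and the remaining parameters come from the `viz` section of the YAML below):

```python
from holoscan.operators import HolovizOp
from holoscan.resources import BlockMemoryPool, MemoryStorageType

viz = HolovizOp(
    self,
    name="viz",
    # Allocator for the render_buffer_output frames; block_size is a
    # placeholder sized for one 1920x1080 RGBA8888 frame.
    allocator=BlockMemoryPool(
        self,
        name="viz_pool",
        storage_type=MemoryStorageType.DEVICE,
        block_size=1920 * 1080 * 4,
        num_blocks=2,
    ),
    # enable_render_buffer_input / enable_render_buffer_output come in via
    # the "viz" section of the YAML and create the ports used by the loop.
    **self.kwargs("viz"),
)
```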
And here is my YAML configuration:

```yaml
%YAML 1.2
---
#flags
is_record_segmentation: false # true if the segmentation is recorded
is_record_output: true # true if the output is recorded
is_preprocessing: false # true if the preprocessing is done
replayer: # VideoStreamReplayer
basename: "arthroscopic_segmentation"
frame_rate: 60 # as specified in timestamps
repeat: false # default: false
realtime: true # default: true
count: 0 # default: 0 (no frame count restriction)
aja: # AJASourceOp
width: 1920
height: 1080
rdma: true
enable_overlay: true
overlay_rdma: true
drop_alpha_channel: # FormatConverter
in_dtype: "rgba8888"
in_tensor_name: source_video
out_dtype: "rgb888"
ImageProcessing:
in_tensor_name: input_tensor
out_tensor_name: output_tensor
Debugging:
in_tensor_name: input_tensor
out_tensor_name: output_tensor
DebuggingPreprocessor:
in_tensor_name: input_tensor
out_tensor_name: output_tensor
DebuggingInference:
in_tensor_name: input_tensor
out_tensor_name: output_tensor
MyPreprocessor: # FormatConverter
in_tensor_name: input_tensor
out_tensor_name: output_tensor
preprocessor: # FormatConverter
out_tensor_name: source_video
out_dtype: "float32"
#scale_min: 0.0
#scale_max: 255.0
#if not is_preprocessing:
# resize_width: 1024
# resize_height: 1024
# out_channel_order: [2,1,0] # BGR to RGB
inference: # Inference
backend: "trt"
pre_processor_map:
"AJA_arthrosegmentation": ["source_video"]
inference_map:
"AJA_arthrosegmentation": ["output"]
# dla_core: 0
# To use the DLA: for Holoscan 0.4.0 I found SDK information saying it is supported; does Holoscan 1.0.3 not support it?
postprocessor: # SegmentationPostprocessor
in_tensor_name: output
network_output_type: softmax
data_format: nchw
recorder_format_converter:
in_dtype: "rgba8888"
out_dtype: "rgb888"
scale_min: 0.0
scale_max: 255.0
out_tensor_name: out_video
PostImageProcessing:
in_tensor_name: input_tensor
out_tensor_name: output_tensor
Upsampling:
in_tensor_name: input_tensor
out_tensor_name: output_tensor
SaveInput:
in_tensor_name: source_video
recorder:
directory: "/tmp"
basename: "tensor"
viz: # Holoviz
width: 1920
height: 1080
color_lut: [
[0, 0, 0, 0],
#[1, 1, 1, 1]
[0.2, 0.63, 0.17, 0.32]
]
#use_exclusive_display: False # default: true
#display_name: "DP-0" # default: "DP-2"
fullscreen: false
enable_render_buffer_output: true
enable_render_buffer_input: true
```
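For completeness, I launch the app the usual way (a sketch; the class name and config path are placeholders for my actual ones):

```python
if __name__ == "__main__":
    app = SegmentationApp()  # placeholder name for my Application subclass
    app.config("app_config.yaml")  # placeholder path to the YAML above
    app.run()
```

Any idea what is causing the cycle error, or what else the overlay buffer loop between AJASourceOp and Holoviz needs in order to be accepted?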