Drawing bounding box on unicast stream (GStreamer)

Hello.
I have code written in C++ that streams both unicast and multicast from a single main.
I want to draw a box only on the unicast stream. I'm currently drawing it using g_signal_connect with the cairo method, but I couldn't find a way to separate the two streams from each other; I want the drawing on the unicast stream only. Can someone help?

Hi,
Is it possible to send the frame data to two individual appsinks, one for unicast and the other for multicast? Then you could apply the drawing code to a specific appsink only.
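
A minimal sketch of that idea (illustrative element names; you would then pull samples from appsink_unicast, draw on them, and send only those out over unicast):

... ! tee name=t ! queue ! appsink name=appsink_unicast t. ! queue ! appsink name=appsink_multicast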

I am currently able to stream to two separate IP addresses with udpsink via g_object_set, so that part works. But when I write g_signal_connect(cairoelement, "draw", G_CALLBACK(mydraw_overlay), unicastsink); the overlay is sent to both IP addresses, even though I want it only on the unicast stream.

Hi,
Please check whether the following linkage works in your use case:

... ! tee name=t ! queue ! udpsink name=sink_1 t. ! queue ! udpsink name=sink_2

One udpsink is for unicast and the other for multicast. If this linkage does not work, the request may not be possible.

Let's see if other users can provide suggestions.

With this linkage the streams are already separate, but when I draw the bounding box it appears on both the unicast and the multicast stream, even though I specify only the unicast one. This is exactly my problem.

Hi,
So you have two udpsinks in the pipeline? If yes, you can register a probe function on the sink pad of the unicast udpsink and put the drawing code in that probe function.

Here is a patch for registering a probe function on the source pad of nvarguscamerasrc:
Nvarguscamerasrc Buffer Metadata is missing - #29 by DaneLLL
You can use the same method to apply it to the sink pad of the udpsink.
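
For example, a minimal sketch (untested; sink_1 is the unicast udpsink's name from the pipeline above, and the drawing code would go where indicated):

static GstPadProbeReturn
unicast_sink_probe (GstPad * pad, GstPadProbeInfo * info, gpointer u_data)
{
    GstBuffer *buffer = GST_PAD_PROBE_INFO_BUFFER (info);
    /* draw the bounding box on this buffer here */
    return GST_PAD_PROBE_OK;
}

/* ... after creating the pipeline ... */
GstElement *sink1 = gst_bin_get_by_name (GST_BIN (pipeline), "sink_1");
GstPad *sinkpad = gst_element_get_static_pad (sink1, "sink");
gst_pad_add_probe (sinkpad, GST_PAD_PROBE_TYPE_BUFFER,
                   (GstPadProbeCallback) unicast_sink_probe, NULL, NULL);
gst_object_unref (sinkpad);
gst_object_unref (sink1);

One caveat: the two tee branches may share the same underlying buffer memory, so writing into the buffer at this point can still affect the other branch unless the unicast branch has its own copy of the frame.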

I am trying it; I will share the results with you…

I added a verification to check that my draw-overlay method is reached via gst_pad_add_probe, and it works without problems. But my problem is still not solved: I want the draw-overlay method to apply to the unicast stream only.

Hi,
Do you send raw frame data to the two udpsinks, or a compressed H264/H265 stream? If you send a compressed stream to udpsink, you would need to encode two streams: one original, and the other with the bounding boxes.
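
Roughly, the shape would be (illustrative only; the drawing step sits in the unicast branch, before its own encoder):

... ! tee name=t ! queue ! <draw boxes> ! encoder ! payloader ! udpsink   (unicast, with boxes)
                 t. ! queue ! encoder ! payloader ! udpsink               (multicast, original)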

I'm not sure I correctly understand your case; please explain it further and provide simplified C++ code and a build command so that one can easily reproduce it.
Be sure you're streaming to different ports for unicast and multicast.

You may try this code (for JP4 only), simulating your source with videotestsrc and adding a src pad probe on the unicast branch's first nvvidconv that uses OpenCV CUDA to draw a rectangle. Note that there is a second nvvidconv in that branch, converting the RGBA NVMM frame after processing into the NV12 format expected by the H264 encoder:

#include <iostream>
#include <unistd.h>
#include <gst/gst.h>
//#include <cairo.h>
//#include <pango/pango.h>
//#include <pango/pango-layout.h>
//#include <pango/pangocairo.h>
#include "nvbuf_utils.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cudaEGL.h>
#include <opencv2/core.hpp>
#include <opencv2/cudafilters.hpp>

static EGLDisplay egl_display;

static GstPadProbeReturn
conv_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{
    GstBuffer *buffer = (GstBuffer *) info->data;
    GstMapInfo map    = {0};
    int dmabuf_fd = 0;
    gst_buffer_map (buffer, &map, GST_MAP_WRITE);
    if (-1 == ExtractFdFromNvBuffer((void *)map.data, &dmabuf_fd))
    {
        printf("ExtractFdFromNvBuffer failed\n");
        gst_buffer_unmap(buffer, &map);
        return GST_PAD_PROBE_OK;
    }

    {
        EGLImageKHR egl_image;
        egl_image = NvEGLImageFromFd(egl_display, dmabuf_fd);
        //printf("EglImage at %p\n", egl_image);
        CUresult status;
        CUeglFrame eglFrame;
        CUgraphicsResource pResource = NULL;
        cudaFree(0); /* no-op free that ensures the CUDA context is initialized on this thread */
        status = cuGraphicsEGLRegisterImage(&pResource,
                    egl_image,
                    CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
        if (status != CUDA_SUCCESS)
        {
            printf("cuGraphicsEGLRegisterImage failed: %d \n",status);
            NvDestroyEGLImage(egl_display, egl_image);
            gst_buffer_unmap(buffer, &map);
            return GST_PAD_PROBE_OK;
        }
        status = cuGraphicsResourceGetMappedEglFrame(&eglFrame, pResource, 0, 0);
        if (status != CUDA_SUCCESS)
        {
            printf("cuGraphicsResourceGetMappedEglFrame failed: %d \n",status);
            cuGraphicsUnregisterResource(pResource);
            NvDestroyEGLImage(egl_display, egl_image);
            gst_buffer_unmap(buffer, &map);
            return GST_PAD_PROBE_OK;
        }
        status = cuCtxSynchronize();
        if (status != CUDA_SUCCESS)
        {
            printf("cuCtxSynchronize failed: %d \n",status);
            cuGraphicsUnregisterResource(pResource);
            NvDestroyEGLImage(egl_display, egl_image);
            gst_buffer_unmap(buffer, &map);
            return GST_PAD_PROBE_OK;
        }
        
        // Here just setting a 200x200 red rectangle starting at (100,100) 
        cv::cuda::GpuMat d_mat(eglFrame.height, eglFrame.width, CV_8UC4, eglFrame.frame.pPitch[0]);
        cv::Rect roi(100,100,200,200);
        d_mat(roi).setTo(cv::Scalar(255,0,0,255));

        status = cuCtxSynchronize();
        status = cuGraphicsUnregisterResource(pResource);
        NvDestroyEGLImage(egl_display, egl_image);
    }
    gst_buffer_unmap(buffer, &map);

    return GST_PAD_PROBE_OK;
}


int main (gint argc, gchar * argv[])
{  
    egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    eglInitialize(egl_display, NULL, NULL);

    gst_init (&argc, &argv);
	GMainLoop *loop = g_main_loop_new (NULL, FALSE);

	const char* pipelineStr = "videotestsrc is-live=1 ! video/x-raw, format=RGBA, width=640, height=480, framerate=30/1 ! tee name=t    "
		                      "t. ! queue ! nvvidconv name=unicast_conv ! video/x-raw(memory:NVMM),format=RGBA ! nvvidconv ! nvv4l2h264enc insert-sps-pps=1 idrinterval=15 ! queue ! h264parse ! rtph264pay ! udpsink auto-multicast=0 host=192.168.1.15 port=5000    "
				              "t. ! queue ! nvvidconv ! nvv4l2h264enc insert-sps-pps=1 idrinterval=15 ! queue ! h264parse ! rtph264pay ! udpsink auto-multicast=1 host=224.1.1.1 port=5002 ";
				 
	printf("Using pipeline:\n %s\n", pipelineStr);

	/* Create the pipeline... this will negotiate unspecified caps between elements */
	GstElement *pipeline = gst_parse_launch (pipelineStr, NULL);
	if (!pipeline) {
		std::cerr << "Failed to create pipeline\n";
		exit(-1);
	}

	/* Get element with name unicast_conv */
	GstElement *unicast_conv = gst_bin_get_by_name(GST_BIN(pipeline), "unicast_conv");
	if(!unicast_conv) {
		std::cerr << "Failed to get unicast_conv\n";
		exit(-2);	
	}

	/* Get its src (output) pad */
	GstPad *srcpad = gst_element_get_static_pad (unicast_conv, "src");
	if(!srcpad) {
		std::cerr << "Failed to get srcpad\n";
		exit(-3);	
	}

	/* Attach a probe callback to that pad; it will be called for each output buffer */
	gst_pad_add_probe (srcpad, GST_PAD_PROBE_TYPE_BUFFER, (GstPadProbeCallback) conv_src_pad_buffer_probe, NULL, NULL);
  	gst_object_unref (srcpad);
	gst_object_unref (unicast_conv);

	/* Ok, successfully created the pipeline, now start it */
  	gst_element_set_state (pipeline, GST_STATE_READY);
  	gst_element_set_state (pipeline, GST_STATE_PLAYING);

 	/* wait until it's up and running or failed */
  	if (gst_element_get_state (pipeline, NULL, NULL, -1) == GST_STATE_CHANGE_FAILURE) {
    		g_error ("Failed to go into PLAYING state");
    		return(-1);
  	}

  	g_print ("Running ...\n");
  	g_main_loop_run (loop);

    return 0;
}

Ran it from my AGX-Xavier (running JP4 R32.6.1).
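
For reference, it can be built with something along these lines (the include and library paths are assumptions and may differ per JetPack install):

g++ -O2 -o test test.cpp $(pkg-config --cflags --libs gstreamer-1.0 opencv4) -I/usr/src/jetson_multimedia_api/include -I/usr/local/cuda/include -L/usr/local/cuda/lib64 -lcuda -lcudart -L/usr/lib/aarch64-linux-gnu/tegra -lnvbuf_utils -lEGL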

Then from the unicast target 192.168.1.15 (AGX Orin), I can receive the unicast stream with:

gst-launch-1.0 udpsrc auto-multicast=0 address=192.168.1.15 port=5000 ! application/x-rtp,encoding-name=H264 ! queue ! rtpjitterbuffer latency=1000 ! rtph264depay ! h264parse ! nvv4l2decoder ! queue ! nvvidconv ! autovideosink

or get the multicast stream with:

gst-launch-1.0 udpsrc address=224.1.1.1 port=5002 ! application/x-rtp,encoding-name=H264 ! queue ! rtpjitterbuffer latency=1000 ! rtph264depay ! h264parse ! nvv4l2decoder ! queue ! nvvidconv ! autovideosink


Hello, it's very nice to see you here :) First of all, thank you very much for your help. I see that the problem is in the pipeline creation part: you added a dedicated video converter for each stream. It will take some time to implement and test this in my code, as it is quite complex. I used tee in the wrong place, which limits my flexibility, so I will try splitting the branches right after the video source. My own code does not use gst_parse_launch; I define the elements and parameters one by one, so this will take some time. I'll be sure to reply when I'm done. Sorry for the late reply, I'm a little busy.

There is another little thing I want to ask: if you had created the tee and the other elements with gst_element_factory_make, what would the gst_element_link_many ordering be? I could not find an example of tee usage done this way. After trying all of this, I think this part will be a problem. Thank you, take care…

Here is the same with programmatic construction of the pipeline:

#include <gst/gst.h>
#include "nvbuf_utils.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cudaEGL.h>
#include <opencv2/core.hpp>
#include <opencv2/cudafilters.hpp>


static EGLDisplay egl_display;

static GstPadProbeReturn
conv_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer u_data)
{
    GstBuffer *buffer = (GstBuffer *) info->data;
    GstMapInfo map    = {0};
    int dmabuf_fd = 0;
    gst_buffer_map (buffer, &map, GST_MAP_WRITE);
    if (-1 == ExtractFdFromNvBuffer((void *)map.data, &dmabuf_fd))
    {
        g_printerr("ExtractFdFromNvBuffer failed\n");
        gst_buffer_unmap(buffer, &map);
        return GST_PAD_PROBE_OK;
    }
    
    //CUDA postprocess
    {
        EGLImageKHR egl_image;
        egl_image = NvEGLImageFromFd(egl_display, dmabuf_fd);
        //printf("EglImage at %p\n", egl_image);
        CUresult status;
        CUeglFrame eglFrame;
        CUgraphicsResource pResource = NULL;
        cudaFree(0);
        status = cuGraphicsEGLRegisterImage(&pResource,
                    egl_image,
                    CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
        if (status != CUDA_SUCCESS)
        {
            g_printerr("cuGraphicsEGLRegisterImage failed\n");
            NvDestroyEGLImage(egl_display, egl_image);
            gst_buffer_unmap(buffer, &map);
            return GST_PAD_PROBE_OK;
        }
        status = cuGraphicsResourceGetMappedEglFrame(&eglFrame, pResource, 0, 0);
        if (status != CUDA_SUCCESS)
        {
            g_printerr("cuGraphicsResourceGetMappedEglFrame failed\n");
            cuGraphicsUnregisterResource(pResource);
            NvDestroyEGLImage(egl_display, egl_image);
            gst_buffer_unmap(buffer, &map);
            return GST_PAD_PROBE_OK;
        }
        status = cuCtxSynchronize();
        if (status != CUDA_SUCCESS)
        {
            g_printerr("cuCtxSynchronize failed\n");
            cuGraphicsUnregisterResource(pResource);
            NvDestroyEGLImage(egl_display, egl_image);
            gst_buffer_unmap(buffer, &map);
            return GST_PAD_PROBE_OK;
        }
        
        //printf("Mapped frame width=%d height=%d pPitch %p\n", eglFrame.height, eglFrame.width, eglFrame.frame.pPitch[0]);
        cv::cuda::GpuMat d_mat(eglFrame.height, eglFrame.width, CV_8UC4, eglFrame.frame.pPitch[0]);
        cv::Rect roi(100,100,200,200);
        d_mat(roi).setTo(cv::Scalar(255,0,0,255));

        status = cuCtxSynchronize();
        status = cuGraphicsUnregisterResource(pResource);
        NvDestroyEGLImage(egl_display, egl_image);
    }
    gst_buffer_unmap(buffer, &map);

    return GST_PAD_PROBE_OK;
}


static GMainLoop *loop;

gint
main (gint   argc,
      gchar *argv[])
{
  GstElement *pipeline, *vidsrc, *tee0;
  GstElement *queue1, *conv1, *conv2, *enc1, *h264parse1, *pay1, *udpsink1;
  GstElement *queue2, *conv3, *enc2, *h264parse2, *pay2, *udpsink2;
  
  GstCaps *vid_caps, *tee_caps;
  GstCaps *queue1_caps, *conv1_caps, *conv2_caps, *enc1_caps, *h264parse1_caps, *pay1_caps;
  GstCaps *queue2_caps, *conv3_caps, *enc2_caps, *h264parse2_caps, *pay2_caps;


  /* init GStreamer */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* setup pipeline */
  pipeline = gst_pipeline_new ("pipeline");

  vidsrc = gst_element_factory_make ("videotestsrc", "vidsrc");
  g_object_set (G_OBJECT (vidsrc), "is-live", true, NULL);
  vid_caps = gst_caps_from_string("video/x-raw, width=(int)640, height=(int)480, framerate=(fraction)30/1, format=(string)RGBA");
  
  tee0 = gst_element_factory_make ("tee", "tee0");
  tee_caps = gst_caps_from_string("video/x-raw, width=(int)640, height=(int)480, framerate=(fraction)30/1, format=(string)RGBA");
  
  /* Unicast branch */
  queue1 = gst_element_factory_make ("queue", "queue1");
  queue1_caps = gst_caps_from_string("video/x-raw, width=(int)640, height=(int)480, framerate=(fraction)30/1, format=(string)RGBA");
  
  conv1 = gst_element_factory_make ("nvvidconv", "conv1");
  conv1_caps = gst_caps_from_string("video/x-raw(memory:NVMM), width=(int)640, height=(int)480, framerate=(fraction)30/1, format=(string)RGBA");

  conv2 = gst_element_factory_make ("nvvidconv", "conv2");
  conv2_caps = gst_caps_from_string("video/x-raw(memory:NVMM), width=(int)640, height=(int)480, framerate=(fraction)30/1, format=(string)NV12");

  enc1 = gst_element_factory_make("nvv4l2h264enc", "enc1");
  g_object_set (G_OBJECT (enc1), "insert-sps-pps", true, NULL);
  g_object_set (G_OBJECT (enc1), "insert-vui", true, NULL);
  g_object_set (G_OBJECT (enc1), "idrinterval", 15, NULL);

  enc1_caps = gst_caps_from_string("video/x-h264, stream-format=(string)byte-stream, alignment=(string)au, width=(int)640, height=(int)480, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction)30/1");

  h264parse1 = gst_element_factory_make("h264parse", "h264parse1");
  h264parse1_caps = gst_caps_from_string("video/x-h264, stream-format=(string)byte-stream, alignment=(string)au, width=(int)640, height=(int)480, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction)30/1");

  pay1 = gst_element_factory_make ("rtph264pay", "pay1");
  pay1_caps = gst_caps_from_string("application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96");

  udpsink1 = gst_element_factory_make ("udpsink", "udpsink1");
  g_object_set (G_OBJECT (udpsink1), "host", "192.168.1.15", NULL);
  g_object_set (G_OBJECT (udpsink1), "port", 5000, NULL);
  g_object_set (G_OBJECT (udpsink1), "auto-multicast", false, NULL);



  /* Multicast branch */
  queue2 = gst_element_factory_make ("queue", "queue2");
  queue2_caps = gst_caps_from_string("video/x-raw, width=(int)640, height=(int)480, framerate=(fraction)30/1, format=(string)RGBA");
  
  conv3 = gst_element_factory_make ("nvvidconv", "conv3");
  conv3_caps = gst_caps_from_string("video/x-raw(memory:NVMM), width=(int)640, height=(int)480, framerate=(fraction)30/1, format=(string)NV12");

  enc2 = gst_element_factory_make("nvv4l2h264enc", "enc2");
  g_object_set (G_OBJECT (enc2), "insert-sps-pps", true, NULL);
  g_object_set (G_OBJECT (enc2), "insert-vui", true, NULL);
  g_object_set (G_OBJECT (enc2), "idrinterval", 15, NULL);
  enc2_caps = gst_caps_from_string("video/x-h264, stream-format=(string)byte-stream, alignment=(string)au, width=(int)640, height=(int)480, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction)30/1");

  h264parse2 = gst_element_factory_make("h264parse", "h264parse2");
  h264parse2_caps = gst_caps_from_string("video/x-h264, stream-format=(string)byte-stream, alignment=(string)au, width=(int)640, height=(int)480, pixel-aspect-ratio=(fraction)1/1, framerate=(fraction)30/1");

  pay2 = gst_element_factory_make ("rtph264pay", "pay2");
  pay2_caps = gst_caps_from_string("application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96");

  udpsink2 = gst_element_factory_make ("udpsink", "udpsink2");
  g_object_set (G_OBJECT (udpsink2), "host", "224.1.1.1", NULL);
  g_object_set (G_OBJECT (udpsink2), "port", 5002, NULL);
  g_object_set (G_OBJECT (udpsink2), "auto-multicast", true, NULL);




  gst_bin_add_many (GST_BIN (pipeline), vidsrc, tee0, queue1, conv1, conv2, enc1, h264parse1, pay1, udpsink1, queue2, conv3, enc2, h264parse2, pay2, udpsink2, NULL);

  if (!gst_element_link_filtered(vidsrc, tee0, vid_caps)) {
        g_printerr("Fail to gst_element_link_filtered vidsrc -- tee0\n");
        return -1;
  }

  if (!gst_element_link_filtered(tee0, queue1, tee_caps)) {
        g_printerr("Fail to gst_element_link_filtered tee0 -- queue1\n");
        return -1;
  }
  
  
  
  if (!gst_element_link_filtered(queue1, conv1, queue1_caps)) {
        g_printerr("Fail to gst_element_link_filtered queue1 -- conv1\n");
        return -1;
  }
 
  if (!gst_element_link_filtered(conv1, conv2, conv1_caps)) {
        g_printerr("Fail to gst_element_link_filtered conv1 -- conv2\n");
        return -1;
  }

  if (!gst_element_link_filtered(conv2, enc1, conv2_caps)) {
        g_printerr("Fail to gst_element_link_filtered conv2 -- enc1\n");
        return -1;
  }

  if (!gst_element_link_filtered(enc1, h264parse1, enc1_caps)) {
        g_printerr("Fail to gst_element_link_filtered enc1 -- h264parse1\n");
        return -1;
  }

  if (!gst_element_link_filtered(h264parse1, pay1, h264parse1_caps)) {
        g_printerr("Fail to gst_element_link_filtered h264parse1 -- pay1\n");
        return -1;
  }

  if (!gst_element_link_filtered(pay1, udpsink1, pay1_caps)) {
        g_printerr("Fail to gst_element_link_filtered pay1 -- udpsink1\n");
        return -1;
  }
  
  
  if (!gst_element_link_filtered(tee0, queue2, tee_caps)) {
        g_printerr("Fail to gst_element_link_filtered tee0 -- queue2\n");
        return -1;
  }

  if (!gst_element_link_filtered(queue2, conv3, queue2_caps)) {
        g_printerr("Fail to gst_element_link_filtered queue2 -- conv3\n");
        return -1;
  }

  if (!gst_element_link_filtered(conv3, enc2, conv3_caps)) {
        g_printerr("Fail to gst_element_link_filtered conv3 -- enc2\n");
        return -1;
  }

  if (!gst_element_link_filtered(enc2, h264parse2, enc2_caps)) {
        g_printerr("Fail to gst_element_link_filtered enc2 -- h264parse2\n");
        return -1;
  }

  if (!gst_element_link_filtered(h264parse2, pay2, h264parse2_caps)) {
        g_printerr("Fail to gst_element_link_filtered h264parse2 -- pay2\n");
        return -1;
  }

  if (!gst_element_link_filtered(pay2, udpsink2, pay2_caps)) {
        g_printerr("Fail to gst_element_link_filtered pay2 -- udpsink2\n");
        return -1;
  }
 
  
  
  GstPad *srcpad = gst_element_get_static_pad (conv1, "src");
  if(!srcpad) {
	g_printerr("Failed to get srcpad");
	return -2;	
  }

  if (!gst_pad_add_probe (srcpad, GST_PAD_PROBE_TYPE_BUFFER, (GstPadProbeCallback) conv_src_pad_buffer_probe, NULL, NULL)) {
	g_printerr("Failed to add probe");
	return -3;		  
  }
  gst_object_unref (srcpad);

  
  /* This will output details of pipeline going to play  */
  //g_signal_connect(pipeline, "deep-notify", G_CALLBACK(gst_object_default_deep_notify), NULL);

  /* play */
  gst_element_set_state (pipeline, GST_STATE_READY);
  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  
  /* wait until it's up and running or failed */
  if (gst_element_get_state (pipeline, NULL, NULL, -1) == GST_STATE_CHANGE_FAILURE) {
     g_error ("Failed to go into PLAYING state");
  }

  g_print ("Running ...\n");
  g_main_loop_run (loop);

  /* clean up */
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (GST_OBJECT (pipeline));
  g_main_loop_unref (loop);

  return 0;
}
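
Note that gst_element_link_filtered from tee0 to each queue transparently requests the tee's src pads for you. If you prefer to request them explicitly, a sketch using the same variable names:

  GstPad *tee_srcpad = gst_element_get_request_pad (tee0, "src_%u");
  GstPad *queue_sinkpad = gst_element_get_static_pad (queue1, "sink");
  if (gst_pad_link (tee_srcpad, queue_sinkpad) != GST_PAD_LINK_OK)
      g_printerr ("Failed to link tee0 -- queue1\n");
  gst_object_unref (queue_sinkpad);
  /* request a second src pad from tee0 in the same way for queue2 */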

Thank you, I will try it and share my results with you. I realized that my problem is the lack of a video converter; it will be fixed when I add one. Take care…


Hello again;
I encountered an error like "… is not unique in bin …, not adding", so the element is not added to my new pipeline. Why does this happen?

My guess would be that you tried to add 2 elements with the same name.


Hello again;
gst_bin_add_many(GST_BIN(GMainElement.pipeline),
GMainElement.v4l2src, GMainElement.source2convertcapsfilter, GMainElement.tee,
GUnicastElement.videoconvert1, GUnicastElement.cairooverlay,
GUnicastElement.videoconvert2,
GUnicastElement.queue_unicast,
GUnicastElement.nvvidconv, GUnicastElement.convert2encodercapsfilter, GUnicastElement.nvv4l2h265enc, GUnicastElement.encoder2muxcapsfilter, GUnicastElement.mpegtsmux, GUnicastElement.mux2payloadcapsfilter, GUnicastElement.rtpmp2tpay, GUnicastElement.payload2udpcapsfilter, GUnicastElement.udpsink,

GMulticastElement.queue_unicast,
GMulticastElement.nvvidconv, GMulticastElement.convert2encodercapsfilter, GMulticastElement.nvv4l2h265enc, GMulticastElement.encoder2muxcapsfilter, GMulticastElement.mpegtsmux, GMulticastElement.mux2payloadcapsfilter, GMulticastElement.rtpmp2tpay, GMulticastElement.payload2udpcapsfilter, GMulticastElement.udpsink, NULL);

gst_element_link_many(GMainElement.v4l2src, GMainElement.source2convertcapsfilter, GMainElement.tee,
GUnicastElement.videoconvert1, GUnicastElement.cairooverlay,
GUnicastElement.videoconvert2,
GUnicastElement.queue_unicast,
GUnicastElement.nvvidconv, GUnicastElement.convert2encodercapsfilter, GUnicastElement.nvv4l2h265enc, GUnicastElement.encoder2muxcapsfilter, GUnicastElement.mpegtsmux, GUnicastElement.mux2payloadcapsfilter, GUnicastElement.rtpmp2tpay, GUnicastElement.payload2udpcapsfilter, GUnicastElement.udpsink,

GMulticastElement.queue_multicast,
GMulticastElement.nvvidconv, GMulticastElement.convert2encodercapsfilter, GMulticastElement.nvv4l2h265enc, GMulticastElement.encoder2muxcapsfilter, GMulticastElement.mpegtsmux, GMulticastElement.mux2payloadcapsfilter, GMulticastElement.rtpmp2tpay, GMulticastElement.payload2udpcapsfilter, GMulticastElement.udpsink, NULL) != TRUE

tee_unicast_pad = gst_element_get_request_pad(GMainElement.tee, "src_%u");
queue_unicast_pad = gst_element_get_static_pad(GUnicastElement.queue_unicast, "sink");

tee_multicast_pad = gst_element_get_request_pad(GMainElement.tee, "src_%u");
queue_multicast_pad = gst_element_get_static_pad(GUnicastElement.queue_multicast, "sink");

When I write it this way, I get the "is not unique in bin, not adding" error.

I also tried removing the caps filters from gst_bin_add_many.

Take care…

It seems that you have two elements with the name queue_unicast.

The names given to elements via gst_element_factory_make are not supposed to be the same. The problem was because of this; solved.
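
For reference, each name passed to gst_element_factory_make within the same bin must be unique (or NULL, letting GStreamer generate one):

GstElement *q_uni   = gst_element_factory_make ("queue", "queue_unicast");
GstElement *q_multi = gst_element_factory_make ("queue", "queue_multicast");
/* or: gst_element_factory_make ("queue", NULL);  the name is auto-generated */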
