How could I use nvarguscamerasrc plugin with raw12 ?

Hi:
The output of camera is raw12. And we can get the pictures successfully with v4l2 api.

v4l2-ctl -d /dev/video0 --list-formats-ext

ioctl: VIDIOC_ENUM_FMT
	Index       : 0
	Type        : Video Capture
	Pixel Format: 'Y12 '
	Name        : 12-bit Greyscale
		Size: Discrete 1280x800
			Interval: Discrete 0.020s (49.000 fps)

v4l2-ctl -d /dev/video0 --set-ctrl bypass_mode=0 --stream-mmap --stream-count=100

<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< 49.01 fps
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< 49.25 fps

As far as I know, the output of a camera whose format is raw12 stores each pixel (12 bits) in 2 bytes!

If I execute:
gst-launch-1.0 nvarguscamerasrc blocksize=2048000 sensor-id=0 !\
	'video/x-raw(memory:NVMM), format=(string)NV12, width=(int)1280, height=(int)800,framerate=(fraction)49' !\
    nvvidconv !\
    'video/x-raw(memory:NVMM), format=(string)I420, width=(int)1280, height=(int)800' !\
    omxh264enc !\
    'video/x-h264,stream-format=(string)byte-stream' !\
    h264parse !\
    avdec_h264 !\
    xvimagesink sync=false

the output is:

Setting pipeline to PAUSED ...
Pipeline is live and does not need PREROLL ...
Setting pipeline to PLAYING ...
New clock: GstSystemClock
Framerate set to : 49 at NvxVideoEncoderSetParameterNvMMLiteOpen : Block : BlockType = 4 
===== NVMEDIA: NVENC =====
NvMMLiteBlockCreate : Block : BlockType = 4 
H264: Profile = 66, Level = 40 
Error generated. /dvs/git/dirty/git-master_linux/multimedia/nvgstreamer/gst-nvarguscamera/gstnvarguscamerasrc.cpp, execute:521 No cameras available
^Chandling interrupt.
Interrupt: Stopping pipeline ...
Execution ended after 0:00:13.068225514
Setting pipeline to PAUSED ...
Setting pipeline to READY ...

someone has meet the same problem:
https://devtalk.nvidia.com/default/topic/1051533/jetson-tx2/nvarguscamerasrc-no-cameras-available/post/5341958/#5341958

EnriqueR said we should get “4.9.140” directory ??? may be 4.9.140-tegra?

But I can’t get the 4.9.140-tegra directory. I deleted the directory 4.9.140-tegra at …/rootfs/lib/modules and modified “Local version, append to kernel release” to “-tegra”, but I always get a “4.9.140-tegra+” directory.

???

anyone any help?

Hi XuXiang,

As EnriqueR said, in some cases we have faced some issues with nvcamerasrc when we try to capture and there are issues with the graphic interface. Is this your case?

can you share the output of these 2 commands in your tx2?

uname -a
ls /lib/modules
lsmod

That is to verify if the modules are properly loaded.

-Adrian

1.uname -a

Linux nvidia-desktop 4.9.140+ #45 SMP PREEMPT Mon Jul 8 18:43:24 HKT 2019 aarch64 aarch64 aarch64 GNU/Linux

2.ls /lib/modules

4.9.140+  4.9.140-tegra

3.lsmod

Module                  Size  Used by
motor                  25055  0
bnep                   19270  2
fuse                  119179  3
bcmdhd                984971  0
cfg80211              722424  1 bcmdhd
nvs_bmi160             26763  0
nvs                    62167  1 nvs_bmi160
tc358746a               5425  0
nvgpu                1704896  18
bluedroid_pm           16187  0
ip_tables              21475  0
x_tables               38080  1 ip_tables

“motor” is a driver for FPGA.

Because I am in china,so my next test and result will be 8 hours later.

Seems like the modules are properly loaded so maybe that is not the issue.
Are you developing your own driver? which camera are you using?

-Adrian

Thank you for your reply.

My driver is just like imx185_v1.c.

I deleted some code about register write/read which is for frame length,coarse time,gain.

/*
 * Intentionally empty: the frame-length register programming from the
 * imx185_v1.c template was removed for this sensor (see thread above),
 * so no register values are generated here.
 */
static inline void ar0134a_get_frame_length_regs(ar0134_reg *regs,
				u32 frame_length)
{
}

/*
 * Intentionally empty: SHS1 coarse-time register programming was
 * stripped from the imx185_v1.c template this driver is based on.
 */
static inline void ar0134a_get_coarse_time_regs_shs1(ar0134_reg *regs,
				u32 coarse_time)
{

}

/*
 * Intentionally empty: SHS2 (HDR) coarse-time register programming was
 * stripped from the imx185_v1.c template this driver is based on.
 */
static inline void ar0134a_get_coarse_time_regs_shs2(ar0134_reg *regs,
				u32 coarse_time)
{

}

/*
 * Intentionally empty: gain register programming was stripped from the
 * imx185_v1.c template this driver is based on.
 */
static inline void ar0134a_get_gain_reg(ar0134_reg *regs,
				u8 gain)
{
}

/*
 * Module parameter: non-zero selects the sensor test-pattern mode.
 * Readable/writable at runtime via sysfs (permissions 0644).
 */
static int test_mode;
module_param(test_mode, int, 0644);

/*
 * camera_common power-on hook. No power rails or reset GPIOs are
 * toggled on this board; the sensor is simply marked as powered.
 * Always succeeds.
 */
static int ar0134a_power_on(struct camera_common_data *s_data)
{
	struct ar0134 *priv = (struct ar0134 *)s_data->priv;

	priv->power.state = SWITCH_ON;
	return 0;
}

/*
 * camera_common power-off hook. Mirror of ar0134a_power_on(): no
 * hardware is touched, only the recorded power state changes.
 * Always succeeds.
 */
static int ar0134a_power_off(struct camera_common_data *s_data)
{
	struct ar0134 *priv = (struct ar0134 *)s_data->priv;

	priv->power.state = SWITCH_OFF;
	return 0;
}

/*
 * ar0134a_power_get() - acquire the sensor clock and reset GPIO.
 * @priv: driver instance state
 *
 * Looks up the sensor master clock (name from platform data, falling
 * back to "extperiph1") and best-effort reparents it to pllp_grtba.
 * Records the reset GPIO from platform data and initializes the power
 * state to SWITCH_OFF.
 *
 * Fix vs. original: the local `dev` and `pdata` variables were declared
 * but partially bypassed (`&priv->i2c_client->dev` / `priv->pdata->...`
 * used directly), and `err` was dead (never set); use the locals
 * consistently and return 0 directly.
 *
 * Returns 0 on success or a negative errno if the mclk lookup fails.
 */
static int ar0134a_power_get(struct ar0134 *priv)
{
	struct camera_common_power_rail *pw = &priv->power;
	struct camera_common_pdata *pdata = priv->pdata;
	struct device *dev = &priv->i2c_client->dev;
	const char *mclk_name;
	struct clk *parent;

	/* Platform data may override the default SoC clock name. */
	mclk_name = pdata->mclk_name ? pdata->mclk_name : "extperiph1";
	pw->mclk = devm_clk_get(dev, mclk_name);
	if (IS_ERR(pw->mclk)) {
		dev_err(dev, "unable to get clock %s\n", mclk_name);
		return PTR_ERR(pw->mclk);
	}

	/* Reparenting is best-effort: log and keep the default parent
	 * if pllp_grtba is unavailable. */
	parent = devm_clk_get(dev, "pllp_grtba");
	if (IS_ERR(parent))
		dev_err(dev, "devm_clk_get failed for pllp_grtba");
	else
		clk_set_parent(pw->mclk, parent);

	pw->reset_gpio = pdata->reset_gpio;

	pw->state = SWITCH_OFF;
	return 0;
}

/* Forward declarations: these setters are referenced by the control
 * handlers above their definitions later in the file. */
static int ar0134a_set_coarse_time(struct ar0134 *priv, s64 val);
static int ar0134a_set_coarse_time_hdr(struct ar0134 *priv, s64 val);
static int ar0134a_set_gain(struct ar0134 *priv, s64 val);
static int ar0134a_set_frame_rate(struct ar0134 *priv, s64 val);
static int ar0134a_set_exposure(struct ar0134 *priv, s64 val);

/*
 * V4L2 stream on/off hook. Deliberately a no-op that reports success —
 * presumably the FPGA/serializer chain streams continuously, so there
 * are no sensor registers to program here (TODO confirm).
 */
static int ar0134a_s_stream(struct v4l2_subdev *sd, int enable)
{
	return 0;
}

/*
 * Report input status: signals 1 when the sensor is marked powered on
 * (see ar0134a_power_on/off), 0 otherwise. Always returns success.
 */
static int ar0134a_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct camera_common_data *common_data =
		to_camera_common_data(&client->dev);
	struct ar0134 *priv = (struct ar0134 *)common_data->priv;

	*status = (priv->power.state == SWITCH_ON) ? 1 : 0;
	return 0;
}

/* V4L2 subdev video operations: stream control is a stub, bus config
 * and input status come from camera_common / the local helper. */
static struct v4l2_subdev_video_ops ar0134a_subdev_video_ops = {
	.s_stream	= ar0134a_s_stream,
	.g_mbus_config	= camera_common_g_mbus_config,
	.g_input_status = ar0134a_g_input_status,
};

/* V4L2 subdev core operations: power handling is delegated entirely to
 * the Tegra camera_common framework. */
static struct v4l2_subdev_core_ops ar0134a_subdev_core_ops = {
	.s_power	= camera_common_s_power,
};

/*
 * Pad get_fmt op: thin wrapper that returns the currently active
 * media-bus format from the camera_common framework.
 */
static int ar0134a_get_fmt(struct v4l2_subdev *sd,
		struct v4l2_subdev_pad_config *cfg,
		struct v4l2_subdev_format *format)
{
	return camera_common_g_fmt(sd, &format->format);
}

/*
 * Pad set_fmt op: TRY requests only validate the requested format;
 * ACTIVE requests apply it. Both paths are delegated to camera_common.
 */
static int ar0134a_set_fmt(struct v4l2_subdev *sd,
		struct v4l2_subdev_pad_config *cfg,
		struct v4l2_subdev_format *format)
{
	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
		return camera_common_try_fmt(sd, &format->format);

	return camera_common_s_fmt(sd, &format->format);
}

/* V4L2 subdev pad operations: format negotiation via local wrappers,
 * enumeration delegated to camera_common. */
static struct v4l2_subdev_pad_ops ar0134a_subdev_pad_ops = {
	.set_fmt = ar0134a_set_fmt,
	.get_fmt = ar0134a_get_fmt,
	.enum_mbus_code = camera_common_enum_mbus_code,
	.enum_frame_size	= camera_common_enum_framesizes,
	.enum_frame_interval	= camera_common_enum_frameintervals,
};

/* Top-level V4L2 subdev operations table tying the core/video/pad
 * tables together; registered with the V4L2 framework at probe. */
static struct v4l2_subdev_ops ar0134a_subdev_ops = {
	.core	= &ar0134a_subdev_core_ops,
	.video	= &ar0134a_subdev_video_ops,
	.pad = &ar0134a_subdev_pad_ops,
};

/* Tegra camera_common sensor ops: local power stubs plus the shared
 * uglyNv register accessors (defined elsewhere in this driver). */
static struct camera_common_sensor_ops ar0134a_common_ops = {
	.power_on = ar0134a_power_on,
	.power_off = ar0134a_power_off,
	.write_reg = uglyNv_write_reg,
	.read_reg = uglyNv_read_reg,
};

/*
 * Enable/disable the sensor group-hold register so that multiple
 * control updates take effect atomically. @val indexes the standard
 * switch control menu; values that map to neither SWITCH_ON nor
 * SWITCH_OFF are silently accepted. Returns 0 or the write error.
 */
static int ar0134a_set_group_hold(struct ar0134 *priv, s32 val)
{
	int gh_en = switch_ctrl_qmenu[val];
	int err = 0;

	priv->group_hold_prev = val;

	if (gh_en == SWITCH_ON)
		err = uglyNv_write_reg(priv->s_data,
				       AR0134_GROUP_HOLD_ADDR, 0x1);
	else if (gh_en == SWITCH_OFF)
		err = uglyNv_write_reg(priv->s_data,
				       AR0134_GROUP_HOLD_ADDR, 0x0);

	if (err) {
		dev_dbg(&priv->i2c_client->dev,
			 "%s: Group hold control error\n", __func__);
		return err;
	}

	return 0;
}

/*
 * Gain setter: intentionally a no-op — gain register programming was
 * removed for this sensor (see the empty ar0134a_get_gain_reg stub).
 * Always reports success so the control framework stays happy.
 */
static int ar0134a_set_gain(struct ar0134 *priv, s64 val)
{
	return 0;
}

/*
 * ar0134a_set_frame_rate() - derive and cache the frame length for a
 * requested frame rate.
 * @priv: driver instance state
 * @val: requested rate in fixed point (scaled by framerate_factor)
 *
 * frame_length = pixel_clock * FIXED_POINT_SCALING_FACTOR /
 *                (line_length * val), clamped to AR0134_MAX_FRAME_LENGTH.
 * Register programming is stubbed out for this sensor, so the value is
 * only cached in priv->frame_length. When HDR is enabled and a WDR
 * exposure was previously set, the HDR coarse time is re-applied since
 * it depends on the frame length.
 *
 * Fixes vs. original: guards against division by zero for val == 0,
 * uses %u for the u32 frame length (was %d), removes the duplicated
 * comma in the debug format string, and uses the local `dev`
 * consistently.
 *
 * Returns 0 on success or a negative error code.
 */
static int ar0134a_set_frame_rate(struct ar0134 *priv, s64 val)
{
	int err;
	u32 frame_length;
	struct camera_common_data *s_data = priv->s_data;
	const struct sensor_mode_properties *mode =
		&s_data->sensor_props.sensor_modes[s_data->mode];
	struct v4l2_control control;
	struct device *dev = &priv->i2c_client->dev;
	int hdr_en;

	/* A zero rate would divide by zero below. */
	if (val == 0)
		return -EINVAL;

	frame_length = mode->signal_properties.pixel_clock.val *
		FIXED_POINT_SCALING_FACTOR /
		mode->image_properties.line_length / val;

	priv->frame_length = frame_length;
	if (priv->frame_length > AR0134_MAX_FRAME_LENGTH)
		priv->frame_length = AR0134_MAX_FRAME_LENGTH;

	dev_dbg(dev, "%s: val: %lld, frame_length: %u\n", __func__,
		val, priv->frame_length);

	control.id = TEGRA_CAMERA_CID_HDR_EN;
	err = camera_common_g_ctrl(s_data, &control);
	if (err < 0) {
		dev_err(dev, "could not find device ctrl.\n");
		return err;
	}

	hdr_en = switch_ctrl_qmenu[control.value];
	if ((hdr_en == SWITCH_ON) && (priv->last_wdr_et_val != 0)) {
		/* HDR exposure depends on frame length; re-apply it. */
		err = ar0134a_set_coarse_time_hdr(priv, priv->last_wdr_et_val);
		if (err)
			dev_dbg(dev,
			"%s: error coarse time SHS1 SHS2 override\n", __func__);
	}

	return 0;
}

/*
 * ar0134a_set_exposure() - apply an exposure (coarse time) value.
 * @priv: driver instance state
 * @val: exposure value in exposure_factor units
 *
 * Reads the HDR-enable control and dispatches to the HDR (SHS1+SHS2)
 * or normal (SHS1) coarse-time setter; both are currently stubs that
 * return 0. Coarse-time errors are only logged at debug level, but the
 * error code is still propagated to the caller.
 *
 * Fix vs. original: the local `dev` was declared but the dev_dbg calls
 * bypassed it with `&priv->i2c_client->dev`; use it consistently.
 *
 * Returns 0 on success or a negative error code.
 */
static int ar0134a_set_exposure(struct ar0134 *priv, s64 val)
{
	int err;
	struct v4l2_control control;
	struct device *dev = &priv->i2c_client->dev;
	int hdr_en;

	dev_dbg(dev, "%s: val: %lld\n", __func__, val);

	control.id = TEGRA_CAMERA_CID_HDR_EN;
	err = camera_common_g_ctrl(priv->s_data, &control);
	if (err < 0) {
		dev_err(dev, "could not find device ctrl.\n");
		return err;
	}

	hdr_en = switch_ctrl_qmenu[control.value];
	if (hdr_en == SWITCH_ON) {
		err = ar0134a_set_coarse_time_hdr(priv, val);
		if (err)
			dev_dbg(dev,
			"%s: error coarse time SHS1 SHS2 override\n", __func__);
	} else {
		err = ar0134a_set_coarse_time(priv, val);
		if (err)
			dev_dbg(dev,
			"%s: error coarse time SHS1 override\n", __func__);
	}

	return err;
}

/*
 * Coarse-time (exposure) setter: intentionally a no-op — register
 * programming was removed for this sensor. Always reports success.
 */
static int ar0134a_set_coarse_time(struct ar0134 *priv, s64 val)
{
	return 0;
}

/*
 * HDR coarse-time (SHS1/SHS2) setter: intentionally a no-op — register
 * programming was removed for this sensor. Always reports success.
 */
static int ar0134a_set_coarse_time_hdr(struct ar0134 *priv, s64 val)
{
	return 0;
}

/*
 * Fuse-ID readout: stubbed out — this sensor exposes no fuse ID, so
 * the control setup in ar0134a_ctrls_init() always succeeds here.
 */
static int ar0134a_fuse_id_setup(struct ar0134 *priv)
{
	return 0;
}

/*
 * Read handler for volatile controls. This driver registers no
 * volatile controls, so any id reaching this handler is unknown.
 * Reads while powered off succeed trivially (nothing to refresh).
 */
static int ar0134a_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
	struct ar0134 *priv =
		container_of(ctrl->handler, struct ar0134, ctrl_handler);
	struct device *dev = &priv->i2c_client->dev;

	if (priv->power.state == SWITCH_OFF)
		return 0;

	switch (ctrl->id) {
	default:
		dev_err(dev, "%s: unknown ctrl id.\n", __func__);
		return -EINVAL;
	}

	return 0;
}

/*
 * Write handler for V4L2 controls: dispatches gain, exposure, frame
 * rate, group hold and sensor-mode id to the matching setters. Writes
 * while powered off are silently accepted (state is applied later).
 * Returns the setter's result, or -EINVAL for an unknown control id.
 */
static int ar0134a_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct ar0134 *priv =
		container_of(ctrl->handler, struct ar0134, ctrl_handler);
	struct device *dev = &priv->i2c_client->dev;
	struct camera_common_data *s_data = priv->s_data;
	int ret = 0;

	if (priv->power.state == SWITCH_OFF)
		return 0;

	switch (ctrl->id) {
	case TEGRA_CAMERA_CID_GAIN:
		ret = ar0134a_set_gain(priv, *ctrl->p_new.p_s64);
		break;
	case TEGRA_CAMERA_CID_EXPOSURE:
		ret = ar0134a_set_exposure(priv, *ctrl->p_new.p_s64);
		break;
	case TEGRA_CAMERA_CID_FRAME_RATE:
		ret = ar0134a_set_frame_rate(priv, *ctrl->p_new.p_s64);
		break;
	case TEGRA_CAMERA_CID_GROUP_HOLD:
		ret = ar0134a_set_group_hold(priv, ctrl->val);
		break;
	case TEGRA_CAMERA_CID_HDR_EN:
		/* HDR enable is read back by the setters; nothing to do. */
		break;
	case TEGRA_CAMERA_CID_SENSOR_MODE_ID:
		s_data->sensor_mode_id = (int)(*ctrl->p_new.p_s64);
		break;
	default:
		dev_err(dev, "%s: unknown ctrl id.\n", __func__);
		return -EINVAL;
	}

	return ret;
}

static int ar0134a_ctrls_init(struct ar0134 *priv)
{
	struct i2c_client *client = priv->i2c_client;
	struct v4l2_ctrl *ctrl;
	int num_ctrls;
	int err;
	int i;

	dev_dbg(&client->dev, "%s++\n", __func__);
dev_dbg(&client->dev, "\n****XX:%s++\n", __func__);

	num_ctrls = ARRAY_SIZE(ctrl_config_list);
	v4l2_ctrl_handler_init(&priv->ctrl_handler, num_ctrls);

	for (i = 0; i < num_ctrls; i++) {
		ctrl = v4l2_ctrl_new_custom(&priv->ctrl_handler,
			&ctrl_config_list[i], NULL);
		if (ctrl == NULL) {
			dev_err(&client->dev, "Failed to init %s ctrl\n",
				ctrl_config_list[i].name);
			continue;
		}

		if (ctrl_config_list[i].type == V4L2_CTRL_TYPE_STRING &&
			ctrl_config_list[i].flags & V4L2_CTRL_FLAG_READ_ONLY) {
			ctrl->p_new.p_char = devm_kzalloc(&client->dev,
				ctrl_config_list[i].max + 1, GFP_KERNEL);
		}
		priv->ctrls[i] = ctrl;
	}

	priv->numctrls = num_ctrls;
	priv->subdev->ctrl_handler = &priv->ctrl_handler;
	if (priv->ctrl_handler.error) {
		dev_err(&client->dev, "Error %d adding controls\n",
			priv->ctrl_handler.error);
		err = priv->ctrl_handler.error;
		goto error;
	}

	err = v4l2_ctrl_handler_setup(&priv->ctrl_handler);
	if (err) {
		dev_err(&client->dev,
			"Error %d setting default controls\n", err);
		goto error;
	}

	err = ar0134a_fuse_id_setup(priv);
	if (err) {
		dev_err(&client->dev,
			"Error %d reading fuse id data\n", err);
		goto error;
	}

	return 0;

error:
	v4l2_ctrl_handler_free(&priv->ctrl_handler);
	return err;
}

we use ar0134,the hardware about camera is:
ar0134(parallel)—>DS92LV16TVHG(serial)---->DS92LV16TVHG(deserial,parallel)---->FPGA(parallel)----->tc358748---->csi

Hi XuXiang,

In order to use nvarguscamerasrc, the driver and the dtb properties have to match: if you set the driver to run at the Y12 greyscale format, the DTB needs to be configured as Y12 as well, but at this point I do not know if this format is supported. For example, if the driver is configured as RAW12, the format should be RG12 and in the dtb it should be rggb12.

Please check the color format you have in your dtb. The issue you are getting seems to be related to your dtb.

-Adrian

Thanks.
I can get the picture successfully with gray12 format by v4l2 api.

ar0134_a@10 {
				compatible = "nvidia,ar0134a";
				reg = <0x10>;
				devnode = "video0";
				physical_w = "10.0";
				physical_h = "10.0";
				sensor_model ="ar0134a";
				post_crop_frame_drop = "0";
				use_decibel_gain = "false";
				delayed_gain = "false";
				use_sensor_mode_id = "true";

				mode0 {/*mode AR0134_MODE_1280X800_CROP_47FPS*/
					mclk_khz = "24000";
					num_lanes = "2";
					tegra_sinterface = "serial_a";
                                        phy_mode = "DPHY";
					discontinuous_clk = "no";
					dpcm_enable = "false";
					cil_settletime = "0";
					dynamic_pixel_bit_depth = "8";
					csi_pixel_bit_depth = "8";
                                        pixel_t="yuv_y8";
 					mode_type = "yuv";
					pixel_phase = "y";
					active_w = "1280";
					active_h = "800";
					readout_orientation = "0";
					line_length = "1280";
					inherent_gain = "1";
					mclk_multiplier = "4.037";
					pix_clk_hz = "96875000";

                                        gain_factor = "10";
					min_gain_val = "0"; /* dB */
					max_gain_val = "480"; /* dB */
					step_gain_val = "3";
                                        default_gain = "0";
					min_hdr_ratio = "1";
					max_hdr_ratio = "1";
                                        framerate_factor = "1000000";
					min_framerate = "1500000";
					max_framerate = "60000000";
                                        step_framerate = "1";
                                        default_framerate = "47000000";
                                        exposure_factor = "1";
					min_exp_time = "2470";
					max_exp_time = "6600";
                                        step_exp_time = "1";
                                        default_exp_time = "4940";
   					embedded_metadata_height = "0";
				};

				ports {
					#address-cells = <1>;
					#size-cells = <0>;
					port@0 {
						reg = <0>;
						ar0134a_out0: endpoint {
							port-index = <0>;
							bus-width = <2>;
							remote-endpoint = <&ar0134a_csi_in0>;
							};
						};
					};
				};


	tegra-camera-platform {
		compatible = "nvidia, tegra-camera-platform";
		
		num_csi_lanes = <8>;
		max_lane_speed = <1500000>;
		min_bits_per_pixel = <12>;
		vi_peak_byte_per_pixel = <2>;
		vi_bw_margin_pct = <25>;
		isp_peak_byte_per_pixel = <5>;
		isp_bw_margin_pct = <25>;

		modules {
			module0 {
				badge = "ar0134a_bottomleft_ar0134";
				position = "bottomleft";
				orientation = "0";
				drivernode0 {
					/* Declare PCL support driver (classically known as guid)  */
					pcl_id = "v4l2_sensor";
					/* Driver v4l2 device name */
					devname = "ar0134a 30-0010";
					/* Declare the device-tree hierarchy to driver instance */
					proc-device-tree = "/proc/device-tree/i2c@3180000/ar0134_a@10";
				};
			};

Maybe I should pay attention to plugin-manager & version2 — “version2” means a driver structured like imx185.c, not like imx185_v1.c.
I will try it and share the result.

Hi XuXiang,

I was checking your pipeline system and I saw that you are using a bridge TC358748, is this bridge converting color space from grayscale to yuv? I saw you configured as yuv in the dtb:

pixel_t="yuv_y8";
mode_type = "yuv";
pixel_phase = "y";

I mention this because nvarguscamerasrc uses an ISP to convert from raw format to yuv format, if this is the case you won’t be able to use nvarguscamerasrc, because you are getting the buffers at the TX2 side, in yuv format.

-Adrian

tc358748 just translate the data from parallel to csi. The input and output of tc358748 all gray12.

“yuv_y8” just a symbol.
I expand the default format of csi:
…/kernel/nvidia/drivers/media/platform/soc_camera/soc_mediabus.c

@@ -71,6 +71,21 @@ static const struct camera_common_colorfmt camera_common_color_fmts[] = {
                V4L2_COLORSPACE_SRGB,
                V4L2_PIX_FMT_SRGGB8,
        },
+       {
+        MEDIA_BUS_FMT_Y8_1X8,
+               V4L2_COLORSPACE_RAW,
+               V4L2_PIX_FMT_GREY,
+       },
+       {
+        MEDIA_BUS_FMT_Y12_1X12,
+               V4L2_COLORSPACE_RAW,
+               V4L2_PIX_FMT_Y12,
+       },
+       {
+        MEDIA_BUS_FMT_RGB888_1X24,
+               V4L2_COLORSPACE_SRGB,
+               V4L2_PIX_FMT_RGB24,
+       },

drivers/media/platform/tegra/camera/camera_common.c

@@ -597,6 +612,14 @@ int camera_common_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
 verify_code:
        mf->field = V4L2_FIELD_NONE;
        mf->colorspace = V4L2_COLORSPACE_SRGB;
+    if ( (MEDIA_BUS_FMT_Y8_1X8 == mf->code)||(MEDIA_BUS_FMT_Y12_1X12 == mf->code) )
+    {
+           mf->colorspace = V4L2_COLORSPACE_RAW;
+    }
+    if ( (MEDIA_BUS_FMT_RGB888_1X24 == mf->code) )
+    {
+           mf->colorspace = V4L2_COLORSPACE_SRGB;
+    }
        mf->xfer_func = V4L2_XFER_FUNC_DEFAULT;
        mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
        mf->quantization = V4L2_QUANTIZATION_DEFAULT;

/drivers/media/platform/tegra/camera/sensor_common.c

@@ -214,6 +214,12 @@ static int extract_pixel_format(
                *format = V4L2_PIX_FMT_XBGGR10P;
        else if (strncmp(pixel_t, "bayer_xrggb10p", size) == 0)
                *format = V4L2_PIX_FMT_XRGGB10P;
+       else if (strncmp(pixel_t, "yuv_y8", size) == 0)
+               *format = V4L2_PIX_FMT_GREY;
+       else if (strncmp(pixel_t, "yuv_y12", size) == 0)
+               *format = V4L2_PIX_FMT_Y12;
+       else if (strncmp(pixel_t, "rgb_rgb888", size) == 0)
+               *format = V4L2_PIX_FMT_RGB24;
        else {
                pr_err("%s: Need to extend format%s\n", __func__, pixel_t);
                return -EINVAL;

drivers/media/platform/tegra/camera/vi/vi4_formats.h

@@ -95,6 +95,8 @@ static const struct tegra_video_format vi4_video_formats[] = {
                                RAW8, SGBRG8, "GBGB.. RGRG.."),
        TEGRA_VIDEO_FORMAT(RAW8, 8, SBGGR8_1X8, 1, 1, T_L8,
                                RAW8, SBGGR8, "BGBG.. GRGR.."),
+       TEGRA_VIDEO_FORMAT(RAW8, 8, Y8_1X8, 1, 1, T_L8,
+                               RAW8, GREY, "GREY.. GREY.."),
 
        /* RAW 10 */
        TEGRA_VIDEO_FORMAT(RAW10, 10, SRGGB10_1X10, 2, 1, T_R16_I,
@@ -115,6 +117,8 @@ static const struct tegra_video_format vi4_video_formats[] = {
                                RAW12, SGBRG12, "GBGB.. RGRG.."),
        TEGRA_VIDEO_FORMAT(RAW12, 12, SBGGR12_1X12, 2, 1, T_R16_I,
                                RAW12, SBGGR12, "BGBG.. GRGR.."),
+       TEGRA_VIDEO_FORMAT(RAW12, 12, Y12_1X12, 2, 1, T_R16_I,
+                               RAW12, Y12, "GREY12.. GREY12.."),
 
        /* RGB888 */
        TEGRA_VIDEO_FORMAT(RGB888, 24, RGB888_1X24, 4, 1, T_A8R8G8B8,
(END)

In your DTB you are using the yuv_y8 instead of the yuv_y12 greyscale format; my understanding is that nvarguscamerasrc does not support 8-bit formats.

if you configure your driver as Y12 your dtb have to match with that format.

I am sorry, #10 is wrong. The truth is:

ar0134_a@10 {
				compatible = "nvidia,ar0134a";
				reg = <0x10>;
				devnode = "video0";
				physical_w = "10.0";
				physical_h = "10.0";
				sensor_model ="ar0134a";
				post_crop_frame_drop = "0";
				use_decibel_gain = "false";
				delayed_gain = "false";
				use_sensor_mode_id = "true";

				mode0 {/*mode AR0134_MODE_1280X800_CROP_47FPS*/
					mclk_khz = "24000";
					num_lanes = "2";
					tegra_sinterface = "serial_a";
                                        phy_mode = "DPHY";
					discontinuous_clk = "no";
					dpcm_enable = "false";
					cil_settletime = "0";
					dynamic_pixel_bit_depth = "12";
					csi_pixel_bit_depth = "12";
                                        pixel_t="yuv_y12";
					mode_type = "yuv";
					pixel_phase = "y";
					active_w = "1280";
					active_h = "800";
					readout_orientation = "0";
					line_length = "1280";
					inherent_gain = "1";
					mclk_multiplier = "3.386";
					pix_clk_hz = "81250000";

                                        gain_factor = "10";
					min_gain_val = "0"; /* dB */
					max_gain_val = "480"; /* dB */
					step_gain_val = "3";
                                        default_gain = "0";
					min_hdr_ratio = "1";
					max_hdr_ratio = "1";
                                        framerate_factor = "1000000";
					min_framerate = "1500000";
					max_framerate = "60000000";
                                        step_framerate = "1";
                                        default_framerate = "47000000";
                                        exposure_factor = "1";
					min_exp_time = "2470";
					max_exp_time = "6600";
                                        step_exp_time = "1";
                                        default_exp_time = "4707";
					embedded_metadata_height = "0";
				};

the outpout is:

Setting pipeline to PAUSED ...
Pipeline is live and does not need PREROLL ...
Setting pipeline to PLAYING ...
New clock: GstSystemClock
Framerate set to : 47 at NvxVideoEncoderSetParameterNvMMLiteOpen : Block : BlockType = 4 
===== NVMEDIA: NVENC =====
NvMMLiteBlockCreate : Block : BlockType = 4 
H264: Profile = 66, Level = 40 
Error generated. /dvs/git/dirty/git-master_linux/multimedia/nvgstreamer/gst-nvarguscamera/gstnvarguscamerasrc.cpp, execute:521 <b>No cameras available</b>
^Chandling interrupt.
Interrupt: Stopping pipeline ...
Execution ended after 0:00:06.934256545
Setting pipeline to PAUSED ...
Setting pipeline to READY ...
^C

and I modified “/drivers/media/platform/tegra/camera/vi/vi4_formats.h” to let v4l2src to work:

@@ -118,7 +118,7 @@ static const struct tegra_video_format vi4_video_formats[] = {
        TEGRA_VIDEO_FORMAT(RAW12, 12, SBGGR12_1X12, 2, 1, T_R16_I,
                                RAW12, SBGGR12, "BGBG.. GRGR.."),
        TEGRA_VIDEO_FORMAT(RAW12, 12, Y12_1X12, 2, 1, T_R16_I,
-                               RAW12, Y12, "GREY12.. GREY12.."),
+                               RAW12, Y16_BE, "GREY16BE"),

now the sensor driver code is “version2”,and NOT use plugin-manager.

I can use v4l2src to get raw12 now,but can’t use nvarguscamerasrc also.

If I use v4l2src,the kernel will crash sometimes,so if possible,I want to use nvarguscamerasrc.

Any news? I’m having a similar problem, where I’m able to successfully capture images with v4l2-ctl, but cannot make nvarguscamerasrc work. My data format is RAW10 grayscale, so do I have to modify soc_mediabus.c, camera_common.c, sensor_common.c and vi4_formats.h as you did? Did you get any progress in making nvarguscamerasrc work? Thanks in advance!

In the end we chose the raw8 format because the UI engineer preferred it; raw12 was too complex for him to use.
Modify these files to support raw12, or just use RG12 (please pay attention to #9) — you may be able to get the data that way as well. But I am not sure: the project finished months ago, and I am now at home and can’t go to the company.

Hello XuXiang/ euskadi
As I quoted here in the above output, we can see the print “no cameras available”. This shows that the argus daemon doesn’t detect them as cameras as it is not properly configured in dtb on tegra-camera-platform entry. Refer to Camera Modules and the Device Tree section on the L4T developer guide [link] to configure it as an argus camera properly.

Thanks and Regards
Sarath S

Please read this(#27) if you want to use raw12:
https://devtalk.nvidia.com/default/topic/1036708/jetson-tx1/support-for-grayscale-sensors-is-missing/2

Thanks XuXiang, I appreciate your answer. Maybe it is better to focus on RAW8 and later move on to RAW10/RAW12, as they are more complex…

That’s interesting to know, anyway afaik nvargus only accepts bayer sensors, so it might not be suitable for me as I’m using monochrome… Thanks anyway!