Jetson Nano MIPI CSI-2 without I2C from FPGA

hello adrien.leroy2,

BTW,
could you please try this example, 12_camera_v4l2_cuda,
this have implementation for GRAY8 pixel formats.

1 Like

Hello @JerryChang,
I have some news, we found a hardware problem coming from the evaluation board so we switched from two lanes to one lane.
To try to find where the problem comes from we stayed on rggb format, not our custom greyscale. So we are on RGGB8 to test.

We add some debugging messages, as advised, in vi2_ops.c

And the problem comes from that function :

/*
 * tegra_channel_capture_done() - capture and drain the last frame while the
 * stream is being stopped (VI2 single-shot capture path).
 *
 * Dequeues the final pending buffer, programs its DMA surface address and
 * stride into the VI CSI registers, arms a syncpoint on MW_ACK_DONE
 * (memory-write acknowledge), triggers a single-shot capture, then waits
 * for the syncpoint on every active port.  On timeout the buffer is marked
 * for requeueing and channel error recovery is run.
 */
static void tegra_channel_capture_done(struct tegra_channel *chan)
{
	struct timespec ts;
	int index, err;
	int bytes_per_line = chan->format.bytesperline;
	u32 val, mw_ack_done;
	u32 thresh[TEGRA_CSI_BLOCKS] = { 0 };
	struct tegra_channel_buffer *buf;
	int state = VB2_BUF_STATE_DONE;

	/* dequeue buffer and return if no buffer exists */
	buf = dequeue_buffer(chan, !chan->low_latency);
	if (!buf)
		return;

	for (index = 0; index < chan->valid_ports; index++) {
		/* Program buffer address by using surface 0 */
		csi_write(chan, index, TEGRA_VI_CSI_SURFACE0_OFFSET_MSB,
			((u64)buf->addr + chan->buffer_offset[index]) >> 32);
		csi_write(chan, index, TEGRA_VI_CSI_SURFACE0_OFFSET_LSB,
			(buf->addr + chan->buffer_offset[index]));
		csi_write(chan, index,
			TEGRA_VI_CSI_SURFACE0_STRIDE, bytes_per_line);

		if (chan->fmtinfo->fourcc == V4L2_PIX_FMT_NV16) {
			/*
			 * Program surface 1 for UV plane,
			 * with offset sizeimage from Y plane
			 */
			csi_write(chan, index,
				TEGRA_VI_CSI_SURFACE1_OFFSET_MSB,
				((u64)buf->addr + chan->format.sizeimage / 2 +
				chan->buffer_offset[index]) >> 32);
			csi_write(chan, index,
				TEGRA_VI_CSI_SURFACE1_OFFSET_LSB,
				(buf->addr + chan->format.sizeimage / 2 +
				chan->buffer_offset[index]));
			csi_write(chan, index,
				TEGRA_VI_CSI_SURFACE1_STRIDE, bytes_per_line);
		}

		if (chan->low_latency) {
			/* Program syncpoints */
			/* multi-thread (low-latency) path uses syncpt slot [1] */
			thresh[index] = nvhost_syncpt_incr_max_ext(
				chan->vi->ndev,
				chan->syncpt[index][1], 1);
			mw_ack_done = VI_CSI_MW_ACK_DONE(chan->port[index]);
			val = VI_CFG_VI_INCR_SYNCPT_COND(mw_ack_done) |
				chan->syncpt[index][1];
		} else {
			/* Program syncpoints */
			/* single-thread path uses syncpt slot [0] */
			thresh[index] = nvhost_syncpt_incr_max_ext(
				chan->vi->ndev,
				chan->syncpt[index][0], 1);
			mw_ack_done = VI_CSI_MW_ACK_DONE(chan->port[index]);
			val = VI_CFG_VI_INCR_SYNCPT_COND(mw_ack_done) |
				chan->syncpt[index][0];
		}

		/* arm the syncpoint condition, then fire a single-shot capture
		 * only if no capture is already pending on this port */
		tegra_channel_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT, val);
		if (!csi_read(chan, index, TEGRA_VI_CSI_SINGLE_SHOT)) {
			csi_write(chan, index,
				TEGRA_VI_CSI_SINGLE_SHOT, SINGLE_SHOT_CAPTURE);
		} else {
			dev_dbg(&chan->video->dev,
				"Syncpoint already enabled at capture done!%d\n", index);
		}
	}

	/* wait for the memory-write acknowledge on every active port */
	for (index = 0; index < chan->valid_ports; index++) {
		if (chan->low_latency) {
			err = nvhost_syncpt_wait_timeout_ext(chan->vi->ndev,
				chan->syncpt[index][1], thresh[index],
				chan->timeout, NULL, &ts);
//debug case 1
			/* NOTE(review): printk lacks a KERN_* level and the
			 * "%d" has no trailing newline, so the err value may
			 * be glued onto the next line in dmesg output. */
			printk("capture_done_case1\n");
			printk("%d", err);
		} else {
			err = nvhost_syncpt_wait_timeout_ext(chan->vi->ndev,
				chan->syncpt[index][0], thresh[index],
				chan->timeout, NULL, &ts);
//debug case 2
			/* err == -11 is -EAGAIN (errno-base.h): the syncpoint
			 * never signalled within chan->timeout, i.e. the VI
			 * never saw the frame's memory write complete. */
			printk("capture_done_case2\n");
			printk("%d", err);
		}
		if (err) {
			dev_err(&chan->video->dev,
				"%s: MW_ACK_DONE syncpoint time out!%d\n",
				__func__, index);
			state = VB2_BUF_STATE_REQUEUEING;
			/* perform error recovery for timeout */
			tegra_channel_ec_recover(chan);
			chan->capture_state = CAPTURE_TIMEOUT;
			break;
		}
	}

	set_timestamp(buf, &ts);
	/* Mark capture state to IDLE as capture is finished */
	chan->capture_state = CAPTURE_IDLE;

	if (chan->low_latency) {
		buf->state = VB2_BUF_STATE_DONE;
		release_buffer(chan, buf);
	} else
		tegra_channel_ring_buffer(chan, &buf->buf, &ts, state);

	trace_tegra_channel_capture_done("mw_ack_done", ts);
}

We get a value of err of -11 for nvhost_syncpt_wait_timeout_ext in //debug case 2 using dmesg --follow

So we assume, that the problem is at the end of the frame, is that right ?
Does the value -11 make you think of a possible mistake?

Thanks !
Adrien

hello adrien.leroy2,

here are two different approaches to fetch camera frames.
if you enable low_latency, it will look for start-of-frame and end-of-frame markers to confirm a successfully captured frame.
suggest you disable low_latency by using single thread approach to debug the issue.
please also check below for example,

	if (chan->low_latency)
		ret = tegra_channel_capture_frame_multi_thread(chan, buf);
	else
		ret = tegra_channel_capture_frame_single_thread(chan, buf);

for the single thread approach,
this function, tegra_channel_capture_done() is being called when you attempt to stop the stream. it waits for last frame memory write acknowledgement.

BTW,
could you please share the L4T release version you’re working with?
for example, $ cat /etc/nv_tegra_release
thanks

Hello @JerryChang ,
I am not sure to understand well.
How/where can I enable/disable low_latency ? Because, considering the message that I have posted in my previous post,
it seems to be executing as if it was in single thread. I have the message single_thread before the -11 error.
In function : tegra_channel_capture_done from vi2_fops.c

for (index = 0; index < chan->valid_ports; index++) {
		if (chan->low_latency) {
			err = nvhost_syncpt_wait_timeout_ext(chan->vi->ndev,
				chan->syncpt[index][1], thresh[index],
				chan->timeout, NULL, &ts);
//debug case 1
			printk("multi_thread\n");
			printk("%d", err);
		} else {
			err = nvhost_syncpt_wait_timeout_ext(chan->vi->ndev,
				chan->syncpt[index][0], thresh[index],
				chan->timeout, NULL, &ts);
//debug case 2
			printk("single_thread\n");
			printk("%d", err);
		}

So I am not sure what to do…

Concerning the L4T release version :

# R32 (release), REVISION: 5.1, GCID : 26202423, BOARD: t210ref, EABI: aarch64

Thank you !

Adrien

hello adrien.leroy2,

it’s user-space switch to configure the low_latency modes.
please check $ v4l2-ctl -d /dev/video0 --list-ctrls for reference.

please also refer to below for the return error,
$L4T_Sources/r32.5/Linux_for_Tegra/source/public/kernel/kernel-4.9/include/uapi/asm-generic/errno-base.h
nvhost_syncpt_wait_timeout_ext() returning -11 (-EAGAIN) means it will try again,

please share the command-line you used for reference,
you may also review Sensor Pixel Clock, to examine device tree property settings, i.e. pixel_clk_hz.
here’s Supported Modes and Power Efficiency session, please configure nvpmodel clock configuration as MaxN to boost performance to testing,
thanks

1 Like

Hello @JerryChang ,
So we checked and we were already in single thread.

The command used to get the -11 value is dmesg --follow after doing :
sudo v4l2-ctl -d /dev/video0 --set-fmt-video=width=320,height=240,pixelformat=RGGB --set-ctrl bypass_mode=0 --stream-mmap --stream-count=20000

We identified some points to be careful on about the pixel_clk_hz so we need to carry on some tests.
Here is an image of what we send on the MIPI :

The data are sent during the high speed phase in MIPI. As we chose to pass in RGGB8, we configured the active_w, active_h at 160 and 120 because our sensor resolution is 320x240. We are not sure of what to put for line_length as we don’t know what is considered as blanking… Is the Jetson filtering the low power phases by itself, or is it considered as blanking?
Because from the FPGA, there is no blanking sent (in high speed phases, for a line, there are only the 320 pixels).

About MaxN we are already configured that way.

Thank you for your help,
Adrien

hello adrien.leroy2,

you should configure active_w and active_h to match your sensor's active region — that is, the actual image size.

just double check you’re assign correct pixel format and also supported resolution to the v4l2 command-line.
for example, $ v4l2-ctl -d /dev/video0 --list-formats-ext

note, you should review following property settings, since they’re used for the camera stack to calculate the clocks.
for example,
it’s line_length indicate the horizontal timing, this value must be greater than or equal to active_w.
sensor pixel clock to calculate the exposure and frame rate of the sensor. It must be set correctly to avoid potential issues.
i.e. pixel_clk_hz = sensor data rate per lane (Mbps) * number of lanes / bits per pixel
you may also have below formula to examine your line_length and frame_length configuration,
i.e. FPS = pixel_clk_hz / (Line length * Frame length)

1 Like

hello adrien.leroy2,

here’re training videos in the Tutorials | NVIDIA Developer page,
please check V4L2 Sensor Driver Development Tutorial to helps you develop your sensor driver.
thanks

Hello @JerryChang

Here is what I put in the dtsi :

/*
 * Copyright (c) 2018-2019, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <dt-bindings/media/camera.h>
#include <dt-bindings/platform/t210/t210.h>

/*
 * Device-tree fragment for a 1-lane, 320x240 RGGB8 stream generated by an
 * FPGA and fed into the Tegra210 (Jetson Nano) VI/NVCSI pipeline.  Adapted
 * from the Raspberry Pi v2 imx219 overlay; the media graph is:
 * sensor endpoint -> NVCSI channel 0 -> VI port 0.
 */
/ {
        host1x {
                vi_base: vi {
                        num-channels = <1>;
                        ports {
                                #address-cells = <1>;
                                #size-cells = <0>;
                                vi_port0: port@0 {
                                        reg = <0>;
                                        rbpcv2_imx219_vi_in0: endpoint {
                                                port-index = <0>;
                                                /* single CSI data lane (dropped from 2 to 1 due to board issue) */
                                                bus-width = <1>;
                                                remote-endpoint = <&rbpcv2_imx219_csi_out0>;
                                        };
                                };
                        };
                };

                csi_base: nvcsi {
                        num-channels = <1>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        csi_chan0: channel@0 {
                                reg = <0>;
                                ports {
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                        /* port@0: input from the sensor endpoint */
                                        csi_chan0_port0: port@0 {
                                                reg = <0>;
                                                rbpcv2_imx219_csi_in0: endpoint@0 {
                                                        port-index = <0>;
                                                        bus-width = <1>;
                                                        remote-endpoint = <&rbpcv2_imx219_out0>;
                                                };
                                        };
                                        /* port@1: output toward the VI */
                                        csi_chan0_port1: port@1 {
                                                reg = <1>;
                                                rbpcv2_imx219_csi_out0: endpoint@1 {
                                                        remote-endpoint = <&rbpcv2_imx219_vi_in0>;
                                                };
                                        };
                                };
                        };
                };

                i2c@546c0000 {
                        imx219_single_cam0: rbpcv2_imx219_a@10 {
                                /*
                                 * NOTE(review): this binds the stock imx219 driver; it must
                                 * match the custom driver's of_match table ("lynred,ati320")
                                 * for the ati320 driver to probe — confirm it was updated.
                                 */
                                compatible = "nvidia,imx219";
                                /* I2C device address */
                                reg = <0x10>;

                                /* V4L2 device node location */
                                devnode = "video0";

                                /* Physical dimensions of sensor */
                                physical_w = "3.680";
                                physical_h = "2.760";

                                sensor_model = "imx219";

                                use_sensor_mode_id = "true";

                                mode0 { /* ATI320_MODE_320x240_60FPS */
                                        mclk_khz = "20000";
                                        num_lanes = "1";
                                        tegra_sinterface = "serial_a";
                                        phy_mode = "DPHY";
                                        discontinuous_clk = "no";
                                        dpcm_enable = "false";
                                        cil_settletime = "0";
                                        dynamic_pixel_bit_depth="8";
                                        csi_pixel_bit_depth = "8";
                                        mode_type="bayer";
                                        pixel_t="bayer_rggb8";
                                        pixel_phase="rggb";

                                        /* active region = actual image size sent by the FPGA */
                                        active_w = "320";
                                        active_h = "240";
                                        readout_orientation = "90";
                                        /* horizontal timing in pixel clocks; must be >= active_w */
                                        line_length = "1480";
                                        inherent_gain = "1";
                                        mclk_multiplier = "2.625";
                                        /* pixel_clk_hz = lane data rate (Mbps) * lanes / bits per pixel */
                                        pix_clk_hz = "52500000";

                                        gain_factor = "16";
                                        framerate_factor = "1000000";
                                        exposure_factor = "1000000";
                                        min_gain_val = "16"; /* 1.00x */
                                        max_gain_val = "170"; /* 10.66x */
                                        step_gain_val = "1";
                                        default_gain = "16"; /* 1.00x */
                                        min_hdr_ratio = "1";
                                        max_hdr_ratio = "1";
                                        min_framerate = "2000000"; /* 2.0 fps */
                                        max_framerate = "147700000"; /* 147.7 fps */
                                        step_framerate = "1";
                                        default_framerate = "147700000"; /* 147.7 fps */
                                        min_exp_time = "13"; /* us */
                                        max_exp_time = "683709"; /* us */
                                        step_exp_time = "1";
                                        default_exp_time = "2495"; /* us */

                                        /* NOTE(review): the FPGA sends no embedded metadata;
                                         * this should be "0" (changed later in the thread). */
                                        embedded_metadata_height = "2";
                                };

                                ports {
                                        #address-cells = <1>;
                                        #size-cells = <0>;

                                        port@0 {
                                                reg = <0>;
                                                rbpcv2_imx219_out0: endpoint {
                                                        port-index = <0>;
                                                        bus-width = <1>;
                                                        remote-endpoint = <&rbpcv2_imx219_csi_in0>;
                                                };
                                        };
                                };
                        };
                };
    };

        /* Lens metadata consumed by the camera stack (values are nominal) */
        lens_imx219@RBPCV2 {
                min_focus_distance = "0.0";
                hyper_focal = "0.0";
                focal_length = "3.04";
                f_number = "2.0";
                aperture = "0.0";
        };
};

/*
 * Platform-level camera hints: the Tegra camera stack uses these to size
 * VI/ISP clocks and memory bandwidth for the attached modules.
 */
/ {
        tcp: tegra-camera-platform {
                /* NOTE: the space in "nvidia, tegra-camera-platform" matches the stock L4T DT */
                compatible = "nvidia, tegra-camera-platform";

                num_csi_lanes = <1>;
                max_lane_speed = <1500000>;
                min_bits_per_pixel = <8>;
                vi_peak_byte_per_pixel = <2>;
                vi_bw_margin_pct = <25>;
                max_pixel_rate = <240000>;
                isp_peak_byte_per_pixel = <5>;
                isp_bw_margin_pct = <25>;

                modules {
                        cam_module0: module0 {
                                badge = "porg_front_RBPCV2";
                                position = "front";
                                orientation = "1";
                                cam_module0_drivernode0: drivernode0 {
                                        pcl_id = "v4l2_sensor";
                                        /* NOTE(review): "imx219 6-0010" means i2c bus 6, addr
                                         * 0x10 — must match the actual probed device name */
                                        devname = "imx219 6-0010";
                                        proc-device-tree = "/proc/device-tree/host1x/i2c@546c0000/rbpcv2_imx219_a@10";
                                };
                        };
                };
        };
};

From the FPGA there are 240 lines by frame composed of 320 active pixels sent on 1 lane at 52,5MHz (pixel_clk_hz)
We calculated a line length of 1480 using the formula in V4L2 Sensor Driver Development (Advanced)
Our framerate is 144,7.

Do you think the dtsi is well configured for our situation ?

Furthermore, do you have any other tips which could help me to debug my error ?
I always have the weird framerate of near 15000 when I use this command :

sudo v4l2-ctl -d /dev/video0 --set-fmt-video=width=320,height=240,pixelformat=RGGB --set-ctrl bypass_mode=0 --stream-mmap --stream-count=20000

I don’t find where does this come from… (I was already in single thread mode and I checked the pixelformat and supported resolution and It was OK)

Thanks,

Adrien.

hello adrien.leroy2,

you’re using imx219 device tree to have customization, please update the node as your customize naming,
please revise the string as your customize drivers.
I would suggest you update this at least, compatible = "nvidia,imx219"; it’s property for using imx219 sensor driver to do the sensor operations.

are you output embedded metadata on the signaling?
if no, please configure embedded_metadata_height=0 in the device tree.

Hello @JerryChang ,

I did some modifications…we configure embedded_metadata_height=0 in the device tree.
As well I changed the driver name to my sensor name and the compatible field.

I flashed that on my Jetson nano and I tried the following command :
sudo v4l2-ctl -d /dev/video0 --set-fmt-video=width=320,height=240,pixelformat=RGGB --set-ctrl bypass_mode=0 --stream-mmap --stream-count=1 to capture one frame. I activated some debugging messages :

echo file vi2_fops +p > /sys/kernel/debug/dynamic_debug/control
echo file csi2_fops +p > /sys/kernel/debug/dynamic_debug/control

Here is what I get :

[ 3713.962533] video4linux video0: tegra_channel_capture_done: MW_ACK_DONE syncpoint time out!0
[ 3713.972098] video4linux video0: TEGRA_VI_CSI_ERROR_STATUS 0x00000004
[ 3713.972127] vi 54080000.vi: TEGRA_CSI_PIXEL_PARSER_STATUS 0x00000180
[ 3713.972148] vi 54080000.vi: TEGRA_CSI_CIL_STATUS 0x00000000
[ 3713.972167] vi 54080000.vi: TEGRA_CSI_CILX_STATUS 0x00000000
[ 3713.972365] vi 54080000.vi: cil_settingtime was autocalculated
[ 3713.972393] vi 54080000.vi: csi clock settle time: 13, cil settle time: 10

So I checked the Tegra_X1_TRM to interpret those bits.

For CSI_CSI_PIXEL_PARSER_A_STATUS_0 I have :

Bit 8 : PPA_EXTRA_SF: Set when CSI-PPA receives a SF when it is expecting an EF. This happens when EF of
the frame gets corrupted before arriving CSI. CSI-PPA will insert a fake EF and the drop the current frame
with Correct SF.
Bit 7 : PPA_SHORT_FRAME: Set when CSI-PPA receives a short frame. This bit gets set even if
CSI_PPA_PAD_FRAME specifies that short frames are to be padded to the correct line length.

What do you think about it ?

Thank you
Adrien

hello adrien.leroy2,

please also check TRM for [Table 180: VI Error Status and Interrupt Mask Registers Functional Register Definitions]
the 0x4 error reported an FRAME_HEIGHT_SHORT_ERROR.
short frame error means the coming signaling less than VI’s expectation. you could have adjust active_h (to reduce) in the DT property for verification.

BTW,
please check via sysnode to ensure you’re having correct device tree blob updates.
for example,
# cat /proc/device-tree/host1x/i2c@546c0000/rbpcv2_imx219_a@10/mode0/active_h
or
# cat /proc/device-tree/host1x/i2c@546c0000/rbpcv2_imx219_a@10/mode0/embedded_metadata_height

1 Like

Hi @JerryChang ,

Thanks for your answer, I will check that.

Do you know if there is a mean to display the number of pixels, lines and rows of each frame received ? I think it could help me a lot.

Adrien.

hello adrien.leroy2,

No, Jetson Nano don’t have VI tracing logs for checking that.
it usually an issue of device tree properties settings, you should have correct settings for VI’s buffer allocation,
thanks

Hello @JerryChang,

I tried to reduce active_h in the device-tree until active_h = 10 but without any success, the error does not change :

With dmesg --follow :

[ 3713.962533] video4linux video0: tegra_channel_capture_done: MW_ACK_DONE syncpoint time out!0
[ 3713.972098] video4linux video0: TEGRA_VI_CSI_ERROR_STATUS 0x00000004
[ 3713.972127] vi 54080000.vi: TEGRA_CSI_PIXEL_PARSER_STATUS 0x00000180
[ 3713.972148] vi 54080000.vi: TEGRA_CSI_CIL_STATUS 0x00000000
[ 3713.972167] vi 54080000.vi: TEGRA_CSI_CILX_STATUS 0x00000000
[ 3713.972365] vi 54080000.vi: cil_settingtime was autocalculated
[ 3713.972393] vi 54080000.vi: csi clock settle time: 13, cil settle time: 10

And I always have the weird variating framerate (as you can see here : Jetson Nano MIPI CSI-2 without I2C from FPGA - #19 by adrien.leroy2)

Do you have any other idea about how to solve this problem ?

Thanks.

Adrien

hello adrien.leroy2,

I would suggest you probe and review the sensor signaling again,
is it actual follow MIPI CSI-2 specification, especially, end-of-frame signaling.
thanks

Hello @JerryChang,

I am not sure I understand what you mean. I have analysed the signals and they seem good. I don’t have a MIPI analyser so I can’t verify all the data sent accurately.

I tested to reduce the framerate (from 147,7 to 60 fps) of my sensor and it modified the result of the previous command :

sudo v4l2-ctl -d /dev/video0 --set-fmt-video=width=320,height=240,pixelformat=RGGB --set-ctrl bypass_mode=0 --stream-mmap --stream-count=1

I now have a framerate which vary from 5000 to 6000 (Before it was from 11000 to 15000). So it seems to have an impact but it’s always not good).

Furthermore, I tested to modify a lot of parameters of the device tree (line length, pix_clk_hz, tegra_sinterface, port-index…) and each time I have the same problem with the framerate. However, the device tree is well compiled. When I use the commands you gave me :

cat /proc/device-tree/host1x/i2c@546c0000/rbpcv2_imx219_a@10/mode0/active_h

After flashing the DTB I can see the parameters were updated correctly. Could it be that the device tree is not read correctly? (The device tree is properly linked to the driver through the compatible field, and the driver loads correctly)

What do you think about this ?

Thank you in advance,

Adrien.

hello adrien.leroy2,

the device tree blob has updated successfully if you checked and confirmed it via sysnode.

please check the frame-length settings of your sensor stream, frame-length is defined in the sensor init table,
there’s a formula to examine your line_length, frame_length, and also FPS.
i.e. FPS = pixel_clk_hz / (Line length * Frame length)
thanks

Hello @JerryChang ,
Currently, we are not working with a sensor but a FPGA which simulates its behaviour. So the Verylog/VHDL code allows me to set the parameters of the stream.
I checked this input stream using an oscilloscope to verify the framerate, the pixel clock, the image dimensions etc.
Everything seems correct.

In my opinion the problem would more likely come from the Jetson side, such as device tree, mode_tbls or driver but I don’t know where precisely…

I join to this post my codes for the driver and the mode_tbls file.

Here is the driver code :

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/debugfs.h>

#include <linux/seq_file.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>

#include <media/tegra_v4l2_camera.h>
#include <media/tegracam_core.h>
#include <media/ati320.h>

#include "../platform/tegra/camera/camera_gpio.h"
#include "ati320_mode_tbls.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ati320.h>

/* ati320 - sensor parameter limits */
//#define ATI320_MIN_GAIN				0x0000

/* ati320 sensor register address */
//#define ATI320_MODEL_ID_ADDR_MSB		0x0000

/*
 * Device-tree match table: this driver probes nodes whose compatible string
 * is "lynred,ati320" (the sensor DT node must use the same string).
 */
static const struct of_device_id ati320_of_match[] = {
	{ .compatible = "lynred,ati320", },
	{ },
};
MODULE_DEVICE_TABLE(of, ati320_of_match);

/*
 * V4L2 control IDs exposed through the tegracam framework.  The matching
 * set_* handlers in this file are no-op stubs (the FPGA source has no
 * runtime-adjustable gain/exposure/frame-rate).
 */
static const u32 ctrl_cid_list[] = {
	TEGRA_CAMERA_CID_GAIN,
	TEGRA_CAMERA_CID_EXPOSURE,
	TEGRA_CAMERA_CID_FRAME_RATE,
	TEGRA_CAMERA_CID_SENSOR_MODE_ID,
};

/* Per-device driver state, allocated at probe time. */
struct ati320 {
	struct i2c_client		*i2c_client;	/* backing I2C client */
	struct v4l2_subdev		*subdev;	/* registered V4L2 subdevice */
	struct mutex            streaming_lock;	/* serializes start/stop streaming */
	bool                    streaming;	/* true while the stream GPIO is asserted */
	u16				fine_integ_time;
	u32				frame_length;	/* lines per frame; used for the stop-drain delay */
	struct camera_common_data	*s_data;
	struct tegracam_device		*tc_dev;
};

/*
 * regmap layout for the (unused) I2C register interface: 16-bit register
 * addresses, 16-bit values, one register per transfer.
 * NOTE(review): val_bits is 16 but ati320_read_reg() masks the result to
 * 8 bits — confirm which width the real device uses.
 */
static const struct regmap_config sensor_regmap_config = {
	.reg_bits = 16,
	.val_bits = 16,
	.cache_type = REGCACHE_RBTREE,
	.use_single_rw = true,
};



/*
 * Read one register via regmap and return its low byte through @val.
 *
 * Fix: only store into *val when the read succeeded.  The original wrote
 * the (zero-initialized, possibly stale) reg_val into *val even when
 * regmap_read() failed, silently handing the caller a bogus value.
 *
 * Returns 0 on success or the negative errno from regmap_read().
 */
static inline int ati320_read_reg(struct camera_common_data *s_data,
	u16 addr, u8 *val)
{
	int err;
	u32 reg_val = 0;

	err = regmap_read(s_data->regmap, addr, &reg_val);
	if (!err)
		/* NOTE(review): regmap val_bits is 16; only the low byte is
		 * returned here — confirm this truncation is intended. */
		*val = reg_val & 0xff;

	return err;
}

/*
 * Write one 8-bit value to a sensor register via regmap.
 * Logs an error on failure; returns 0 on success or a negative errno.
 */
static inline int ati320_write_reg(struct camera_common_data *s_data,
	u16 addr, u8 val)
{
	int ret;

	ret = regmap_write(s_data->regmap, addr, val);
	if (ret != 0)
		dev_err(s_data->dev, "%s: i2c write failed, 0x%x = %x",
			__func__, addr, val);

	return ret;
}

/*
 * Register-table writer — intentionally a no-op: the video source is an
 * FPGA with no I2C control interface, so there are no sensor registers
 * to program (see ati320_set_mode()).
 */
static int ati320_write_table(struct ati320 *priv, const ati320_reg table[])
{
	return 0;
}

/* Group-hold control stub — nothing to program on this source. */
static int ati320_set_group_hold(struct tegracam_device *tc_dev, bool val)
{
	return 0;
}

/* Gain control stub — the FPGA stream has fixed gain. */
static int ati320_set_gain(struct tegracam_device *tc_dev, s64 val)
{
	return 0;
}

/* Frame-rate control stub — the FPGA stream rate is fixed in hardware. */
static int ati320_set_frame_rate(struct tegracam_device *tc_dev, s64 val)
{
	return 0;
}

/* Exposure control stub — the FPGA stream has fixed exposure. */
static int ati320_set_exposure(struct tegracam_device *tc_dev, s64 val)
{
	return 0;
}


/*
 * tegracam control operations.  All handlers are no-op stubs (defined
 * above); they exist only to satisfy the framework's control plumbing.
 */
static struct tegracam_ctrl_ops ati320_ctrl_ops = {
	.numctrls = ARRAY_SIZE(ctrl_cid_list),
	.ctrl_cid_list = ctrl_cid_list,
	.set_gain = ati320_set_gain,
	.set_exposure = ati320_set_exposure,
	.set_frame_rate = ati320_set_frame_rate,
	.set_group_hold = ati320_set_group_hold,
};

/*
 * Power on the "sensor".  If the platform supplied its own power_on
 * callback, delegate to it; otherwise simply mark the rail as on — the
 * FPGA source needs no regulator/reset sequencing (the stock imx219
 * sequence is kept below, commented out, for reference).
 *
 * Returns 0 on success or the callback's error code.
 */
static int ati320_power_on(struct camera_common_data *s_data)
{
	int err = 0;
	struct camera_common_power_rail *pw = s_data->power;
	struct camera_common_pdata *pdata = s_data->pdata;
	struct device *dev = s_data->dev;

	/* debug trace (message text is French: "powering on the ati...") */
	printk("Allumage de l'ati...\n");

	dev_dbg(dev, "%s: power on\n", __func__);
	if (pdata && pdata->power_on) {
		err = pdata->power_on(pw);
		if (err)
			dev_err(dev, "%s failed.\n", __func__);
		else
			pw->state = SWITCH_ON;
		return err;
	}

	// Sensor start-up sequence would go here (none needed for the FPGA)

	/*
	if (pw->reset_gpio) {
		if (gpio_cansleep(pw->reset_gpio))
			gpio_set_value_cansleep(pw->reset_gpio, 0);
		else
			gpio_set_value(pw->reset_gpio, 0);
	}

	if (unlikely(!(pw->avdd || pw->iovdd || pw->dvdd)))
		goto skip_power_seqn;

	usleep_range(10, 20);

	if (pw->avdd) {
		err = regulator_enable(pw->avdd);
		if (err)
			goto ati320_avdd_fail;
	}

	if (pw->iovdd) {
		err = regulator_enable(pw->iovdd);
		if (err)
			goto ati320_iovdd_fail;
	}

	if (pw->dvdd) {
		err = regulator_enable(pw->dvdd);
		if (err)
			goto ati320_dvdd_fail;
	}

	usleep_range(10, 20);

skip_power_seqn:
	if (pw->reset_gpio) {
		if (gpio_cansleep(pw->reset_gpio))
			gpio_set_value_cansleep(pw->reset_gpio, 1);
		else
			gpio_set_value(pw->reset_gpio, 1);
	}
	*/
	/* Need to wait for t4 + t5 + t9 time as per the data sheet */
	/* t4 - 200us, t5 - 21.2ms, t9 - 1.2ms */
	//usleep_range(23000, 23100);
	
	//printk("ati start streaming");
	//ati320_start_streaming(s_data->tegracam_ctrl_hdl->tc_dev);

	pw->state = SWITCH_ON;
	printk("Ati powered on\n");
	return 0;

/*ati320_dvdd_fail:
	regulator_disable(pw->iovdd);

ati320_iovdd_fail:
	regulator_disable(pw->avdd);

ati320_avdd_fail:
	dev_err(dev, "%s failed.\n", __func__);

	return -ENODEV;*/
}

/*
 * Power off the sensor.  Delegates to a platform power_off callback when
 * one exists; otherwise de-asserts the reset GPIO and disables the
 * regulators (in reverse of the enable order).
 *
 * NOTE(review): pw->reset_gpio is toggled here, but the gpio_request()
 * in ati320_power_get() is commented out — confirm the GPIO is valid.
 *
 * Returns 0 on success or the callback's error code.
 */
static int ati320_power_off(struct camera_common_data *s_data)
{
	int err = 0;
	struct camera_common_power_rail *pw = s_data->power;
	struct camera_common_pdata *pdata = s_data->pdata;
	struct device *dev = s_data->dev;
	
	/* debug trace (message text is French: "powering off the ati...") */
	printk("Extinction de l'ati...\n");

	dev_dbg(dev, "%s: power off\n", __func__);

	if (pdata && pdata->power_off) {
		err = pdata->power_off(pw);
		if (err) {
			dev_err(dev, "%s failed.\n", __func__);
			return err;
		}
	} else {
		if (pw->reset_gpio) {
			if (gpio_cansleep(pw->reset_gpio))
				gpio_set_value_cansleep(pw->reset_gpio, 0);
			else
				gpio_set_value(pw->reset_gpio, 0);
		}

		/* NOTE(review): min == max leaves the scheduler no slack;
		 * usleep_range(10, 20) was likely intended — confirm */
		usleep_range(10, 10);

		if (pw->dvdd)
			regulator_disable(pw->dvdd);
		if (pw->iovdd)
			regulator_disable(pw->iovdd);
		if (pw->avdd)
			regulator_disable(pw->avdd);
	}

	pw->state = SWITCH_OFF;

	printk("Ati powered off\n");
	return 0;
}

/*
 * Release the power-rail resources acquired by ati320_power_get():
 * drop each regulator reference, clear the stored pointers, and free
 * the reset GPIO if one was recorded.
 *
 * Returns 0 on success, -EFAULT if no power-rail state exists.
 */
static int ati320_power_put(struct tegracam_device *tc_dev)
{
	struct camera_common_power_rail *pw = tc_dev->s_data->power;

	if (!pw)
		return -EFAULT;

	if (pw->dvdd)
		devm_regulator_put(pw->dvdd);
	if (pw->avdd)
		devm_regulator_put(pw->avdd);
	if (pw->iovdd)
		devm_regulator_put(pw->iovdd);

	/* forget the released handles so they are not reused */
	pw->dvdd = NULL;
	pw->avdd = NULL;
	pw->iovdd = NULL;

	if (pw->reset_gpio)
		gpio_free(pw->reset_gpio);

	return 0;
}

/*
 * Acquire power-rail resources for the sensor: the MCLK clock (with an
 * optional parent clock) and the reset GPIO number from platform data.
 *
 * NOTE(review): the gpio_request() for reset_gpio is commented out, so the
 * GPIO number is recorded but never claimed, while ati320_power_off()
 * still drives it — confirm this is intentional for the FPGA setup.
 *
 * Returns 0 on success or a negative errno.
 */
static int ati320_power_get(struct tegracam_device *tc_dev)
{
	struct device *dev = tc_dev->dev;
	struct camera_common_data *s_data = tc_dev->s_data;
	struct camera_common_power_rail *pw = s_data->power;
	struct camera_common_pdata *pdata = s_data->pdata;
	struct clk *parent;
	int err = 0;

	if (!pdata) {
		dev_err(dev, "pdata missing\n");
		return -EFAULT;
	}

	/* Sensor MCLK (aka. INCK) */
	if (pdata->mclk_name) {
		pw->mclk = devm_clk_get(dev, pdata->mclk_name);
		if (IS_ERR(pw->mclk)) {
			dev_err(dev, "unable to get clock %s\n",
				pdata->mclk_name);
			return PTR_ERR(pw->mclk);
		}

		if (pdata->parentclk_name) {
			parent = devm_clk_get(dev, pdata->parentclk_name);
			if (IS_ERR(parent)) {
				dev_err(dev, "unable to get parent clock %s",
					pdata->parentclk_name);
			} else
				clk_set_parent(pw->mclk, parent);
		}
	}

	/* Reset or ENABLE GPIO */
	pw->reset_gpio = pdata->reset_gpio;
	/*err = gpio_request(pw->reset_gpio, "cam_reset_gpio");
	if (err < 0) {
		dev_err(dev, "%s: unable to request reset_gpio (%d)\n",
			__func__, err);
		goto done;
	}

done:*/
	pw->state = SWITCH_OFF;

	return err;
}

/*
 * Parse the sensor's device-tree node into platform data: the optional
 * reset GPIO and the optional MCLK name.
 *
 * Fixes:
 *  - -EPROBE_DEFER from of_get_named_gpio() was computed into a local but
 *    never returned; the function fell through and handed the core a pdata
 *    with the negative errno cast into reset_gpio.  Now the deferral is
 *    propagated via ERR_PTR() so probe is retried once the GPIO provider
 *    appears.
 *  - A missing (non-defer) reset GPIO now records 0 ("no GPIO") instead of
 *    a huge unsigned value, so power_off() will not toggle a bogus line.
 *
 * Returns the allocated pdata, NULL on allocation/match failure, or
 * ERR_PTR(-EPROBE_DEFER).
 */
static struct camera_common_pdata *ati320_parse_dt(
	struct tegracam_device *tc_dev)
{
	struct device *dev = tc_dev->dev;
	struct device_node *np = dev->of_node;
	struct camera_common_pdata *board_priv_pdata;
	const struct of_device_id *match;
	int err = 0;
	int gpio;
	printk("lancement du parsing\n");

	if (!np)
		return NULL;

	match = of_match_device(ati320_of_match, dev);
	if (!match) {
		dev_err(dev, "Failed to find matching dt id\n");
		return NULL;
	}

	board_priv_pdata = devm_kzalloc(dev,
		sizeof(*board_priv_pdata), GFP_KERNEL);
	if (!board_priv_pdata)
		return NULL;

	gpio = of_get_named_gpio(np, "reset-gpios", 0);
	if (gpio < 0) {
		if (gpio == -EPROBE_DEFER)
			return ERR_PTR(-EPROBE_DEFER);
		dev_err(dev, "reset-gpios not found\n");
		/* no reset line (FPGA source) — record "none", keep probing */
		gpio = 0;
	}
	board_priv_pdata->reset_gpio = (unsigned int)gpio;

	err = of_property_read_string(np, "mclk", &board_priv_pdata->mclk_name);
	if (err)
		dev_dbg(dev, "mclk name not present, "
			"assume sensor driven externally\n");

	return board_priv_pdata;
}

/*
 * Apply the register sequences for the currently selected sensor mode.
 * Both the property-index table and the mode-index table are written,
 * in that order, exactly as before.
 */
static int ati320_set_mode(struct tegracam_device *tc_dev)
{
	struct ati320 *priv = (struct ati320 *)tegracam_get_privdata(tc_dev);
	struct camera_common_data *s_data = tc_dev->s_data;
	int ret;

	ret = ati320_write_table(priv, mode_table[s_data->mode_prop_idx]);
	if (ret)
		return ret;

	return ati320_write_table(priv, mode_table[s_data->mode]);
}

static int ati320_start_streaming(struct tegracam_device *tc_dev)
{
	struct ati320 *priv = (struct ati320 *)tegracam_get_privdata(tc_dev);

	mutex_lock(&priv->streaming_lock);
	priv->streaming = true;
	//set pin 76 a 1 pour demarrer la mire
	gpio_request(76, "stream");
	gpio_direction_output(76, 1);
	gpio_set_value(76, 1);
	printk("debut du steam\n");
	mutex_unlock(&priv->streaming_lock);

	return 0;
}

static int ati320_stop_streaming(struct tegracam_device *tc_dev)
{
	struct ati320 *priv = (struct ati320 *)tegracam_get_privdata(tc_dev);
	u32 frame_time;

	mutex_lock(&priv->streaming_lock);
	priv->streaming = false;
	//set pin 76 a 0
	gpio_direction_output(76, 0);
	gpio_set_value(76, 0);
	printk("arret du stream\n");
	mutex_unlock(&priv->streaming_lock);

	frame_time = priv->frame_length*1480/52500/1000;

	usleep_range(frame_time, frame_time + 1000);


	return 0;
}

/*
 * Sensor operations plugged into the tegracam framework.
 * Note that write_reg/read_reg are still registered even though
 * streaming itself is controlled via GPIO 76 rather than I2C
 * (see ati320_start_streaming/ati320_stop_streaming).
 */
static struct camera_common_sensor_ops ati320_common_ops = {
	.numfrmfmts = ARRAY_SIZE(ati320_frmfmt),
	.frmfmt_table = ati320_frmfmt,
	.power_on = ati320_power_on,
	.power_off = ati320_power_off,
	.write_reg = ati320_write_reg,
	.read_reg = ati320_read_reg,
	.parse_dt = ati320_parse_dt,
	.power_get = ati320_power_get,
	.power_put = ati320_power_put,
	.set_mode = ati320_set_mode,
	.start_streaming = ati320_start_streaming,
	.stop_streaming = ati320_stop_streaming,
};

/*static int ati320_board_setup(struct ati320 *priv)
{
	struct camera_common_data *s_data = priv->s_data;
	struct camera_common_pdata *pdata = s_data->pdata;
	struct device *dev = s_data->dev;
	//u8 reg_val[2];
	int err = 0;

	if (pdata->mclk_name) {
		err = camera_common_mclk_enable(s_data);
		if (err) {
			dev_err(dev, "error turning on mclk (%d)\n", err);
			goto done;
		}
	}

	err = ati320_power_on(s_data);
	if (err) {
		dev_err(dev, "error during power on sensor (%d)\n", err);
		goto err_power_on;
	}*/

	/* Probe sensor model id registers */
	/*err = ati320_read_reg(s_data, ATI320_MODEL_ID_ADDR_MSB, &reg_val[0]);
	if (err) {
		dev_err(dev, "%s: error during i2c read probe (%d)\n",
			__func__, err);
		goto err_reg_probe;
	}
	err = ati320_read_reg(s_data, ATI320_MODEL_ID_ADDR_LSB, &reg_val[1]);
	if (err) {
		dev_err(dev, "%s: error during i2c read probe (%d)\n",
			__func__, err);
		goto err_reg_probe;
	}
	if (!((reg_val[0] == 0x02) && reg_val[1] == 0x19))
		dev_err(dev, "%s: invalid sensor model id: %x%x\n",
			__func__, reg_val[0], reg_val[1]);*/

	/* Sensor fine integration time */
	/*err = ati320_get_fine_integ_time(priv, &priv->fine_integ_time);
	if (err)
		dev_err(dev, "%s: error querying sensor fine integ. time\n",
			__func__);

err_reg_probe:
	ati320_power_off(s_data);

err_power_on:
	if (pdata->mclk_name)
		camera_common_mclk_disable(s_data);

done:
	return err;
}*/

/*
 * v4l2 subdev .open handler, invoked when user space opens the subdev
 * node. No hardware access is needed; just trace the call. The extra
 * bare printk (no KERN_* level) duplicated the dev_dbg and was removed.
 */
static int ati320_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	dev_dbg(&client->dev, "%s:\n", __func__);

	return 0;
}

/* Internal subdev ops: only .open is implemented (trace-only). */
static const struct v4l2_subdev_internal_ops ati320_subdev_internal_ops = {
	.open = ati320_open,
};

static int ati320_probe(struct i2c_client *client,
	const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct tegracam_device *tc_dev;
	struct ati320 *priv;
	int err;
	printk("ati320 probe start !\n");
	dev_dbg(dev, "probing v4l2 sensor at addr 0x%0x\n", client->addr);

	if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
		return -EINVAL;

	priv = devm_kzalloc(dev,
			sizeof(struct ati320), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	tc_dev = devm_kzalloc(dev,
			sizeof(struct tegracam_device), GFP_KERNEL);
	if (!tc_dev)
		return -ENOMEM;

	priv->i2c_client = tc_dev->client = client;
	tc_dev->dev = dev;
	strncpy(tc_dev->name, "ati320", sizeof(tc_dev->name));
	tc_dev->dev_regmap_config = &sensor_regmap_config;
	tc_dev->sensor_ops = &ati320_common_ops;
	tc_dev->v4l2sd_internal_ops = &ati320_subdev_internal_ops;
	tc_dev->tcctrl_ops = &ati320_ctrl_ops;

	err = tegracam_device_register(tc_dev);
	if (err) {
		dev_err(dev, "tegra camera driver registration failed\n");
		return err;
	}
	priv->tc_dev = tc_dev;
	priv->s_data = tc_dev->s_data;
	priv->subdev = &tc_dev->s_data->subdev;
	tegracam_set_privdata(tc_dev, (void *)priv);
	mutex_init(&priv->streaming_lock);

	/*err = ati320_board_setup(priv);
	if (err) {
		tegracam_device_unregister(tc_dev);
		dev_err(dev, "board setup failed\n");
		return err;
	}*/

	err = tegracam_v4l2subdev_register(tc_dev, true);
	if (err) {
		dev_err(dev, "tegra camera subdev registration failed\n");
		return err;
	}

	dev_dbg(dev, "detected ati320 sensor\n");

	printk("ati320 probe stop !\n");
	return 0;
}

/*
 * I2C remove: tear down the v4l2 subdev and tegracam registration in
 * reverse order of probe, then destroy the streaming lock.
 */
static int ati320_remove(struct i2c_client *client)
{
	struct camera_common_data *s_data = to_camera_common_data(&client->dev);
	struct ati320 *priv = (struct ati320 *)s_data->priv;

	tegracam_v4l2subdev_unregister(priv->tc_dev);
	tegracam_device_unregister(priv->tc_dev);

	mutex_destroy(&priv->streaming_lock);

	/* dev_dbg instead of the original bare printk (no KERN_* level). */
	dev_dbg(&client->dev, "ati320 removed\n");
	return 0;
}

/* Legacy (non-DT) I2C device-id match table; exported for modprobe. */
static const struct i2c_device_id ati320_id[] = {
	{ "ati320", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, ati320_id);

/* I2C driver glue; matching happens via of_match_table on DT systems.
 * NOTE(review): .owner is set explicitly, but the i2c core fills it in
 * automatically -- harmless, can be dropped. */
static struct i2c_driver ati320_i2c_driver = {
	.driver = {
		.name = "ati320",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(ati320_of_match),
	},
	.probe = ati320_probe,
	.remove = ati320_remove,
	.id_table = ati320_id,
};
module_i2c_driver(ati320_i2c_driver);

/* "Sony" removed from the description: the ATI320 is not a Sony part
 * (copy-paste leftover from an IMX sensor driver template). */
MODULE_DESCRIPTION("Media Controller driver for ATI320 camera sensor");
MODULE_AUTHOR("NVIDIA Corporation");
MODULE_LICENSE("GPL v2");

and here is the mode_tbls code:

/*
 * ati320_tables.h - sensor mode tables for ati320 HDR sensor.
 *
 * Copyright (c) 2015-2020, NVIDIA CORPORATION, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ATI320_I2C_TABLES__
#define __ATI320_I2C_TABLES__

/* Sentinel "addresses" used in the reg tables below: an entry whose
 * addr field is ATI320_TABLE_WAIT_MS means "sleep <val> ms", and an
 * entry with ATI320_TABLE_END terminates the table. */
#define ATI320_TABLE_WAIT_MS	0
/* Default settle delay (ms) inserted by the mode table. */
#define ATI320_WAIT_MS	10
#define ATI320_TABLE_END	1

/* Table entries are 16-bit address / 8-bit value pairs. */
#define ati320_reg struct reg_8

/* Stream-on register write (0x0100 = 1).
 * NOTE(review): streaming is actually toggled via GPIO 76 in the
 * driver; confirm whether this table is ever sent over I2C. */
static ati320_reg ati320_start_stream[] = {
	{0x0100, 0x01},
	{ATI320_TABLE_END, 0x00}
};

/* Stream-off register write (0x0100 = 0); counterpart of start_stream. */
static ati320_reg ati320_stop_stream[] = {
	{0x0100, 0x00},
	{ATI320_TABLE_END, 0x00}
};

/* 320x240 mode "init" sequence: no register writes at all, only a
 * 10 ms wait -- the FPGA source needs no I2C configuration. */
static ati320_reg ati320_mode_320x240_147fps[] = {
	{ATI320_TABLE_WAIT_MS, ATI320_WAIT_MS},
	{ATI320_TABLE_END, 0x0000}
};

/* Indices into mode_table[]; the mode entries must stay in the same
 * order as the mode nodes in the device tree. */
enum {
	ATI320_MODE_320x240_147FPS,

	ATI320_START_STREAM,
	ATI320_STOP_STREAM,
};

/* Lookup table mapping the enum above to register sequences. */
static ati320_reg *mode_table[] = {
	[ATI320_MODE_320x240_147FPS] = ati320_mode_320x240_147fps,

	[ATI320_START_STREAM]  = ati320_start_stream,
	[ATI320_STOP_STREAM]  = ati320_stop_stream,
};

/*
 * Frame rates (fps) advertised for the 320x240 mode.
 * NOTE(review): the identifier says 147 fps but the value is 60 --
 * confirm which rate the FPGA actually outputs and make these agree
 * (a mismatch here can confuse the VI capture timeout logic).
 */
static const int ati320_147fps[] = {
	60,
};

/*
 * WARNING: frmfmt ordering need to match mode definition in
 * device tree!
 */
/* {width, height}, fps list, fps count, is_hdr flag, mode index. */
static const struct camera_common_frmfmt ati320_frmfmt[] = {
	{{320, 240},	ati320_147fps, 1, 0, ATI320_MODE_320x240_147FPS},
};

#endif /* __ATI320_I2C_TABLES__ */

Maybe you could spot an error in those files.

Thank you,
Adrien

hello adrien.leroy2,

don’t you need to program some registers to enable the test pattern, or does the device start sending signaling continuously as soon as it powers on?
does the actual signaling start immediately? how did you determine the 10 ms wait time for mode initialization?
for example,

have you reported the frame_length value before the sensor starts streaming?
please try hard-coding that value for testing.