How to Use the GPC-DMA MEM2MEM Function

Is there an example of using GPC-DMA for a mem2mem transfer?

Could you provide more detail about your request?

There is already a dmatest module available in drivers/dma/dmatest.c which you can use as a reference.

https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/drivers/dma/dmatest.c?h=v4.14.111

A simplified version:

    dma_cap_mask_t mask;
struct dma_chan *chan = NULL;
struct dma_device *dma_dev = NULL;
u32 buf_size = 1024;
dma_addr_t dma_src_addr, dma_dst_addr;
struct dma_async_tx_descriptor *tx = NULL;
struct completion comp;
dma_cookie_t cookie;
void *srcs, *dest;
char *pattern = "############# Nvidia  ###############";

enum dma_data_direction dir = DMA_BIDIRECTIONAL;

dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);

chan = dma_request_channel(mask, filter_fn, NULL);
dma_dev = chan->device;

if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
	printk("SUPPORTS DMA_MEMCPY\n");
} else  {
	printk("does not SUPPORTS DMA_MEMCPY\n");
}

srcs = kzalloc(buf_size, GFP_KERNEL);
memset(srcs, 0, buf_size);
memcpy(srcs, pattern, strlen(pattern));
printk("Source buffer : %s\n", (char*)srcs);


dest = kzalloc(buf_size, GFP_KERNEL);

dma_src_addr = dma_map_single(dma_dev->dev, srcs, buf_size, dir);
if (dma_mapping_error(dma_dev->dev, dma_src_addr)) {
	printk(" Failed to dma_map_singl\n");
}

dma_dst_addr = dma_map_single(dma_dev->dev, dest, buf_size, dir);
if (dma_mapping_error(dma_dev->dev, dma_dst_addr)) {
	printk(" Failed to dma_map_singl\n");
}

tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst_addr,
		dma_src_addr, buf_size, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!tx) {
	printk("Failed to prepare DMA memcpy\n");
}

init_completion(&comp);
tx->callback = complete_func;
tx->callback_param = ∁

cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
	printk("dma_submit error\n");
}

dma_async_issue_pending(chan);
printk("wait for transfer completion\n");
wait_for_completion(&comp);

dma_sync_single_for_cpu(dma_dev->dev, dma_dst_addr, buf_size, dir);
printk("destination buffer: %s\n", (char*)dest);

Thank you.
I am using this code in my module now, and it works.
However, the speed is too low: copying 4 MB of data runs at only 51 MB/s.

https://devtalk.nvidia.com/default/topic/1049482/jetson-tx2/used-gpc-dma-instead-of-memcpy-for-memory-copy/post/5327383/#5327383

i boost the system by sudo ./jetson_clocks.sh and max the emc clock by below command.

nvidia@tegra-ubuntu:~$ sudo su
root@tegra-ubuntu:/home/nvidia# echo 1 > /sys/kernel/debug/bpmp/debug/clk/emc/mrq_rate_locked
root@tegra-ubuntu:/home/nvidia# cat /sys/kernel/debug/bpmp/debug/clk/emc/max_rate
0
root@tegra-ubuntu:/home/nvidia#  echo 0 > /sys/kernel/debug/bpmp/debug/clk/emc/rate

why my board max_rate is 0 ?

my test code is

/* DMA completion callback: mark the transfer finished and wake the waiter. */
static void gpdma_callback(void *arg)
{
	struct dmatest_done *d = arg;

	d->done = true;
	wake_up_all(d->wait);
}

/*
 * Benchmark a mem2mem GPC-DMA copy of @count bytes using kernel-allocated
 * test buffers (the __user dst/src parameters are not dereferenced here —
 * NOTE(review): confirm whether user copies were intended).
 * Returns @count on success, (size_t)-1 on failure.
 * NOTE(review): a ssize_t return would express errors more cleanly; the
 * size_t return is kept to preserve the existing interface.
 */
static size_t gpcdma_read_write(struct gpcdma_dev *gpdma, char __user *dst,
				char __user *src, size_t count)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);	/* was typo "donw_wait" */
	struct dmatest_done done = { .wait = &done_wait };	/* was "&done_wait);" */
	struct dma_chan *chan;
	struct dma_device *dev;
	unsigned char *srcs = NULL, *dsts = NULL;
	dma_cookie_t cookie;
	dma_addr_t dma_src_addr, dma_dst_addr;
	struct dma_async_tx_descriptor *tx;
	size_t idx;
	size_t ret = (size_t)-1;
	ktime_t ktime;
	s64 runtime;

	chan = gpdma->dma_chan;		/* original read "char = gpdma->dma_chan;" */
	dev = chan->device;
	done.done = false;

	srcs = kzalloc(count, GFP_KERNEL);
	dsts = kzalloc(count, GFP_KERNEL);
	if (!srcs || !dsts) {
		gd_err("failed to allocate test buffers\n");
		goto out_free;
	}

	/* Fill the source with a known pattern; kzalloc() already zeroed dsts. */
	for (idx = 0; idx < count; idx++)
		srcs[idx] = idx;

	ktime = ktime_get();

	/* Map with the actual transfer direction instead of DMA_BIDIRECTIONAL. */
	dma_src_addr = dma_map_single(dev->dev, srcs, count, DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_src_addr)) {
		gd_err("failed to map source buffer\n");
		goto out_free;
	}

	dma_dst_addr = dma_map_single(dev->dev, dsts, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_dst_addr)) {
		gd_err("failed to map dest buffer\n");
		goto out_unmap_src;
	}

	tx = dev->device_prep_dma_memcpy(chan, dma_dst_addr, dma_src_addr,
					 count,
					 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx) {
		gd_err("prep failed\n");		/* "chane" typo fixed above */
		goto out_unmap;
	}

	tx->callback = gpdma_callback;
	tx->callback_param = &done;

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		gd_err("submit error\n");
		goto out_unmap;
	}

	dma_async_issue_pending(chan);

	wait_event_freezable_timeout(done_wait, done.done,
				     msecs_to_jiffies(2000));
	if (!done.done) {
		gd_err("transfer timed out\n");
		goto out_unmap;
	}

	/* Original ignored this status; actually check for completion. */
	if (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE) {
		gd_err("transfer did not complete\n");
		goto out_unmap;
	}

	runtime = ktime_us_delta(ktime_get(), ktime);
	gd_trace("GPCDMA transfer %llu KB/s", dmatest_KBs(runtime, count));

	ret = count;	/* original fell off the end with no return value */

out_unmap:
	dma_unmap_single(dev->dev, dma_dst_addr, count, DMA_FROM_DEVICE);
out_unmap_src:
	dma_unmap_single(dev->dev, dma_src_addr, count, DMA_TO_DEVICE);
	/* Verify the data only after a successful transfer, once unmapped. */
	if (ret == count)
		gpcdma_chk(srcs, dsts, count);
out_free:
	kfree(dsts);
	kfree(srcs);
	return ret;
}

kernel output is

GPCDMA trasfer 54153 KB/s

Can you provide the binary here? Also, a max_rate of 0 does not make sense — it should be 1866000000.

thank you.
how can i provide the binary?

Move the mouse to your comment the “Add attachment” will show up.

External Media

provide my module binary file or another?

Please provide the test module and how to test it.

Hi Caysno,

We have got the data like below.
Able to check on TX2 and mem2mem transfer using dmatest module (drivers/dma/dmatest.c)

On an average of 50 tests the results are reported as:
root@jetson:/home/ubuntu#
[ 9461.159317] dmatest: dma0chan19-copy: summary 50 tests, 0 failures 4335 iops 36680 KB/s (0)

Please note the parameters as below while running the tests:
root@jetson:/home/ubuntu# grep -H . /sys/module/dmatest/parameters/*
/sys/module/dmatest/parameters/channel:dma0chan19
/sys/module/dmatest/parameters/dmatest:0
/sys/module/dmatest/parameters/error:0
/sys/module/dmatest/parameters/iterations:50
/sys/module/dmatest/parameters/max_channels:0
/sys/module/dmatest/parameters/noverify:N
/sys/module/dmatest/parameters/pq_sources:3
/sys/module/dmatest/parameters/run:N
/sys/module/dmatest/parameters/test_buf_size:16384
/sys/module/dmatest/parameters/threads_per_chan:1
/sys/module/dmatest/parameters/timeout:2000
/sys/module/dmatest/parameters/verbose:N
/sys/module/dmatest/parameters/wait:Y
/sys/module/dmatest/parameters/xor_sources:3
root@jetson:/home/ubuntu#

I have used dma0chan19 in the above dma transfer tests.

If you still have any issues please help sharing your actual use case.

Thanks & Regards,
Sandipan

I have run into a DMA issue with PCIe too — please help me!

Hi, I used the same code for PCIe transfers, where the PCIe endpoint is an Orin Nano and the root port is an x86 host, but it does not work. It also fails when using the Linux PCIe test driver (pci-epf-test) on the Nano platform.