PCIe RAM share between two Jetson NX, but I can't r/w the RAM of the second page on the RP side

My hardware is two Jetson NX modules with their PCIe ports connected by a cable, and I have set up PCIe RAM-share mode on the EP/RP sides. The PCIe info looks like this:

PCIe BAR info on the EP side:
[ 118.867618] pci_epf_nv_test pci_epf_nv_test.0: BAR0 RAM phys: 0x22ae8e000
[ 118.867639] pci_epf_nv_test pci_epf_nv_test.0: BAR0 RAM IOVA: 0xffff0000
[ 118.867666] pci_epf_nv_test pci_epf_nv_test.0: BAR0 RAM virt: 0xffffff800bf87000

PCIe info on the RP side:
0005:01:00.0 RAM memory: NVIDIA Corporation Device 0001
Flags: fast devsel, IRQ 255
Memory at 1f40100000 (32-bit, non-prefetchable) [size=64K]
Memory at 1c00000000 (64-bit, prefetchable) [size=128K]
Memory at 1f40000000 (64-bit, non-prefetchable) [size=1M]

Now I can r/w multi-page RAM on the EP side, like this:
sudo ./ummemcpy 0x22ae8e000 w 0x100000
w (1021) 0x001003FD
w (1022) 0x001003FE
w (1023) 0x001003FF
w (1024) 0x00100400
w (1025) 0x00100401
w (1026) 0x00100402

But I can't r/w the second page of RAM on the RP side, like this:
sudo ./ummemcpy 0x1f40100000 w
r (1022) 0x001003FE
r (1023) 0x001003FF
r (1024) 0xFFFFFFFF
r (1025) 0xFFFFFFFF
r (1026) 0xFFFFFFFF
All data read from the second page on the RP side is 0xFFFFFFFF. What's wrong with it?

My program code is ummemcpy.c. Thank you for your advice.

ummemcpy.c:
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <ctype.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#define FATAL do { perror("fatal error"); exit(1); } while (0)

#define MAP_SIZE 8192
#define MAP_MASK (MAP_SIZE - 1)

int main(int argc, char **argv) {
    int fd;
    void *map_base;
    volatile uint32_t *virt_addr;
    uint32_t read_result, writeval;
    off_t target;
    int access_type = 'w';

    target = strtoul(argv[1], 0, 0);
    if (argc > 2)
        access_type = tolower(argv[2][0]);

    if ((fd = open("/dev/mem", O_RDWR | O_SYNC)) == -1) FATAL;

    map_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                    target & ~MAP_MASK);
    if (map_base == MAP_FAILED) FATAL;

    if (argc == 3) {
        virt_addr = map_base;
        switch (access_type) {
        case 'w':
            /* read back MAP_SIZE bytes as 32-bit words */
            for (int i = 0; i < MAP_SIZE / 4; i++) {
                read_result = virt_addr[i];
                printf("r (%d) 0x%08X\n", i, read_result);
            }
            break;
        default:
            fprintf(stderr, "Illegal data type '%c'.\n", access_type);
            exit(2);
        }
    }

    if (argc == 4) {
        virt_addr = map_base;
        writeval = strtoul(argv[3], 0, 0);
        switch (access_type) {
        case 'w':
            /* write an incrementing pattern and read each word back */
            for (int i = 0; i < MAP_SIZE / 4; i++) {
                virt_addr[i] = writeval + i;
                read_result = virt_addr[i];
                printf("w (%d) 0x%08X\n", i, read_result);
            }
            break;
        }
    }

    if (munmap(map_base, MAP_SIZE) == -1) FATAL;
    close(fd);
    return 0;
}
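(For completeness: ummemcpy.c is built with a plain gcc -o ummemcpy ummemcpy.c and has to run as root because it maps /dev/mem; the address argument is masked down to the 8 KiB MAP_SIZE boundary before mapping.)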

Well, that is because the mapping is happening only for one PAGE_SIZE in the driver.
Please apply the patch below to increase the BAR size from one PAGE_SIZE to 512 MB.

diff --git a/drivers/pci/endpoint/functions/pci-epf-nv-test.c b/drivers/pci/endpoint/functions/pci-epf-nv-test.c
index 8b2a1dcecab8..d3496a199842 100644
--- a/drivers/pci/endpoint/functions/pci-epf-nv-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-nv-test.c
@@ -16,7 +16,7 @@
 #include <linux/pci-epc.h>
 #include <linux/pci-epf.h>
  
-#define BAR0_SIZE SZ_64K
+#define BAR0_SIZE SZ_512M
  
 struct pci_epf_nv_test {
    struct pci_epf_header header;
@@ -30,14 +30,11 @@ static void pci_epf_nv_test_unbind(struct pci_epf *epf)
    struct pci_epf_nv_test *epfnv = epf_get_drvdata(epf);
    struct pci_epc *epc = epf->epc;
    struct device *cdev = epc->dev.parent;
-   struct iommu_domain *domain = iommu_get_domain_for_dev(cdev);
  
    pci_epc_stop(epc);
    pci_epc_clear_bar(epc, BAR_0);
-   vunmap(epfnv->bar0_ram_map);
-   iommu_unmap(domain, epfnv->bar0_iova, PAGE_SIZE);
-   iommu_dma_free_iova(cdev, epfnv->bar0_iova, BAR0_SIZE);
-   __free_pages(epfnv->bar0_ram_page, 1);
+   dma_free_coherent(cdev, BAR0_SIZE, epfnv->bar0_ram_map,
+             epfnv->bar0_iova);
 }
  
 static int pci_epf_nv_test_bind(struct pci_epf *epf)
@@ -47,7 +44,6 @@ static int pci_epf_nv_test_bind(struct pci_epf *epf)
    struct pci_epf_header *header = epf->header;
    struct device *fdev = &epf->dev;
    struct device *cdev = epc->dev.parent;
-   struct iommu_domain *domain = iommu_get_domain_for_dev(cdev);
    int ret;
  
    ret = pci_epc_write_header(epc, header);
@@ -56,60 +52,29 @@ static int pci_epf_nv_test_bind(struct pci_epf *epf)
        return ret;
    }
  
-   epfnv->bar0_ram_page = alloc_pages(GFP_KERNEL, 1);
-   if (!epfnv->bar0_ram_page) {
-       dev_err(fdev, "alloc_pages() failed\n");
-       ret = -ENOMEM;
-       goto fail;
-   }
-   dev_info(fdev, "BAR0 RAM phys: 0x%llx\n",
-        page_to_phys(epfnv->bar0_ram_page));
-
-   epfnv->bar0_iova = iommu_dma_alloc_iova(cdev, BAR0_SIZE,
-                       cdev->coherent_dma_mask);
-   if (!epfnv->bar0_iova) {
-       dev_err(fdev, "iommu_dma_alloc_iova() failed\n");
-       ret = -ENOMEM;
-       goto fail_free_pages;
-   }
-
-   dev_info(fdev, "BAR0 RAM IOVA: 0x%08llx\n", epfnv->bar0_iova);
-
-   ret = iommu_map(domain, epfnv->bar0_iova,
-           page_to_phys(epfnv->bar0_ram_page),
-           PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
-   if (ret) {
-       dev_err(fdev, "iommu_map(RAM) failed: %d\n", ret);
-       goto fail_free_iova;
-   }
-   epfnv->bar0_ram_map = vmap(&epfnv->bar0_ram_page, 1, VM_MAP,
-                  PAGE_KERNEL);
+   epfnv->bar0_ram_map = dma_alloc_coherent(cdev, BAR0_SIZE,
+                        &epfnv->bar0_iova, GFP_KERNEL);
    if (!epfnv->bar0_ram_map) {
-       dev_err(fdev, "vmap() failed\n");
+       dev_err(fdev, "dma_alloc_coherent() failed\n");
        ret = -ENOMEM;
-       goto fail_unmap_ram_iova;
+       return ret;
    }
-   dev_info(fdev, "BAR0 RAM virt: 0x%p\n", epfnv->bar0_ram_map);
+   dev_info(fdev, "BAR0 RAM IOVA: 0x%llx\n", epfnv->bar0_iova);
  
    ret = pci_epc_set_bar(epc, BAR_0, epfnv->bar0_iova, BAR0_SIZE,
                  PCI_BASE_ADDRESS_SPACE_MEMORY |
-                 PCI_BASE_ADDRESS_MEM_TYPE_32);
+                 PCI_BASE_ADDRESS_MEM_TYPE_32 |
+                 PCI_BASE_ADDRESS_MEM_PREFETCH);
    if (ret) {
        dev_err(fdev, "pci_epc_set_bar() failed: %d\n", ret);
-       goto fail_unmap_ram_virt;
+       goto fail_set_bar;
    }
  
    return 0;
  
-fail_unmap_ram_virt:
-   vunmap(epfnv->bar0_ram_map);
-fail_unmap_ram_iova:
-   iommu_unmap(domain, epfnv->bar0_iova, PAGE_SIZE);
-fail_free_iova:
-   iommu_dma_free_iova(cdev, epfnv->bar0_iova, BAR0_SIZE);
-fail_free_pages:
-   __free_pages(epfnv->bar0_ram_page, 1);
-fail:
+fail_set_bar:
+   dma_free_coherent(cdev, BAR0_SIZE, epfnv->bar0_ram_map,
+             epfnv->bar0_iova);
    return ret;
 }

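(The patch applies against drivers/pci/endpoint/functions/pci-epf-nv-test.c in the kernel tree used on the endpoint side, e.g. with git apply or patch -p1; afterwards the EP kernel, or the pci-epf-nv-test module if it is built as a module, has to be rebuilt and reinstalled.)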
Hi vidyas,
Thank you for your reply.
I tried your patch, and now the PCIe info on the RP side is:

0005:00:00.0 PCI bridge: NVIDIA Corporation Device 1ad0 (rev a1) (prog-if 00 [Normal decode])
Flags: bus master, fast devsel, latency 0, IRQ 35
Bus: primary=00, secondary=01, subordinate=ff, sec-latency=0
Memory behind bridge: 40000000-6fffffff
Prefetchable memory behind bridge: 0000001c00000000-0000001c000fffff
Capabilities:
Kernel driver in use: pcieport

0005:01:00.0 RAM memory: NVIDIA Corporation Device 0001
Flags: fast devsel, IRQ 255
Memory at 1f40000000 (32-bit, prefetchable) [size=512M]
Memory at 1c00000000 (64-bit, prefetchable) [size=128K]
Memory at 1f60000000 (64-bit, non-prefetchable) [size=1M]
Capabilities:

PCIe info on the EP side is:

[ 546.963005] pci_epf_nv_test pci_epf_nv_test.0: BAR0 RAM IOVA: 0xe0000000
[ 548.359644] tegra-pcie-dw 141a0000.pcie_ep: EP init done
[ 548.564603] tegra-pcie-dw 141a0000.pcie_ep: Link didn't go to detect state
[ 548.565801] tegra-pcie-dw 141a0000.pcie_ep: EP deinit done
[ 548.672534] tegra-pcie-dw 141a0000.pcie_ep: EP init done

Now I used the command to r/w data to the PCIe memory on the RP side, and it looks correct across two RAM pages, like this:

w (1021) 0x000013FE
w (1022) 0x000013FF
w (1023) 0x00001400
w (1024) 0x00001401
w (1025) 0x00001402
w (1026) 0x00001403
w (1027) 0x00001404
w (1028) 0x00001405

But there is a new problem when I r/w data from/to the PCIe memory on the EP side: all the data is unexpected, like this:

r (1020) 0x82010002
r (1021) 0x00832000
r (1022) 0x21072308
r (1023) 0x10341400
r (1024) 0x319D2504
r (1025) 0x80791042
r (1026) 0x40C00000
r (1027) 0x61000108

What's wrong with it?

In my use case I use PCIe memory to share data between the two NX boards, and the shared RAM size should be bigger than 200 KB.

PCIe vnet mode isn't suitable for my use case, because we need to r/w the data in the shared memory through a C pointer.

How are you accessing the PCIe memory on the EP side? If any command is being used, what is that command?
If the command is something like devmem, which works on physical addresses, then we can't really use that here, because what we have is an IOVA and not a PA. The fact that we use dma_alloc_coherent() leaves us with only a CPU-VA and an IOVA, not a PA.

Yes, I used a command like devmem; the source code is ummemcpy.c, as shown in comment #2. My command is:
sudo ./ummemcpy 0xe0000000 w 0x20001

I know 0xe0000000 isn't the physical address of the PCIe memory, so how should I design this for my use case as described in comment #5?

Hi vidyas, is there any suggestion for me? I think it is a very common case. Looking forward to your reply.

Nope. This is not a common case.
A common case would be:
a) accessing this memory through the CPU using the CPU-VA, which we get from dma_alloc_coherent()
b) accessing this memory through the device using the IOVA, which again we get from dma_alloc_coherent()
Accessing it through a physical address is discouraged, IMHO.
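One way to follow suggestion (a) while still reading/writing the shared RAM through a plain C pointer from user space (as required in comment #5) is to let the EP function driver expose its dma_alloc_coherent() buffer via mmap(). The sketch below is only an illustration under stated assumptions and is not part of pci-epf-nv-test.c or the Jetson BSP: the device name epf_bar0 and the bar0_* variables are made up here, and would correspond to the cdev, CPU-VA, IOVA and BAR0_SIZE that the patched bind() already has.

/* Illustrative only - NOT part of pci-epf-nv-test.c as shipped.
 * Assumes these point at the buffer allocated in bind():
 *   bar0_dev    = cdev used for dma_alloc_coherent()
 *   bar0_cpu_va = epfnv->bar0_ram_map
 *   bar0_iova   = epfnv->bar0_iova
 *   bar0_size   = BAR0_SIZE
 */
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>

static struct device *bar0_dev;
static void *bar0_cpu_va;
static dma_addr_t bar0_iova;
static size_t bar0_size;

static int epf_bar0_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* Re-map the coherent buffer into the calling process so the
         * shared RAM can be used through an ordinary C pointer. */
        return dma_mmap_coherent(bar0_dev, vma, bar0_cpu_va, bar0_iova,
                                 bar0_size);
}

static const struct file_operations epf_bar0_fops = {
        .owner = THIS_MODULE,
        .mmap  = epf_bar0_mmap,
};

static struct miscdevice epf_bar0_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name  = "epf_bar0",            /* creates /dev/epf_bar0 */
        .fops  = &epf_bar0_fops,
};
/* misc_register(&epf_bar0_misc) would be called from bind() after
 * dma_alloc_coherent() succeeds, misc_deregister() from unbind(). */

User space on the EP side would then mmap /dev/epf_bar0 instead of going through /dev/mem with a physical address, for example:

/* Hypothetical EP-side user-space test: map /dev/epf_bar0 and access
 * the shared RAM through a C pointer (no physical address involved). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t size = 512UL << 20;      /* must not exceed BAR0_SIZE */
        int fd = open("/dev/epf_bar0", O_RDWR);
        if (fd < 0)
                return 1;
        volatile uint32_t *shm = mmap(NULL, size, PROT_READ | PROT_WRITE,
                                      MAP_SHARED, fd, 0);
        if (shm == MAP_FAILED)
                return 1;
        shm[1024] = 0x20001;            /* first word of the second 4 KiB page */
        printf("r 0x%08X\n", shm[1024]);
        munmap((void *)shm, size);
        close(fd);
        return 0;
}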