// With the code snippet below you can reproduce the memory leak we experienced. In plain words:
//
// (1) Create a tile pool of 64 KB (1 page).
// (2) Resize it by a second page.
// (3) This is the important step: when mapping the page that lies outside the initial pool size
//     into a tiled resource buffer, dedicated GPU memory explodes, in our case by ~383 MB on a
//     GTX Titan and by ~704 MB on an RTX 2080 Ti.
//
// bool forceLeak = true;
//
// -> Sysinternals Process Explorer: dedicated GPU MEM: 4.8 MB ( GeForce RTX 2080 Ti )
// -> Sysinternals Process Explorer: dedicated GPU MEM: 3.6 MB ( GeForce GTX Titan )
//
// TiledResourceLeakRepoCase( device, context, forceLeak );
//
// -> Sysinternals Process Explorer: dedicated GPU MEM: 708.8 MB ( GeForce RTX 2080 Ti )
// -> Sysinternals Process Explorer: dedicated GPU MEM: 387.6 MB ( GeForce GTX Titan )

#include <d3d11_2.h>
#include <wrl/client.h>
#include <cstdint>

void TiledResourceLeakRepoCase( ID3D11Device2* device, ID3D11DeviceContext2* context, bool bDoLeak )
{
    static Microsoft::WRL::ComPtr<ID3D11Buffer> my_tilePool;
    static Microsoft::WRL::ComPtr<ID3D11Buffer> my_tiledResource;

    // (1) Create a tile pool of 64 KB (one page).
    D3D11_BUFFER_DESC tilePoolDesc;
    ZeroMemory( &tilePoolDesc, sizeof( tilePoolDesc ) );
    tilePoolDesc.ByteWidth = ( 1u << 16u );
    tilePoolDesc.Usage = D3D11_USAGE_DEFAULT;
    tilePoolDesc.MiscFlags = D3D11_RESOURCE_MISC_TILE_POOL;
    device->CreateBuffer( &tilePoolDesc, nullptr, &my_tilePool );

    // (2) Grow the pool by a second page.
    context->ResizeTilePool( my_tilePool.Get(), ( 1u << 16u ) * 2u );
    context->Flush();

    // Create a 128 MB tiled buffer (2048 tiles of 64 KB).
    D3D11_BUFFER_DESC tiledResourceDesc;
    ZeroMemory( &tiledResourceDesc, sizeof( tiledResourceDesc ) );
    tiledResourceDesc.ByteWidth = ( 1u << D3D11_REQ_BUFFER_RESOURCE_TEXEL_COUNT_2_TO_EXP );
    tiledResourceDesc.Usage = D3D11_USAGE_DEFAULT;
    tiledResourceDesc.MiscFlags = D3D11_RESOURCE_MISC_TILED;
    tiledResourceDesc.StructureByteStride = 16u;
    device->CreateBuffer( &tiledResourceDesc, nullptr, &my_tiledResource );

    // (3) Map the whole tiled buffer (2048 tiles) onto a single tile of the pool.
    D3D11_TILED_RESOURCE_COORDINATE kStartCoordinate[1];
    ZeroMemory( kStartCoordinate, sizeof( kStartCoordinate ) );
    kStartCoordinate[0u].X = 0u;
    kStartCoordinate[0u].Y = 0u;
    kStartCoordinate[0u].Z = 0u;
    kStartCoordinate[0u].Subresource = 0u;

    D3D11_TILE_REGION_SIZE kRegionSize[1];
    ZeroMemory( kRegionSize, sizeof( kRegionSize ) );
    kRegionSize[0].bUseBox = FALSE;
    kRegionSize[0].Width = 0u;
    kRegionSize[0].Height = 0u;
    kRegionSize[0].Depth = 0u;
    kRegionSize[0].NumTiles = 2048u;

    // Map the new buffer into the tile pool. With bDoLeak the mapping starts at the second page,
    // i.e. the page outside the initial pool size.
    const uint32_t uiRangeFlags[1] = { D3D11_TILE_RANGE_REUSE_SINGLE_TILE };
    const uint32_t uiRangeStartOffset[1] = { bDoLeak ? 1u : 0u };

    // When choosing a range tile count of 1u we get the following error:
    // D3D11 ERROR: ID3D11DeviceContext::UpdateTileMappings: The total number of tiles specified in Tile Region(s), 2048, does not equal the number of tiles in the Tile Range(s), 1. [ EXECUTION ERROR #3146124: UPDATETILEMAPPINGS_INVALID_PARAMETER]
    // ...
    //const uint32_t uiRangeTileCount[1] = { 1u };
    // ... so we guess we have to assign 2048u. Is that right?
    const uint32_t uiRangeTileCount[1] = { 2048u };

    // -> Sysinternals Process Explorer: dedicated GPU MEM: 4.8 MB ( GeForce RTX 2080 Ti )
    // -> Sysinternals Process Explorer: dedicated GPU MEM: 3.6 MB ( GeForce GTX Titan )
    HRESULT hr = context->UpdateTileMappings( my_tiledResource.Get(), 1u, &kStartCoordinate[0], &kRegionSize[0],
                                              my_tilePool.Get(), 1u, &uiRangeFlags[0], &uiRangeStartOffset[0],
                                              &uiRangeTileCount[0], 0u );
    // -> Sysinternals Process Explorer: dedicated GPU MEM: 708.8 MB ( GeForce RTX 2080 Ti )
    // -> Sysinternals Process Explorer: dedicated GPU MEM: 387.6 MB ( GeForce GTX Titan )
    //
    // ====================== -> This memory jump from 4.8 MB to 708.8 MB is wrong! ======================
}
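
// Optional helper, not part of the original repro: a minimal sketch of how the "dedicated GPU MEM"
// numbers above could also be sampled in-process instead of via Sysinternals Process Explorer.
// It assumes dxgi1_4.h is available; IDXGIAdapter3::QueryVideoMemoryInfo reports the process'
// current usage of the adapter's local (dedicated) memory segment, which should roughly match
// the Process Explorer column (assumption). The function name below is our own choice.
#include <dxgi1_4.h>

// Returns the process' current dedicated (local) video memory usage in bytes, or 0 on failure.
static uint64_t QueryLocalVideoMemoryUsage( ID3D11Device2* device )
{
    Microsoft::WRL::ComPtr<IDXGIDevice> dxgiDevice;
    if( FAILED( device->QueryInterface( IID_PPV_ARGS( &dxgiDevice ) ) ) )
        return 0u;

    Microsoft::WRL::ComPtr<IDXGIAdapter> adapter;
    if( FAILED( dxgiDevice->GetAdapter( &adapter ) ) )
        return 0u;

    Microsoft::WRL::ComPtr<IDXGIAdapter3> adapter3;
    if( FAILED( adapter.As( &adapter3 ) ) )
        return 0u;

    DXGI_QUERY_VIDEO_MEMORY_INFO info = {};
    if( FAILED( adapter3->QueryVideoMemoryInfo( 0u, DXGI_MEMORY_SEGMENT_GROUP_LOCAL, &info ) ) )
        return 0u;

    return info.CurrentUsage;
}

// Example usage around the repro:
//   const uint64_t before = QueryLocalVideoMemoryUsage( device );
//   TiledResourceLeakRepoCase( device, context, true /*forceLeak*/ );
//   context->Flush();
//   const uint64_t after = QueryLocalVideoMemoryUsage( device );
//   printf( "dedicated GPU mem delta: %.1f MB\n", double( after - before ) / ( 1024.0 * 1024.0 ) );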