tu: Fix imported memory not being affected by DEVICE_ADDRESS_CAPTURE_REPLAY

It's valid to import memory with DEVICE_ADDRESS_CAPTURE_REPLAY_BIT
and we should allocate the iova from the end of the VMA heap, the same as
for ordinary memory allocations. This is important for replaying such
memory when, during replay, it is not imported.

Fixes replay errors with RenderDoc.

Signed-off-by: Danylo Piliaiev <dpiliaiev@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/40378>
This commit is contained in:
Author: Danylo Piliaiev, 2026-03-12 19:09:24 +01:00; committed by Marge Bot
parent 48be91d14b
commit b86ecec80b
6 changed files with 34 additions and 22 deletions

View file

@ -3571,6 +3571,16 @@ tu_AllocateMemory(VkDevice _device,
mem->size = pAllocateInfo->allocationSize;
mem->refcnt = 1;
BITMASK_ENUM(tu_bo_alloc_flags) alloc_flags = TU_BO_ALLOC_NO_FLAGS;
const VkMemoryAllocateFlagsInfo *flags_info = vk_find_struct_const(
pAllocateInfo->pNext, MEMORY_ALLOCATE_FLAGS_INFO);
if (flags_info &&
(flags_info->flags &
VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)) {
alloc_flags |= TU_BO_ALLOC_REPLAYABLE;
}
const VkImportMemoryFdInfoKHR *fd_info =
vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
@ -3585,8 +3595,9 @@ tu_AllocateMemory(VkDevice _device,
* reference counting. We need to maintain a per-instance handle-to-bo
* table and add reference count to tu_bo.
*/
result = tu_bo_init_dmabuf(device, &mem->bo,
pAllocateInfo->allocationSize, fd_info->fd);
result =
tu_bo_init_dmabuf(device, &mem->bo, pAllocateInfo->allocationSize,
alloc_flags, fd_info->fd);
if (result == VK_SUCCESS) {
/* take ownership and close the fd */
close(fd_info->fd);
@ -3596,13 +3607,13 @@ tu_AllocateMemory(VkDevice _device,
const native_handle_t *handle = AHardwareBuffer_getNativeHandle(mem->vk.ahardware_buffer);
assert(handle->numFds > 0);
size_t size = lseek(handle->data[0], 0, SEEK_END);
result = tu_bo_init_dmabuf(device, &mem->bo, size, handle->data[0]);
result = tu_bo_init_dmabuf(device, &mem->bo, size, alloc_flags,
handle->data[0]);
#else
result = VK_ERROR_FEATURE_NOT_PRESENT;
#endif
} else {
uint64_t client_address = 0;
BITMASK_ENUM(tu_bo_alloc_flags) alloc_flags = TU_BO_ALLOC_NO_FLAGS;
const VkMemoryOpaqueCaptureAddressAllocateInfo *replay_info =
vk_find_struct_const(pAllocateInfo->pNext,
@ -3612,14 +3623,6 @@ tu_AllocateMemory(VkDevice _device,
alloc_flags |= TU_BO_ALLOC_REPLAYABLE;
}
const VkMemoryAllocateFlagsInfo *flags_info = vk_find_struct_const(
pAllocateInfo->pNext, MEMORY_ALLOCATE_FLAGS_INFO);
if (flags_info &&
(flags_info->flags &
VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)) {
alloc_flags |= TU_BO_ALLOC_REPLAYABLE;
}
const VkExportMemoryAllocateInfo *export_info =
vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO);
if (export_info && (export_info->handleTypes &

View file

@ -71,10 +71,12 @@ VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
struct tu_bo **bo,
uint64_t size,
enum tu_bo_alloc_flags flags,
int fd)
{
assert(!(flags & ~TU_BO_ALLOC_REPLAYABLE));
size = align64(size, os_page_size);
VkResult result = dev->instance->knl->bo_init_dmabuf(dev, bo, size, fd);
VkResult result = dev->instance->knl->bo_init_dmabuf(dev, bo, size, flags, fd);
if (result != VK_SUCCESS)
return result;

View file

@ -135,7 +135,7 @@ struct tu_knl {
struct tu_sparse_vma *lazy_vma,
const char *name);
VkResult (*bo_init_dmabuf)(struct tu_device *dev, struct tu_bo **out_bo,
uint64_t size, int prime_fd);
uint64_t size, enum tu_bo_alloc_flags flags, int prime_fd);
int (*bo_export_dmabuf)(struct tu_device *dev, struct tu_bo *bo);
VkResult (*bo_alloc_lazy)(struct tu_device *dev, struct tu_bo *bo);
VkResult (*bo_map)(struct tu_device *dev, struct tu_bo *bo, void *placed_addr);
@ -211,6 +211,7 @@ VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
struct tu_bo **bo,
uint64_t size,
enum tu_bo_alloc_flags flags,
int fd);
int

View file

@ -976,8 +976,11 @@ static VkResult
msm_bo_init_dmabuf(struct tu_device *dev,
struct tu_bo **out_bo,
uint64_t size,
enum tu_bo_alloc_flags flags,
int prime_fd)
{
flags = (enum tu_bo_alloc_flags)(flags | TU_BO_ALLOC_DMABUF);
/* lseek() to get the real size */
off_t real_size = lseek(prime_fd, 0, SEEK_END);
lseek(prime_fd, 0, SEEK_SET);
@ -1016,7 +1019,7 @@ msm_bo_init_dmabuf(struct tu_device *dev,
}
VkResult result =
tu_allocate_iova(dev, gem_handle, size, 0, TU_BO_ALLOC_DMABUF, &iova);
tu_allocate_iova(dev, gem_handle, size, 0, flags, &iova);
if (result != VK_SUCCESS) {
tu_gem_close(dev, gem_handle);
@ -1024,7 +1027,7 @@ msm_bo_init_dmabuf(struct tu_device *dev,
}
result =
tu_bo_init(dev, NULL, bo, gem_handle, size, iova, TU_BO_ALLOC_DMABUF, "dmabuf");
tu_bo_init(dev, NULL, bo, gem_handle, size, iova, flags, "dmabuf");
if (result != VK_SUCCESS) {
tu_free_iova(dev, iova, size);

View file

@ -783,12 +783,14 @@ static VkResult
virtio_bo_init_dmabuf(struct tu_device *dev,
struct tu_bo **out_bo,
uint64_t size,
enum tu_bo_alloc_flags flags,
int prime_fd)
{
MESA_TRACE_FUNC();
struct vdrm_device *vdrm = dev->vdev->vdrm;
VkResult result;
struct tu_bo* bo = NULL;
flags = (enum tu_bo_alloc_flags)(flags | TU_BO_ALLOC_DMABUF);
/* lseek() to get the real size */
off_t real_size = lseek(prime_fd, 0, SEEK_END);
@ -836,8 +838,8 @@ virtio_bo_init_dmabuf(struct tu_device *dev,
bo->res_id = res_id;
mtx_lock(&dev->vma_mutex);
result = virtio_allocate_userspace_iova_locked(dev, handle, size, 0,
TU_BO_ALLOC_DMABUF, &iova);
result = virtio_allocate_userspace_iova_locked(dev, handle, size, 0, flags,
&iova);
mtx_unlock(&dev->vma_mutex);
if (result != VK_SUCCESS) {
vdrm_bo_close(dev->vdev->vdrm, handle);
@ -845,7 +847,7 @@ virtio_bo_init_dmabuf(struct tu_device *dev,
}
result =
tu_bo_init(dev, NULL, bo, handle, size, iova, TU_BO_ALLOC_NO_FLAGS, "dmabuf");
tu_bo_init(dev, NULL, bo, handle, size, iova, flags, "dmabuf");
if (result != VK_SUCCESS) {
util_vma_heap_free(&dev->vma, iova, size);
memset(bo, 0, sizeof(*bo));

View file

@ -97,7 +97,7 @@ bo_init_new_dmaheap(struct tu_device *dev, struct tu_bo **out_bo, uint64_t size,
"DMA_HEAP_IOCTL_ALLOC failed (%s)", strerror(errno));
}
return tu_bo_init_dmabuf(dev, out_bo, -1, alloc.fd);
return tu_bo_init_dmabuf(dev, out_bo, -1, TU_BO_ALLOC_NO_FLAGS, alloc.fd);
}
static VkResult
@ -118,7 +118,7 @@ bo_init_new_ion(struct tu_device *dev, struct tu_bo **out_bo, uint64_t size,
"ION_IOC_NEW_ALLOC failed (%s)", strerror(errno));
}
return tu_bo_init_dmabuf(dev, out_bo, -1, alloc.fd);
return tu_bo_init_dmabuf(dev, out_bo, -1, TU_BO_ALLOC_NO_FLAGS, alloc.fd);
}
static VkResult
@ -160,7 +160,7 @@ bo_init_new_ion_legacy(struct tu_device *dev, struct tu_bo **out_bo, uint64_t si
"ION_IOC_FREE failed (%s)", strerror(errno));
}
return tu_bo_init_dmabuf(dev, out_bo, -1, share.fd);
return tu_bo_init_dmabuf(dev, out_bo, -1, TU_BO_ALLOC_NO_FLAGS, share.fd);
}
static VkResult
@ -328,6 +328,7 @@ static VkResult
kgsl_bo_init_dmabuf(struct tu_device *dev,
struct tu_bo **out_bo,
uint64_t size,
enum tu_bo_alloc_flags flags,
int fd)
{
struct kgsl_gpuobj_import_dma_buf import_dmabuf = {