panvk: Protect access to the virtual address heap

Device memory can be allocated from different threads and thus requires
locking when allocating/freeing virtual addresses.

Fixes: 53fb1d99ca ("panvk: Transition to explicit VA assignment on v10+")
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Mary Guillemard <mary.guillemard@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/31282>
Author: Boris Brezillon <boris.brezillon@collabora.com>
Date:   2024-09-20 16:18:26 +02:00
Commit: 22841babee (parent: 5bdcc290e3)

5 changed files with 54 additions and 6 deletions
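The change applies one pattern throughout: util_vma_heap is not thread-safe, so every allocation and free on the shared per-device VA heap gets bracketed by the new dev->as.lock. A minimal sketch of the pattern, assuming mesa's util/simple_mtx.h and util/vma.h APIs (va_ctx, va_alloc and va_free are hypothetical names, not part of the driver):

   #include "util/simple_mtx.h"
   #include "util/vma.h"

   struct va_ctx {
      simple_mtx_t lock;         /* serializes heap access across threads */
      struct util_vma_heap heap; /* VA allocator, not thread-safe itself */
   };

   static uint64_t
   va_alloc(struct va_ctx *ctx, uint64_t size, uint64_t align)
   {
      simple_mtx_lock(&ctx->lock);
      uint64_t va = util_vma_heap_alloc(&ctx->heap, size, align);
      simple_mtx_unlock(&ctx->lock);
      return va; /* 0 means allocation failure */
   }

   static void
   va_free(struct va_ctx *ctx, uint64_t va, uint64_t size)
   {
      simple_mtx_lock(&ctx->lock);
      util_vma_heap_free(&ctx->heap, va, size);
      simple_mtx_unlock(&ctx->lock);
   }

The commit instead open-codes the lock/unlock at each call site, which keeps the locking visible right next to the pan_kmod_vm_bind() calls it brackets.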

@@ -44,6 +44,11 @@ finish_render_desc_ringbuf(struct panvk_queue *queue)
       ASSERTED int ret =
          pan_kmod_vm_bind(dev->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &op, 1);
       assert(!ret);
+
+      simple_mtx_lock(&dev->as.lock);
+      util_vma_heap_free(&dev->as.heap, ringbuf->addr.dev,
+                         RENDER_DESC_RINGBUF_SIZE * 2);
+      simple_mtx_unlock(&dev->as.lock);
    }
 
    if (ringbuf->addr.host) {
@@ -64,6 +69,7 @@ init_render_desc_ringbuf(struct panvk_queue *queue)
    uint32_t flags = panvk_device_adjust_bo_flags(dev, PAN_KMOD_BO_FLAG_NO_MMAP);
    struct panvk_desc_ringbuf *ringbuf = &queue->render_desc_ringbuf;
    const size_t size = RENDER_DESC_RINGBUF_SIZE;
+   uint64_t dev_addr = 0;
    VkResult result;
    int ret;
@@ -76,7 +82,7 @@ init_render_desc_ringbuf(struct panvk_queue *queue)
    ringbuf->addr.host = pan_kmod_bo_mmap(
       ringbuf->bo, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, NULL);
    if (ringbuf->addr.host == MAP_FAILED) {
-      result = vk_errorf(phys_dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+      result = vk_errorf(phys_dev, VK_ERROR_OUT_OF_HOST_MEMORY,
                          "Failed to CPU map ringbuf BO");
       goto err_finish_ringbuf;
    }
@@ -85,7 +91,15 @@ init_render_desc_ringbuf(struct panvk_queue *queue)
    /* We choose the alignment to guarantee that we won't ever cross a 4G
     * boundary when accessing the mapping. This way we can encode the wraparound
     * using 32-bit operations. */
-   uint64_t dev_addr = util_vma_heap_alloc(&dev->as.heap, size * 2, size * 2);
+   simple_mtx_lock(&dev->as.lock);
+   dev_addr = util_vma_heap_alloc(&dev->as.heap, size * 2, size * 2);
+   simple_mtx_unlock(&dev->as.lock);
+
+   if (!dev_addr) {
+      result = vk_errorf(phys_dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+                         "Failed to allocate virtual address for ringbuf BO");
+      goto err_finish_ringbuf;
+   }
 
    struct pan_kmod_vm_op vm_ops[] = {
       {
@@ -152,6 +166,12 @@ init_render_desc_ringbuf(struct panvk_queue *queue)
    return VK_SUCCESS;
 
 err_finish_ringbuf:
+   if (dev_addr && !ringbuf->addr.dev) {
+      simple_mtx_lock(&dev->as.lock);
+      util_vma_heap_free(&dev->as.heap, dev_addr, size * 2);
+      simple_mtx_unlock(&dev->as.lock);
+   }
+
    finish_render_desc_ringbuf(queue);
    return result;
 }
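The comment about the 4G boundary is doing real work here: the ring buffer window is size * 2 bytes allocated with size * 2 alignment, and as long as size * 2 is a power of two no larger than 4 GiB, an aligned window can never straddle a 4 GiB boundary, so the wraparound can be computed on the low 32 bits of the address alone. A sketch of that invariant (ALLOC_SIZE is a hypothetical stand-in for size * 2):

   #include <assert.h>
   #include <stdint.h>

   /* Hypothetical stand-in for size * 2; must be a power of two <= 4 GiB. */
   #define ALLOC_SIZE (64ull << 20)

   void
   check_no_4g_crossing(uint64_t base)
   {
      /* util_vma_heap_alloc(heap, ALLOC_SIZE, ALLOC_SIZE) returns an
       * ALLOC_SIZE-aligned address... */
      assert((base & (ALLOC_SIZE - 1)) == 0);

      /* ...so the first and last byte of the window share their upper
       * 32 bits: the window sits entirely inside one 4 GiB segment. */
      assert((base >> 32) == ((base + ALLOC_SIZE - 1) >> 32));

      /* Addressing within the window therefore never carries into the
       * upper half, so 32-bit arithmetic on the low half is enough. */
      uint32_t lo = (uint32_t)base;
      uint32_t end = lo + (uint32_t)(ALLOC_SIZE - 1);
      assert(end >= lo); /* the 32-bit add cannot wrap */
      (void)end;
   }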

@@ -31,6 +31,7 @@ struct panvk_device {
    struct vk_device vk;
 
    struct {
+      simple_mtx_t lock;
       struct util_vma_heap heap;
    } as;

@@ -90,9 +90,11 @@ panvk_AllocateMemory(VkDevice _device,
    };
 
    if (!(device->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA)) {
+      simple_mtx_lock(&device->as.lock);
       op.va.start =
          util_vma_heap_alloc(&device->as.heap, op.va.size,
                              op.va.size > 0x200000 ? 0x200000 : 0x1000);
+      simple_mtx_unlock(&device->as.lock);
       if (!op.va.start) {
          result = vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
          goto err_put_bo;
@@ -103,7 +105,7 @@ panvk_AllocateMemory(VkDevice _device,
       pan_kmod_vm_bind(device->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &op, 1);
    if (ret) {
       result = vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
-      goto err_put_bo;
+      goto err_return_va;
    }
 
    mem->addr.dev = op.va.start;
@@ -137,6 +139,13 @@ panvk_AllocateMemory(VkDevice _device,
    return VK_SUCCESS;
 
+err_return_va:
+   if (!(device->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA)) {
+      simple_mtx_lock(&device->as.lock);
+      util_vma_heap_free(&device->as.heap, op.va.start, op.va.size);
+      simple_mtx_unlock(&device->as.lock);
+   }
+
 err_put_bo:
    pan_kmod_bo_put(mem->bo);
@@ -175,8 +184,11 @@ panvk_FreeMemory(VkDevice _device, VkDeviceMemory _mem,
       pan_kmod_vm_bind(device->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &op, 1);
    assert(!ret);
 
-   if (!(device->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA))
+   if (!(device->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA)) {
+      simple_mtx_lock(&device->as.lock);
       util_vma_heap_free(&device->as.heap, op.va.start, op.va.size);
+      simple_mtx_unlock(&device->as.lock);
+   }
 
    pan_kmod_bo_put(mem->bo);
    vk_device_memory_destroy(&device->vk, pAllocator, &mem->vk);
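Two details in this file are worth noting. First, the bind-failure path now jumps to the new err_return_va label, which hands the VA range back to the heap (under the lock) instead of leaking it as the old goto err_put_bo did. Second, the alignment passed to util_vma_heap_alloc() is op.va.size > 0x200000 ? 0x200000 : 0x1000, i.e. 2 MiB alignment for allocations above 2 MiB and 4 KiB pages otherwise; a hedged reading of that heuristic, as a standalone helper with a hypothetical name:

   #include <stdint.h>

   /* Hypothetical helper mirroring the alignment heuristic at the alloc
    * sites: buffers above 2 MiB get 2 MiB alignment (plausibly to allow
    * larger MMU block mappings; the commit itself doesn't say), everything
    * else falls back to 4 KiB pages. */
   static uint64_t
   va_alignment_for_size(uint64_t size)
   {
      return size > 0x200000 ? 0x200000 : 0x1000;
   }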

@@ -53,15 +53,17 @@ panvk_priv_bo_create(struct panvk_device *dev, size_t size, uint32_t flags,
    };
 
    if (!(dev->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA)) {
+      simple_mtx_lock(&dev->as.lock);
       op.va.start = util_vma_heap_alloc(
          &dev->as.heap, op.va.size, op.va.size > 0x200000 ? 0x200000 : 0x1000);
+      simple_mtx_unlock(&dev->as.lock);
       if (!op.va.start)
          goto err_munmap_bo;
    }
 
    ret = pan_kmod_vm_bind(dev->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &op, 1);
    if (ret)
-      goto err_munmap_bo;
+      goto err_return_va;
 
    priv_bo->addr.dev = op.va.start;
@@ -75,6 +77,13 @@ panvk_priv_bo_create(struct panvk_device *dev, size_t size, uint32_t flags,
    return priv_bo;
 
+err_return_va:
+   if (!(dev->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA)) {
+      simple_mtx_lock(&dev->as.lock);
+      util_vma_heap_free(&dev->as.heap, op.va.start, op.va.size);
+      simple_mtx_unlock(&dev->as.lock);
+   }
+
 err_munmap_bo:
    if (priv_bo->addr.host) {
       ret = os_munmap(priv_bo->addr.host, pan_kmod_bo_size(bo));
@@ -110,8 +119,11 @@ panvk_priv_bo_destroy(struct panvk_priv_bo *priv_bo)
       pan_kmod_vm_bind(dev->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &op, 1);
    assert(!ret);
 
-   if (!(dev->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA))
+   if (!(dev->kmod.vm->flags & PAN_KMOD_VM_FLAG_AUTO_VA)) {
+      simple_mtx_lock(&dev->as.lock);
       util_vma_heap_free(&dev->as.heap, op.va.start, op.va.size);
+      simple_mtx_unlock(&dev->as.lock);
+   }
 
    if (priv_bo->addr.host) {
       ret = os_munmap(priv_bo->addr.host, pan_kmod_bo_size(priv_bo->bo));

@@ -282,6 +282,7 @@ panvk_per_arch(create_device)(struct panvk_physical_device *physical_device,
       panfrost_clamp_to_usable_va_range(device->kmod.dev, 1ull << 32);
    uint32_t vm_flags = PAN_ARCH <= 7 ? PAN_KMOD_VM_FLAG_AUTO_VA : 0;
 
+   simple_mtx_init(&device->as.lock, mtx_plain);
    util_vma_heap_init(&device->as.heap, user_va_start,
                       user_va_end - user_va_start);
@@ -381,6 +382,7 @@ err_free_priv_bos:
    panvk_device_cleanup_mempools(device);
    pan_kmod_vm_destroy(device->kmod.vm);
    util_vma_heap_finish(&device->as.heap);
+   simple_mtx_destroy(&device->as.lock);
 
 err_destroy_kdev:
    pan_kmod_dev_destroy(device->kmod.dev);
@@ -415,6 +417,7 @@ panvk_per_arch(destroy_device)(struct panvk_device *device,
    panvk_device_cleanup_mempools(device);
    pan_kmod_vm_destroy(device->kmod.vm);
    util_vma_heap_finish(&device->as.heap);
+   simple_mtx_destroy(&device->as.lock);
 
    if (device->debug.decode_ctx)
       pandecode_destroy_context(device->debug.decode_ctx);
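Finally, the lock's lifetime mirrors the heap's: it is initialized immediately before util_vma_heap_init() and destroyed right after util_vma_heap_finish(), on both the error path and the normal teardown path. A condensed sketch of the pairing, again assuming the util/simple_mtx.h and util/vma.h APIs and reusing the hypothetical va_ctx from the first sketch:

   static void
   va_ctx_init(struct va_ctx *ctx, uint64_t start, uint64_t size)
   {
      simple_mtx_init(&ctx->lock, mtx_plain);
      util_vma_heap_init(&ctx->heap, start, size);
   }

   static void
   va_ctx_finish(struct va_ctx *ctx)
   {
      /* Tear down in reverse order; no allocation may race past this point. */
      util_vma_heap_finish(&ctx->heap);
      simple_mtx_destroy(&ctx->lock);
   }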