panvk: add blackhole bo

We want to be able to survive accesses to, from Vulkan's
perspective, unmapped regions of sparse resources. For that we
allocate a single page-sized bo, which we'll use to implement
sparse unmapping by mapping the address range to this bo.

Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35287>
This commit is contained in:
Caterina Shablia 2025-06-18 18:25:24 +00:00 committed by Marge Bot
parent 4b30df4462
commit 7f263dd2fb
4 changed files with 144 additions and 0 deletions

View file

@ -38,6 +38,7 @@ panvk_tracepoints = custom_target(
libpanvk_files = files( libpanvk_files = files(
'panvk_buffer.c', 'panvk_buffer.c',
'panvk_cmd_pool.c', 'panvk_cmd_pool.c',
'panvk_device.c',
'panvk_device_memory.c', 'panvk_device_memory.c',
'panvk_host_copy.c', 'panvk_host_copy.c',
'panvk_image.c', 'panvk_image.c',

View file

@ -0,0 +1,131 @@
#include "panvk_device.h"
#include "drm-uapi/panthor_drm.h"
/* Pick the page size used for the blackhole BO: the largest page size
 * supported by the VM that does not exceed max_size. If even the smallest
 * supported page is bigger than max_size, that smallest page size is
 * returned anyway so a usable size always comes back. */
static uint64_t
panvk_choose_blackhole_size(const struct pan_kmod_vm *vm, uint64_t max_size)
{
   uint64_t chosen = 0;

   /* Walk supported page sizes from smallest to largest. */
   for (unsigned bit = 0; bit < 64; bit++) {
      if (!(vm->pgsize_bitmap & ((uint64_t)1 << bit)))
         continue;

      uint64_t pgsize = (uint64_t)1 << bit;

      /* Stop once we'd overshoot, but only after recording at least one
       * candidate. */
      if (chosen != 0 && pgsize > max_size)
         break;

      chosen = pgsize;
   }

   return chosen;
}
/* util_call_once_data() callback that lazily allocates the blackhole BO.
 * _dev points at a struct panvk_device pointer (the callback takes a
 * single const void *, so panvk_get_blackhole() passes &dev), hence the
 * double indirection below. */
static void
panvk_blackhole_init(const void *_dev)
{
   struct panvk_device *dev = *(void *const *)_dev;

   /* Cap the blackhole at 2 MiB; larger pages buy nothing here. */
   uint64_t size = panvk_choose_blackhole_size(dev->kmod.vm, 0x200000);

   /* Never CPU-mapped: it only exists to absorb GPU accesses. */
   dev->sparse_mem.blackhole = pan_kmod_bo_alloc(
      dev->kmod.dev, dev->kmod.vm, size, PAN_KMOD_BO_FLAG_NO_MMAP);
}
/* Returns the device's blackhole BO, allocating it on first use.
 * Safe to call from multiple threads: allocation is serialized through
 * util_call_once_data(). Note the init callback may leave the pointer
 * NULL if allocation failed; callers should handle that. */
struct pan_kmod_bo *
panvk_get_blackhole(struct panvk_device *dev)
{
   /* Pass the address of our local pointer; the callback dereferences it
    * to recover the device. */
   util_call_once_data(&dev->sparse_mem.blackhole_once,
                       panvk_blackhole_init, &dev);

   return dev->sparse_mem.blackhole;
}
/* Small helper that batches DRM_IOCTL_PANTHOR_VM_BIND operations so an
 * arbitrarily large VA range can be (re)bound with a bounded number of
 * ioctls. The ops array is caller-provided (typically on the stack). */
struct panvk_vm_binder {
   uint32_t drm_fd; /* DRM device fd the ioctl is issued on */
   uint32_t vm_id;  /* panthor VM handle the ops apply to */
   struct drm_panthor_vm_bind_op *ops; /* caller-owned op storage */
   size_t op_count; /* ops currently queued, <= op_cap */
   size_t op_cap;   /* capacity of ops[] */
};
/* Submit every queued bind op in one synchronous VM_BIND ioctl and reset
 * the queue. No-op when nothing is queued. Returns 0 on success or the
 * ioctl's error code. */
static int
panvk_vm_binder_flush(struct panvk_vm_binder *b)
{
   if (!b->op_count)
      return 0;

   struct drm_panthor_vm_bind req = {
      .vm_id = b->vm_id,
      .ops = DRM_PANTHOR_OBJ_ARRAY(b->op_count, b->ops),
   };

   int err = pan_kmod_ioctl(b->drm_fd, DRM_IOCTL_PANTHOR_VM_BIND, &req);

   /* The queue is considered drained whether or not the ioctl
    * succeeded; the caller aborts the whole operation on error. */
   b->op_count = 0;

   return err;
}
/* Queue a single bind op, flushing the batch first if the op array is
 * full. Returns 0 on success or the error from the implicit flush. */
static int
panvk_vm_binder_bind(struct panvk_vm_binder *b,
                     const struct drm_panthor_vm_bind_op *op)
{
   /* Make room when the queue is full. */
   if (b->op_count == b->op_cap) {
      int err = panvk_vm_binder_flush(b);
      if (err != 0)
         return err;
   }

   assert(b->op_count < b->op_cap);

   b->ops[b->op_count] = *op;
   b->op_count++;

   return 0;
}
VkResult
panvk_map_to_blackhole(struct panvk_device *device, uint64_t address, uint64_t size)
{
struct pan_kmod_bo *blackhole = panvk_get_blackhole(device);
uint64_t blackhole_size = blackhole->size;
struct drm_panthor_vm_bind_op ops[16] = {};
struct panvk_vm_binder binder = {
.drm_fd = device->drm_fd,
.vm_id = device->kmod.vm->handle,
.ops = ops,
.op_cap = ARRAY_SIZE(ops),
};
uint64_t off = 0;
while (off < size) {
uint64_t va = address + off;
uint64_t bo_offset = va & (blackhole_size - 1);
uint64_t range = MIN2(blackhole_size - bo_offset, size - off);
struct drm_panthor_vm_bind_op op = {
.flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP,
.bo_handle = blackhole->handle,
.bo_offset = bo_offset,
.va = va,
.size = range,
};
int ret = panvk_vm_binder_bind(&binder, &op);
if (ret)
goto err_unmap;
off += range;
}
assert(off == size);
int ret = panvk_vm_binder_flush(&binder);
if (ret)
goto err_unmap;
return VK_SUCCESS;
err_unmap:
{
struct pan_kmod_vm_op unmap = {
.type = PAN_KMOD_VM_OP_TYPE_UNMAP,
.va = {
.start = address,
.size = size,
},
};
ASSERTED int ret =
pan_kmod_vm_bind(device->kmod.vm, PAN_KMOD_VM_OP_TYPE_UNMAP, &unmap, 1);
assert(!ret);
}
return panvk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}

View file

@ -24,6 +24,7 @@
#include "util/perf/u_trace.h" #include "util/perf/u_trace.h"
#include "util/simple_mtx.h" #include "util/simple_mtx.h"
#include "util/u_call_once.h"
#include "util/u_printf.h" #include "util/u_printf.h"
#include "util/vma.h" #include "util/vma.h"
@ -83,6 +84,11 @@ struct panvk_device {
struct panvk_pool exec; struct panvk_pool exec;
} mempools; } mempools;
struct {
util_once_flag blackhole_once;
struct pan_kmod_bo *blackhole;
} sparse_mem;
/* For each subqueue, maximum size of the register dump region needed by /* For each subqueue, maximum size of the register dump region needed by
* exception handlers or functions */ * exception handlers or functions */
uint32_t *dump_region_size; uint32_t *dump_region_size;
@ -173,6 +179,11 @@ panvk_as_free(struct panvk_device *device, uint64_t address, uint64_t size)
simple_mtx_unlock(&device->as.lock); simple_mtx_unlock(&device->as.lock);
} }
VkResult panvk_map_to_blackhole(struct panvk_device *device,
uint64_t address, uint64_t size);
struct pan_kmod_bo *panvk_get_blackhole(struct panvk_device *device);
#if PAN_ARCH #if PAN_ARCH
VkResult VkResult
panvk_per_arch(create_device)(struct panvk_physical_device *physical_device, panvk_per_arch(create_device)(struct panvk_physical_device *physical_device,

View file

@ -601,6 +601,7 @@ panvk_per_arch(destroy_device)(struct panvk_device *device,
panvk_per_arch(device_draw_context_cleanup)(device); panvk_per_arch(device_draw_context_cleanup)(device);
#endif #endif
panvk_meta_cleanup(device); panvk_meta_cleanup(device);
pan_kmod_bo_put(device->sparse_mem.blackhole);
u_printf_destroy(&device->printf.ctx); u_printf_destroy(&device->printf.ctx);
panvk_priv_bo_unref(device->printf.bo); panvk_priv_bo_unref(device->printf.bo);
panvk_priv_bo_unref(device->tiler_oom.handlers_bo); panvk_priv_bo_unref(device->tiler_oom.handlers_bo);