venus: add renderer support for placed mapping

Prepare for VK_EXT_map_memory_placed support.

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38706>
This commit is contained in:
Yiwei Zhang 2025-11-28 00:04:52 -08:00 committed by Marge Bot
parent 38090d5be0
commit 8adfdc3304
4 changed files with 27 additions and 14 deletions

View file

@@ -457,7 +457,7 @@ vn_MapMemory2(VkDevice device,
return vn_error(dev->instance, result);
}
ptr = vn_renderer_bo_map(dev->renderer, mem->base_bo);
ptr = vn_renderer_bo_map(dev->renderer, mem->base_bo, NULL);
if (!ptr) {
/* vn_renderer_bo_map implies a roundtrip on success, but not here. */
if (need_bo) {

View file

@@ -160,7 +160,9 @@ struct vn_renderer_bo_ops {
struct vn_renderer_bo *bo);
/* map is not thread-safe */
void *(*map)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);
void *(*map)(struct vn_renderer *renderer,
struct vn_renderer_bo *bo,
void *placed_addr);
void (*flush)(struct vn_renderer *renderer,
struct vn_renderer_bo *bo,
@@ -368,9 +370,11 @@ vn_renderer_bo_export_dma_buf(struct vn_renderer *renderer,
}
static inline void *
vn_renderer_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
vn_renderer_bo_map(struct vn_renderer *renderer,
struct vn_renderer_bo *bo,
void *placed_addr)
{
return renderer->bo_ops.map(renderer, bo);
return renderer->bo_ops.map(renderer, bo, placed_addr);
}
static inline void

View file

@@ -727,7 +727,10 @@ virtgpu_ioctl_prime_fd_to_handle(struct virtgpu *gpu, int fd)
}
static void *
virtgpu_ioctl_map(struct virtgpu *gpu, uint32_t gem_handle, size_t size)
virtgpu_ioctl_map(struct virtgpu *gpu,
uint32_t gem_handle,
size_t size,
void *placed_addr)
{
struct drm_virtgpu_map args = {
.handle = gem_handle,
@@ -739,8 +742,9 @@ virtgpu_ioctl_map(struct virtgpu *gpu, uint32_t gem_handle, size_t size)
return NULL;
}
void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, gpu->fd,
args.offset);
void *ptr =
mmap(placed_addr, size, PROT_READ | PROT_WRITE,
MAP_SHARED | (placed_addr ? MAP_FIXED : 0), gpu->fd, args.offset);
if (ptr == MAP_FAILED) {
vn_log(
gpu->instance,
@@ -1101,7 +1105,9 @@ virtgpu_bo_flush(struct vn_renderer *renderer,
}
static void *
virtgpu_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
virtgpu_bo_map(struct vn_renderer *renderer,
struct vn_renderer_bo *_bo,
void *placed_addr)
{
struct virtgpu *gpu = (struct virtgpu *)renderer;
struct virtgpu_bo *bo = (struct virtgpu_bo *)_bo;
@@ -1109,8 +1115,8 @@ virtgpu_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
/* not thread-safe but is fine */
if (!bo->base.mmap_ptr && mappable) {
bo->base.mmap_ptr =
virtgpu_ioctl_map(gpu, bo->gem_handle, bo->base.mmap_size);
bo->base.mmap_ptr = virtgpu_ioctl_map(gpu, bo->gem_handle,
bo->base.mmap_size, placed_addr);
}
return bo->base.mmap_ptr;
@@ -1393,7 +1399,7 @@ virtgpu_shmem_create(struct vn_renderer *renderer, size_t size)
if (!gem_handle)
return NULL;
void *ptr = virtgpu_ioctl_map(gpu, gem_handle, size);
void *ptr = virtgpu_ioctl_map(gpu, gem_handle, size, NULL);
if (!ptr) {
virtgpu_ioctl_gem_close(gpu, gem_handle);
return NULL;

View file

@@ -666,7 +666,9 @@ vtest_bo_flush(struct vn_renderer *renderer,
}
static void *
vtest_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
vtest_bo_map(struct vn_renderer *renderer,
struct vn_renderer_bo *_bo,
void *placed_addr)
{
struct vtest *vtest = (struct vtest *)renderer;
struct vtest_bo *bo = (struct vtest_bo *)_bo;
@@ -680,8 +682,9 @@ vtest_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
* check for VCMD_PARAM_HOST_COHERENT_DMABUF_BLOB, we know vtest can
* lie.
*/
void *ptr = mmap(NULL, bo->base.mmap_size, PROT_READ | PROT_WRITE,
MAP_SHARED, bo->res_fd, 0);
void *ptr =
mmap(placed_addr, bo->base.mmap_size, PROT_READ | PROT_WRITE,
MAP_SHARED | (placed_addr ? MAP_FIXED : 0), bo->res_fd, 0);
if (ptr == MAP_FAILED) {
vn_log(vtest->instance, "failed to mmap %d of size %zu rw: %s",
bo->res_fd, bo->base.mmap_size, strerror(errno));