panvk: Make panvk_device.{c,h} panvk_queue agnostic

We will soon have a panvk_bind_queue object to expose bind queues,
so let's modify the code to make this reasonably clean by letting
the CSF/JM backends keep their queue object opaque.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-By: Caterina Shablia <caterina.shablia@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35692>
This commit is contained in:
Boris Brezillon 2025-05-23 19:39:49 +00:00 committed by Marge Bot
parent 4d972bce5b
commit 33060373c0
9 changed files with 121 additions and 102 deletions

View file

@ -85,12 +85,4 @@ struct panvk_queue {
VK_DEFINE_HANDLE_CASTS(panvk_queue, vk.base, VkQueue, VK_OBJECT_TYPE_QUEUE)
void panvk_per_arch(queue_finish)(struct panvk_queue *queue);
VkResult panvk_per_arch(queue_init)(struct panvk_device *device,
struct panvk_queue *queue, int idx,
const VkDeviceQueueCreateInfo *create_info);
VkResult panvk_per_arch(queue_check_status)(struct panvk_queue *queue);
#endif

View file

@ -1,29 +0,0 @@
/*
* Copyright © 2024 Collabora Ltd.
* SPDX-License-Identifier: MIT
*/
#include "panvk_device.h"
#include "panvk_queue.h"
/* CSF-backend device status check: combine the common (printf-buffer)
 * status with per-queue status and kmod VM health, downgrading the
 * result to VK_ERROR_DEVICE_LOST on any failure. */
VkResult
panvk_per_arch(device_check_status)(struct vk_device *vk_dev)
{
   struct panvk_device *device = to_panvk_device(vk_dev);
   VkResult status = panvk_common_check_status(device);

   /* One failing queue is enough to consider the device lost, but keep
    * scanning so every queue gets a chance to report its state. */
   for (uint32_t family = 0; family < PANVK_MAX_QUEUE_FAMILIES; family++) {
      for (uint32_t i = 0; i < device->queue_count[family]; i++) {
         struct panvk_queue *queue = &device->queues[family][i];

         if (panvk_per_arch(queue_check_status)(queue) != VK_SUCCESS)
            status = VK_ERROR_DEVICE_LOST;
      }
   }

   /* An unusable VM also implies device loss; record the reason. */
   if (pan_kmod_vm_query_state(device->kmod.vm) != PAN_KMOD_VM_USABLE) {
      vk_device_set_lost(&device->vk, "vm state: not usable");
      status = VK_ERROR_DEVICE_LOST;
   }

   return status;
}

View file

@ -1267,12 +1267,20 @@ get_panthor_group_priority(const VkDeviceQueueCreateInfo *create_info)
}
VkResult
panvk_per_arch(queue_init)(struct panvk_device *dev, struct panvk_queue *queue,
int idx, const VkDeviceQueueCreateInfo *create_info)
panvk_per_arch(queue_create)(struct panvk_device *dev, uint32_t family_idx,
uint32_t queue_idx,
const VkDeviceQueueCreateInfo *create_info,
struct vk_queue **out_queue)
{
VkResult result = vk_queue_init(&queue->vk, &dev->vk, create_info, idx);
struct panvk_queue *queue = vk_zalloc(&dev->vk.alloc, sizeof(*queue), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!queue)
return panvk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);
VkResult result =
vk_queue_init(&queue->vk, &dev->vk, create_info, queue_idx);
if (result != VK_SUCCESS)
return result;
goto err_free_queue;
int ret = drmSyncobjCreate(dev->drm_fd, 0, &queue->syncobj_handle);
if (ret) {
@ -1294,6 +1302,7 @@ panvk_per_arch(queue_init)(struct panvk_device *dev, struct panvk_queue *queue,
goto err_destroy_group;
queue->vk.driver_submit = panvk_queue_submit;
*out_queue = &queue->vk;
return VK_SUCCESS;
err_destroy_group:
@ -1307,12 +1316,16 @@ err_destroy_syncobj:
err_finish_queue:
vk_queue_finish(&queue->vk);
err_free_queue:
vk_free(&dev->vk.alloc, queue);
return result;
}
void
panvk_per_arch(queue_finish)(struct panvk_queue *queue)
panvk_per_arch(queue_destroy)(struct vk_queue *vk_queue)
{
struct panvk_queue *queue = container_of(vk_queue, struct panvk_queue, vk);
struct panvk_device *dev = to_panvk_device(queue->vk.base.device);
cleanup_queue(queue);
@ -1320,11 +1333,13 @@ panvk_per_arch(queue_finish)(struct panvk_queue *queue)
cleanup_tiler(queue);
drmSyncobjDestroy(dev->drm_fd, queue->syncobj_handle);
vk_queue_finish(&queue->vk);
vk_free(&dev->vk.alloc, queue);
}
VkResult
panvk_per_arch(queue_check_status)(struct panvk_queue *queue)
panvk_per_arch(queue_check_status)(struct vk_queue *vk_queue)
{
struct panvk_queue *queue = container_of(vk_queue, struct panvk_queue, vk);
struct panvk_device *dev = to_panvk_device(queue->vk.base.device);
struct drm_panthor_group_get_state state = {
.group_handle = queue->group_handle,

View file

@ -23,17 +23,4 @@ struct panvk_queue {
VK_DEFINE_HANDLE_CASTS(panvk_queue, vk.base, VkQueue, VK_OBJECT_TYPE_QUEUE)
/* Tear down a JM queue: release the vk_queue state first, then the DRM
 * syncobj backing its submissions. The device is looked up before
 * vk_queue_finish() since that is where the back-pointer lives. */
static inline void
panvk_per_arch(queue_finish)(struct panvk_queue *queue)
{
   struct panvk_device *device = to_panvk_device(queue->vk.base.device);

   vk_queue_finish(&queue->vk);
   drmSyncobjDestroy(device->drm_fd, queue->sync);
}
VkResult panvk_per_arch(queue_init)(struct panvk_device *device,
struct panvk_queue *queue, int idx,
const VkDeviceQueueCreateInfo *create_info);
#endif

View file

@ -1,13 +0,0 @@
/*
* Copyright © 2024 Collabora Ltd.
* SPDX-License-Identifier: MIT
*/
#include "panvk_device.h"
/* JM-backend device status check: nothing backend-specific to verify,
 * so defer entirely to the common (printf-buffer) status check. */
VkResult
panvk_per_arch(device_check_status)(struct vk_device *vk_dev)
{
   return panvk_common_check_status(to_panvk_device(vk_dev));
}

View file

@ -307,9 +307,10 @@ panvk_queue_submit(struct vk_queue *vk_queue, struct vk_queue_submit *submit)
}
VkResult
panvk_per_arch(queue_init)(struct panvk_device *device,
struct panvk_queue *queue, int idx,
const VkDeviceQueueCreateInfo *create_info)
panvk_per_arch(queue_create)(struct panvk_device *device, uint32_t family_idx,
uint32_t queue_idx,
const VkDeviceQueueCreateInfo *create_info,
struct vk_queue **out_queue)
{
ASSERTED const VkDeviceQueueGlobalPriorityCreateInfoKHR *priority_info =
vk_find_struct_const(create_info->pNext,
@ -321,18 +322,48 @@ panvk_per_arch(queue_init)(struct panvk_device *device,
/* XXX: Panfrost kernel module doesn't support priorities so far */
assert(priority == VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR);
VkResult result = vk_queue_init(&queue->vk, &device->vk, create_info, idx);
struct panvk_queue *queue = vk_zalloc(&device->vk.alloc, sizeof(*queue), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!queue)
return panvk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
VkResult result =
vk_queue_init(&queue->vk, &device->vk, create_info, queue_idx);
if (result != VK_SUCCESS)
return result;
goto err_free_queue;
int ret = drmSyncobjCreate(device->drm_fd, DRM_SYNCOBJ_CREATE_SIGNALED,
&queue->sync);
if (ret) {
vk_queue_finish(&queue->vk);
return panvk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
result = panvk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
goto err_finish_queue;
}
queue->vk.driver_submit = panvk_queue_submit;
*out_queue = &queue->vk;
return VK_SUCCESS;
err_finish_queue:
vk_queue_finish(&queue->vk);
err_free_queue:
vk_free(&device->vk.alloc, queue);
return result;
}
/* Destroy a JM queue created by queue_create(): finish the vk_queue,
 * release the signaled syncobj used for submission tracking, then free
 * the queue object itself. The device pointer is captured before
 * vk_queue_finish() since that is where the back-pointer lives.
 *
 * Return type moved to its own line for consistency with the other
 * definitions in this file. */
void
panvk_per_arch(queue_destroy)(struct vk_queue *vk_queue)
{
   struct panvk_queue *queue = container_of(vk_queue, struct panvk_queue, vk);
   struct panvk_device *dev = to_panvk_device(vk_queue->base.device);

   vk_queue_finish(&queue->vk);
   drmSyncobjDestroy(dev->drm_fd, queue->sync);
   vk_free(&dev->vk.alloc, queue);
}
/* The JM backend exposes no per-queue fault state to query here, so
 * queue status checks unconditionally report success; device loss is
 * presumably detected elsewhere (e.g. the device-level check) —
 * NOTE(review): confirm against the JM submit path. */
VkResult
panvk_per_arch(queue_check_status)(struct vk_queue *vk_queue)
{
   return VK_SUCCESS;
}

View file

@ -76,7 +76,6 @@ jm_files = [
'jm/panvk_vX_cmd_event.c',
'jm/panvk_vX_cmd_query.c',
'jm/panvk_vX_cmd_precomp.c',
'jm/panvk_vX_device.c',
'jm/panvk_vX_event.c',
'jm/panvk_vX_queue.c',
]
@ -90,7 +89,6 @@ csf_files = [
'csf/panvk_vX_cmd_event.c',
'csf/panvk_vX_cmd_query.c',
'csf/panvk_vX_cmd_precomp.c',
'csf/panvk_vX_device.c',
'csf/panvk_vX_event.c',
'csf/panvk_vX_exception_handler.c',
'csf/panvk_vX_queue.c',

View file

@ -32,6 +32,11 @@
struct panvk_precomp_cache;
struct panvk_device_draw_context;
struct panvk_device_queue_family {
struct vk_queue **queues;
int queue_count;
};
struct panvk_device {
struct vk_device vk;
@ -68,8 +73,7 @@ struct panvk_device {
struct vk_device_dispatch_table cmd_dispatch;
struct panvk_queue *queues[PANVK_MAX_QUEUE_FAMILIES];
int queue_count[PANVK_MAX_QUEUE_FAMILIES];
struct panvk_device_queue_family queue_families[PANVK_MAX_QUEUE_FAMILIES];
struct panvk_precomp_cache *precomp_cache;
@ -136,14 +140,15 @@ panvk_per_arch(create_device)(struct panvk_physical_device *physical_device,
void panvk_per_arch(destroy_device)(struct panvk_device *device,
const VkAllocationCallbacks *pAllocator);
static inline VkResult
panvk_common_check_status(struct panvk_device *dev)
{
return vk_check_printf_status(&dev->vk, &dev->printf.ctx);
}
VkResult panvk_per_arch(device_check_status)(struct vk_device *vk_dev);
VkResult panvk_per_arch(queue_create)(struct panvk_device *device, uint32_t family_idx,
uint32_t queue_idx,
const VkDeviceQueueCreateInfo *create_info,
struct vk_queue **out_queue);
void panvk_per_arch(queue_destroy)(struct vk_queue *queue);
VkResult panvk_per_arch(queue_check_status)(struct vk_queue *queue);
#if PAN_ARCH >= 10
VkResult panvk_per_arch(init_tiler_oom)(struct panvk_device *device);
#endif

View file

@ -222,6 +222,31 @@ check_global_priority(const struct panvk_physical_device *phys_dev,
return VK_ERROR_NOT_PERMITTED_KHR;
}
/* Backend-agnostic device status check: reports the printf-buffer
 * status, then folds in the per-queue status of every created queue
 * and the kmod VM state, downgrading the result to
 * VK_ERROR_DEVICE_LOST on any failure. Installed as vk.check_status. */
static VkResult
panvk_device_check_status(struct vk_device *vk_dev)
{
   struct panvk_device *dev = to_panvk_device(vk_dev);
   VkResult result = vk_check_printf_status(&dev->vk, &dev->printf.ctx);

   for (uint32_t qfi = 0; qfi < PANVK_MAX_QUEUE_FAMILIES; qfi++) {
      struct panvk_device_queue_family *qf = &dev->queue_families[qfi];

      /* qf->queue_count is declared int; use a signed index to avoid a
       * signed/unsigned comparison. */
      for (int q = 0; q < qf->queue_count; q++) {
         struct vk_queue *queue = qf->queues[q];

         if (panvk_per_arch(queue_check_status)(queue) != VK_SUCCESS)
            result = VK_ERROR_DEVICE_LOST;
      }
   }

   if (pan_kmod_vm_query_state(dev->kmod.vm) != PAN_KMOD_VM_USABLE) {
      vk_device_set_lost(&dev->vk, "vm state: not usable");
      result = VK_ERROR_DEVICE_LOST;
   }

   return result;
}
VkResult
panvk_per_arch(create_device)(struct panvk_physical_device *physical_device,
const VkDeviceCreateInfo *pCreateInfo,
@ -279,7 +304,7 @@ panvk_per_arch(create_device)(struct panvk_physical_device *physical_device,
device->vk.command_dispatch_table = &device->cmd_dispatch;
device->vk.command_buffer_ops = &panvk_per_arch(cmd_buffer_ops);
device->vk.shader_ops = &panvk_per_arch(device_shader_ops);
device->vk.check_status = panvk_per_arch(device_check_status);
device->vk.check_status = panvk_device_check_status;
device->kmod.allocator = (struct pan_kmod_allocator){
.zalloc = panvk_kmod_zalloc,
@ -415,22 +440,24 @@ panvk_per_arch(create_device)(struct panvk_physical_device *physical_device,
goto err_finish_queues;
uint32_t qfi = queue_create->queueFamilyIndex;
device->queues[qfi] =
struct panvk_device_queue_family *qf = &device->queue_families[qfi];
qf->queues =
vk_zalloc(&device->vk.alloc,
queue_create->queueCount * sizeof(struct panvk_queue), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!device->queues[qfi]) {
queue_create->queueCount * sizeof(qf->queues[0]), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!qf->queues) {
result = panvk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
goto err_finish_queues;
}
for (unsigned q = 0; q < queue_create->queueCount; q++) {
result = panvk_per_arch(queue_init)(device, &device->queues[qfi][q], q,
queue_create);
result = panvk_per_arch(queue_create)(device, qfi, q, queue_create,
&qf->queues[q]);
if (result != VK_SUCCESS)
goto err_finish_queues;
device->queue_count[qfi]++;
qf->queue_count++;
}
}
@ -446,10 +473,13 @@ panvk_per_arch(create_device)(struct panvk_physical_device *physical_device,
err_finish_queues:
for (unsigned i = 0; i < PANVK_MAX_QUEUE_FAMILIES; i++) {
for (unsigned q = 0; q < device->queue_count[i]; q++)
panvk_per_arch(queue_finish)(&device->queues[i][q]);
if (device->queues[i])
vk_free(&device->vk.alloc, device->queues[i]);
struct panvk_device_queue_family *qf = &device->queue_families[i];
for (unsigned q = 0; q < qf->queue_count; q++)
panvk_per_arch(queue_destroy)(qf->queues[q]);
if (qf->queues)
vk_free(&device->vk.alloc, qf->queues);
}
panvk_meta_cleanup(device);
@ -494,10 +524,13 @@ panvk_per_arch(destroy_device)(struct panvk_device *device,
panvk_per_arch(utrace_context_fini)(device);
for (unsigned i = 0; i < PANVK_MAX_QUEUE_FAMILIES; i++) {
for (unsigned q = 0; q < device->queue_count[i]; q++)
panvk_per_arch(queue_finish)(&device->queues[i][q]);
if (device->queue_count[i])
vk_free(&device->vk.alloc, device->queues[i]);
struct panvk_device_queue_family *qf = &device->queue_families[i];
for (unsigned q = 0; q < qf->queue_count; q++)
panvk_per_arch(queue_destroy)(qf->queues[q]);
if (qf->queues)
vk_free(&device->vk.alloc, qf->queues);
}
panvk_precomp_cleanup(device);