radv: Move to the common command pool framework

Now that the common code supports reuse, port radv over to using it.

Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/18324>
This commit is contained in:
Jason Ekstrand 2022-08-30 14:26:03 -05:00 committed by Marge Bot
parent b825c566bb
commit aea0b6292c
5 changed files with 43 additions and 198 deletions

View file

@ -304,9 +304,9 @@ radv_emit_clear_data(struct radv_cmd_buffer *cmd_buffer, unsigned engine_sel, ui
}
static void
radv_destroy_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
radv_destroy_cmd_buffer(struct vk_command_buffer *vk_cmd_buffer)
{
list_del(&cmd_buffer->pool_link);
struct radv_cmd_buffer *cmd_buffer = container_of(vk_cmd_buffer, struct radv_cmd_buffer, vk);
list_for_each_entry_safe(struct radv_cmd_buffer_upload, up, &cmd_buffer->upload.list, list)
{
@ -340,38 +340,38 @@ radv_destroy_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
vk_object_base_finish(&cmd_buffer->meta_push_descriptors.base);
vk_command_buffer_finish(&cmd_buffer->vk);
vk_free(&cmd_buffer->pool->vk.alloc, cmd_buffer);
vk_free(&cmd_buffer->vk.pool->alloc, cmd_buffer);
}
static VkResult
radv_create_cmd_buffer(struct radv_device *device, struct radv_cmd_pool *pool,
VkCommandBufferLevel level, VkCommandBuffer *pCommandBuffer)
radv_create_cmd_buffer(struct vk_command_pool *pool,
struct vk_command_buffer **cmd_buffer_out)
{
struct radv_device *device = container_of(pool->base.device, struct radv_device, vk);
struct radv_cmd_buffer *cmd_buffer;
unsigned ring;
cmd_buffer = vk_zalloc(&pool->vk.alloc, sizeof(*cmd_buffer), 8,
cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cmd_buffer == NULL)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
VkResult result =
vk_command_buffer_init(&pool->vk, &cmd_buffer->vk, NULL, level);
vk_command_buffer_init(pool, &cmd_buffer->vk, &radv_cmd_buffer_ops, 0);
if (result != VK_SUCCESS) {
vk_free(&cmd_buffer->pool->vk.alloc, cmd_buffer);
vk_free(&cmd_buffer->vk.pool->alloc, cmd_buffer);
return result;
}
cmd_buffer->device = device;
cmd_buffer->pool = pool;
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
cmd_buffer->qf = vk_queue_to_radv(device->physical_device, pool->vk.queue_family_index);
cmd_buffer->qf = vk_queue_to_radv(device->physical_device, pool->queue_family_index);
ring = radv_queue_family_to_ring(device->physical_device, cmd_buffer->qf);
cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
if (!cmd_buffer->cs) {
radv_destroy_cmd_buffer(cmd_buffer);
radv_destroy_cmd_buffer(&cmd_buffer->vk);
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
}
@ -382,16 +382,19 @@ radv_create_cmd_buffer(struct radv_device *device, struct radv_cmd_pool *pool,
vk_object_base_init(&device->vk, &cmd_buffer->descriptors[i].push_set.set.base,
VK_OBJECT_TYPE_DESCRIPTOR_SET);
*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);
*cmd_buffer_out = &cmd_buffer->vk;
list_inithead(&cmd_buffer->upload.list);
return VK_SUCCESS;
}
static VkResult
radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
static void
radv_reset_cmd_buffer(struct vk_command_buffer *vk_cmd_buffer,
UNUSED VkCommandBufferResetFlags flags)
{
struct radv_cmd_buffer *cmd_buffer = container_of(vk_cmd_buffer, struct radv_cmd_buffer, vk);
vk_command_buffer_reset(&cmd_buffer->vk);
cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);
@ -477,10 +480,14 @@ radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
}
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;
return vk_command_buffer_get_record_result(&cmd_buffer->vk);
}
const struct vk_command_buffer_ops radv_cmd_buffer_ops = {
.create = radv_create_cmd_buffer,
.reset = radv_reset_cmd_buffer,
.destroy = radv_destroy_cmd_buffer,
};
static bool
radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer, uint64_t min_needed)
{
@ -4566,7 +4573,7 @@ radv_cmd_state_setup_sample_locations(struct radv_cmd_buffer *cmd_buffer,
}
state->subpass_sample_locs =
vk_alloc(&cmd_buffer->pool->vk.alloc,
vk_alloc(&cmd_buffer->vk.pool->alloc,
sample_locs->postSubpassSampleLocationsCount * sizeof(state->subpass_sample_locs[0]),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (state->subpass_sample_locs == NULL) {
@ -4611,7 +4618,7 @@ radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer, struct radv
}
state->attachments =
vk_alloc(&cmd_buffer->pool->vk.alloc, pass->attachment_count * sizeof(state->attachments[0]),
vk_alloc(&cmd_buffer->vk.pool->alloc, pass->attachment_count * sizeof(state->attachments[0]),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (state->attachments == NULL) {
return vk_command_buffer_set_error(&cmd_buffer->vk, VK_ERROR_OUT_OF_HOST_MEMORY);
@ -4671,83 +4678,6 @@ radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer, struct radv
return VK_SUCCESS;
}
/* vkAllocateCommandBuffers: hands out commandBufferCount command buffers from
 * the pool, recycling entries from the pool's free list when available and
 * creating new ones otherwise. On any failure, all buffers produced by this
 * call are returned to the pool and every entry of pCommandBuffers is set to
 * NULL, as the Vulkan spec requires. */
VKAPI_ATTR VkResult VKAPI_CALL
radv_AllocateCommandBuffers(VkDevice _device, const VkCommandBufferAllocateInfo *pAllocateInfo,
VkCommandBuffer *pCommandBuffers)
{
RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);
VkResult result = VK_SUCCESS;
uint32_t i;
for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
if (!list_is_empty(&pool->free_cmd_buffers)) {
/* Recycle a previously freed command buffer instead of allocating. */
struct radv_cmd_buffer *cmd_buffer =
list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
/* Move it from the free list back onto the pool's live list. */
list_del(&cmd_buffer->pool_link);
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
result = radv_reset_cmd_buffer(cmd_buffer);
/* Re-initialize the common base object so the requested level
 * (primary/secondary) takes effect for the recycled buffer. */
vk_command_buffer_finish(&cmd_buffer->vk);
VkResult init_result =
vk_command_buffer_init(&pool->vk, &cmd_buffer->vk, NULL, pAllocateInfo->level);
if (init_result != VK_SUCCESS)
result = init_result;
pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
} else {
result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level, &pCommandBuffers[i]);
}
if (result != VK_SUCCESS)
break;
}
if (result != VK_SUCCESS) {
/* Return the i buffers created before the failure to the pool. */
radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i, pCommandBuffers);
/* From the Vulkan 1.0.66 spec:
*
* "vkAllocateCommandBuffers can be used to create multiple
* command buffers. If the creation of any of those command
* buffers fails, the implementation must destroy all
* successfully created command buffer objects from this
* command, set all entries of the pCommandBuffers array to
* NULL and return the error."
*/
memset(pCommandBuffers, 0, sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
}
return result;
}
/* vkFreeCommandBuffers: moves the given buffers onto the pool's free list for
 * later reuse rather than destroying them immediately (actual destruction
 * happens in vkTrimCommandPool / vkDestroyCommandPool). */
VKAPI_ATTR void VKAPI_CALL
radv_FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers)
{
RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
for (uint32_t i = 0; i < commandBufferCount; i++) {
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
/* VK_NULL_HANDLE entries are legal and must be ignored. */
if (!cmd_buffer)
continue;
assert(cmd_buffer->pool == pool);
/* Unlink from the live list and park on the free list; the backing
 * memory is retained for reuse by vkAllocateCommandBuffers. */
list_del(&cmd_buffer->pool_link);
list_addtail(&cmd_buffer->pool_link, &pool->free_cmd_buffers);
}
}
/* vkResetCommandBuffer: thin entry point that forwards to
 * radv_reset_cmd_buffer(); the flags argument is not consulted here. */
VKAPI_ATTR VkResult VKAPI_CALL
radv_ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
return radv_reset_cmd_buffer(cmd_buffer);
}
static void
radv_inherit_dynamic_rendering(struct radv_cmd_buffer *cmd_buffer,
const VkCommandBufferInheritanceInfo *inherit_info,
@ -4854,9 +4784,7 @@ radv_BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBegi
/* If the command buffer has already been resetted with
* vkResetCommandBuffer, no need to do it again.
*/
result = radv_reset_cmd_buffer(cmd_buffer);
if (result != VK_SUCCESS)
return result;
radv_reset_cmd_buffer(&cmd_buffer->vk, 0);
}
memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
@ -5308,8 +5236,8 @@ radv_EndCommandBuffer(VkCommandBuffer commandBuffer)
radv_describe_end_cmd_buffer(cmd_buffer);
vk_free(&cmd_buffer->pool->vk.alloc, cmd_buffer->state.attachments);
vk_free(&cmd_buffer->pool->vk.alloc, cmd_buffer->state.subpass_sample_locs);
vk_free(&cmd_buffer->vk.pool->alloc, cmd_buffer->state.attachments);
vk_free(&cmd_buffer->vk.pool->alloc, cmd_buffer->state.subpass_sample_locs);
VkResult result = cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs);
if (result != VK_SUCCESS)
@ -6105,83 +6033,6 @@ radv_CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCou
radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_COMPUTE);
}
/* vkCreateCommandPool: allocates the pool object, initializes the common
 * vk_command_pool base, and sets up the empty live and free buffer lists. */
VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateCommandPool(VkDevice _device, const VkCommandPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkCommandPool *pCmdPool)
{
RADV_FROM_HANDLE(radv_device, device, _device);
struct radv_cmd_pool *pool;
pool =
vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
/* Common base init; on failure the bare allocation must be released. */
VkResult result = vk_command_pool_init(&device->vk, &pool->vk, pCreateInfo, pAllocator);
if (result != VK_SUCCESS) {
vk_free2(&device->vk.alloc, pAllocator, pool);
return result;
}
list_inithead(&pool->cmd_buffers);
list_inithead(&pool->free_cmd_buffers);
*pCmdPool = radv_cmd_pool_to_handle(pool);
return VK_SUCCESS;
}
/* vkDestroyCommandPool: destroys every command buffer owned by the pool —
 * both live and recycled — then tears down the common base and frees the
 * pool. Destroying VK_NULL_HANDLE is a no-op per the Vulkan spec. */
VKAPI_ATTR void VKAPI_CALL
radv_DestroyCommandPool(VkDevice _device, VkCommandPool commandPool,
const VkAllocationCallbacks *pAllocator)
{
RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
if (!pool)
return;
/* Live command buffers allocated from this pool. */
list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer, &pool->cmd_buffers, pool_link)
{
radv_destroy_cmd_buffer(cmd_buffer);
}
/* Recycled buffers waiting on the free list. */
list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer, &pool->free_cmd_buffers, pool_link)
{
radv_destroy_cmd_buffer(cmd_buffer);
}
vk_command_pool_finish(&pool->vk);
vk_free2(&device->vk.alloc, pAllocator, pool);
}
/* vkResetCommandPool: resets every live command buffer in the pool,
 * returning the first failure encountered. */
VKAPI_ATTR VkResult VKAPI_CALL
radv_ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags)
{
RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
VkResult result;
list_for_each_entry(struct radv_cmd_buffer, cmd_buffer, &pool->cmd_buffers, pool_link)
{
result = radv_reset_cmd_buffer(cmd_buffer);
if (result != VK_SUCCESS)
return result;
}
return VK_SUCCESS;
}
/* vkTrimCommandPool: releases the memory held by recycled command buffers
 * on the free list; buffers currently in use are left untouched. */
VKAPI_ATTR void VKAPI_CALL
radv_TrimCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags)
{
RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer, &pool->free_cmd_buffers, pool_link)
{
radv_destroy_cmd_buffer(cmd_buffer);
}
}
static void
radv_cmd_buffer_begin_subpass(struct radv_cmd_buffer *cmd_buffer, uint32_t subpass_id)
{
@ -8679,8 +8530,8 @@ radv_CmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pS
radv_cmd_buffer_end_subpass(cmd_buffer);
vk_free(&cmd_buffer->pool->vk.alloc, cmd_buffer->state.attachments);
vk_free(&cmd_buffer->pool->vk.alloc, cmd_buffer->state.subpass_sample_locs);
vk_free(&cmd_buffer->vk.pool->alloc, cmd_buffer->state.attachments);
vk_free(&cmd_buffer->vk.pool->alloc, cmd_buffer->state.subpass_sample_locs);
cmd_buffer->state.pass = NULL;
cmd_buffer->state.subpass = NULL;

View file

@ -3425,6 +3425,8 @@ radv_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCr
return result;
}
device->vk.command_buffer_ops = &radv_cmd_buffer_ops;
device->instance = physical_device->instance;
device->physical_device = physical_device;
simple_mtx_init(&device->trace_mtx, mtx_plain);

View file

@ -483,7 +483,7 @@ blit_image(struct radv_cmd_buffer *cmd_buffer, struct radv_image *src_image,
.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
},
&cmd_buffer->pool->vk.alloc, &sampler);
&cmd_buffer->vk.pool->alloc, &sampler);
/* VK_EXT_conditional_rendering says that blit commands should not be
* affected by conditional rendering.
@ -607,7 +607,7 @@ blit_image(struct radv_cmd_buffer *cmd_buffer, struct radv_image *src_image,
radv_meta_restore(&saved_state, cmd_buffer);
radv_DestroySampler(radv_device_to_handle(device), sampler, &cmd_buffer->pool->vk.alloc);
radv_DestroySampler(radv_device_to_handle(device), sampler, &cmd_buffer->vk.pool->alloc);
}
VKAPI_ATTR void VKAPI_CALL

View file

@ -1553,12 +1553,6 @@ struct radv_cmd_state {
bool uses_perf_counters;
};
/* Per-pool bookkeeping: the common vk_command_pool base plus the lists used
 * to track allocated and recycled command buffers. */
struct radv_cmd_pool {
struct vk_command_pool vk;
/* Command buffers currently allocated from this pool. */
struct list_head cmd_buffers;
/* Freed command buffers retained for reuse by vkAllocateCommandBuffers. */
struct list_head free_cmd_buffers;
};
struct radv_cmd_buffer_upload {
uint8_t *map;
unsigned offset;
@ -1580,9 +1574,7 @@ struct radv_cmd_buffer {
struct radv_device *device;
struct radv_cmd_pool *pool;
struct list_head pool_link;
struct util_dynarray cached_vertex_formats;
VkCommandBufferUsageFlags usage_flags;
enum radv_cmd_buffer_status status;
struct radeon_cmdbuf *cs;
@ -1656,6 +1648,8 @@ struct radv_cmd_buffer {
enum radv_cmd_flush_bits active_query_flush_bits;
};
extern const struct vk_command_buffer_ops radv_cmd_buffer_ops;
struct radv_image;
struct radv_image_view;
@ -3124,8 +3118,6 @@ VK_DEFINE_HANDLE_CASTS(radv_instance, vk.base, VkInstance, VK_OBJECT_TYPE_INSTAN
VK_DEFINE_HANDLE_CASTS(radv_physical_device, vk.base, VkPhysicalDevice,
VK_OBJECT_TYPE_PHYSICAL_DEVICE)
VK_DEFINE_HANDLE_CASTS(radv_queue, vk.base, VkQueue, VK_OBJECT_TYPE_QUEUE)
VK_DEFINE_NONDISP_HANDLE_CASTS(radv_cmd_pool, vk.base, VkCommandPool,
VK_OBJECT_TYPE_COMMAND_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(radv_buffer, vk.base, VkBuffer, VK_OBJECT_TYPE_BUFFER)
VK_DEFINE_NONDISP_HANDLE_CASTS(radv_buffer_view, base, VkBufferView,
VK_OBJECT_TYPE_BUFFER_VIEW)

View file

@ -874,7 +874,7 @@ rra_init_acceleration_structure_copy(VkDevice vk_device, uint32_t family_index,
.queueFamilyIndex = family_index,
};
VkResult result = radv_CreateCommandPool(vk_device, &pool_info, NULL, &dst->pool);
VkResult result = vk_common_CreateCommandPool(vk_device, &pool_info, NULL, &dst->pool);
if (result != VK_SUCCESS)
goto fail;
@ -883,7 +883,7 @@ rra_init_acceleration_structure_copy(VkDevice vk_device, uint32_t family_index,
.commandPool = dst->pool,
.commandBufferCount = 1,
};
result = radv_AllocateCommandBuffers(vk_device, &cmdbuf_alloc_info, &dst->cmd_buffer);
result = vk_common_AllocateCommandBuffers(vk_device, &cmdbuf_alloc_info, &dst->cmd_buffer);
if (result != VK_SUCCESS)
goto fail_pool;
@ -941,7 +941,7 @@ fail_memory:
fail_buffer:
radv_DestroyBuffer(vk_device, dst->buffer, NULL);
fail_pool:
radv_DestroyCommandPool(vk_device, dst->pool, NULL);
vk_common_DestroyCommandPool(vk_device, dst->pool, NULL);
fail:
return result;
}
@ -959,7 +959,7 @@ rra_copy_acceleration_structures(VkQueue vk_queue, struct rra_accel_struct_copy
RADV_FROM_HANDLE(radv_cmd_buffer, cmdbuf, dst->cmd_buffer);
radv_ResetCommandPool(vk_device, dst->pool, 0);
vk_common_ResetCommandPool(vk_device, dst->pool, 0);
/*
* Wait for possible AS build/trace calls on all queues.
@ -1127,7 +1127,7 @@ radv_rra_dump_trace(VkQueue vk_queue, char *filename)
copy_fail:
radv_DestroyBuffer(vk_device, copy.buffer, NULL);
radv_FreeMemory(vk_device, copy.memory, NULL);
radv_DestroyCommandPool(vk_device, copy.pool, NULL);
vk_common_DestroyCommandPool(vk_device, copy.pool, NULL);
fail:
free(accel_struct_offsets);
return result;