diff --git a/src/virtio/vulkan/vn_command_buffer.c b/src/virtio/vulkan/vn_command_buffer.c
index fe6cf72baba..e8ed22e59a0 100644
--- a/src/virtio/vulkan/vn_command_buffer.c
+++ b/src/virtio/vulkan/vn_command_buffer.c
@@ -681,7 +681,6 @@ vn_CreateCommandPool(VkDevice device,
    pool->queue_family_index = pCreateInfo->queueFamilyIndex;
    list_inithead(&pool->command_buffers);
    list_inithead(&pool->free_query_batches);
-   list_inithead(&pool->free_query_feedback_cmds);
 
    VkCommandPool pool_handle = vn_command_pool_to_handle(pool);
    vn_async_vkCreateCommandPool(dev->primary_ring, device, pCreateInfo, NULL,
@@ -698,8 +697,8 @@ static inline void
 vn_recycle_query_feedback_cmd(struct vn_command_buffer *cmd)
 {
    vn_ResetCommandBuffer(
-      vn_command_buffer_to_handle(cmd->linked_query_feedback_cmd), 0);
-   list_add(&cmd->linked_query_feedback_cmd->feedback_head,
+      vn_command_buffer_to_handle(cmd->linked_query_feedback_cmd->cmd), 0);
+   list_add(&cmd->linked_query_feedback_cmd->head,
             &cmd->linked_query_feedback_cmd->pool->free_query_feedback_cmds);
    cmd->linked_query_feedback_cmd = NULL;
 }
diff --git a/src/virtio/vulkan/vn_command_buffer.h b/src/virtio/vulkan/vn_command_buffer.h
index 1ce050032c5..81b17f967c3 100644
--- a/src/virtio/vulkan/vn_command_buffer.h
+++ b/src/virtio/vulkan/vn_command_buffer.h
@@ -26,7 +26,6 @@ struct vn_command_pool {
 
    struct list_head command_buffers;
    struct list_head free_query_batches;
-   struct list_head free_query_feedback_cmds;
 
    /* Temporary storage for scrubbing VK_IMAGE_LAYOUT_PRESENT_SRC_KHR. The
     * storage's lifetime is the command pool's lifetime. We increase the
@@ -84,11 +83,9 @@ struct vn_command_buffer {
 
    struct vn_command_buffer_builder builder;
 
-   struct vn_command_buffer *linked_query_feedback_cmd;
+   struct vn_query_feedback_cmd *linked_query_feedback_cmd;
 
    struct list_head head;
-
-   struct list_head feedback_head;
 };
 
 VK_DEFINE_HANDLE_CASTS(vn_command_buffer, base.base,
diff --git a/src/virtio/vulkan/vn_feedback.c b/src/virtio/vulkan/vn_feedback.c
index 0feafedb886..80944bb9038 100644
--- a/src/virtio/vulkan/vn_feedback.c
+++ b/src/virtio/vulkan/vn_feedback.c
@@ -595,23 +595,10 @@ vn_feedback_query_cmd_record(VkCommandBuffer cmd_handle,
                                 offset, buf_size);
 }
 
-static void
-vn_cmd_record_batched_query_feedback(VkCommandBuffer *cmd_handle,
-                                     struct list_head *combined_query_batches)
-{
-   list_for_each_entry_safe(struct vn_feedback_query_batch, batch,
-                            combined_query_batches, head) {
-      vn_feedback_query_cmd_record(
-         *cmd_handle, vn_query_pool_to_handle(batch->query_pool),
-         batch->query, batch->query_count, batch->copy);
-   }
-}
-
 VkResult
-vn_feedback_query_batch_record(VkDevice dev_handle,
-                               struct vn_feedback_cmd_pool *feedback_pool,
-                               struct list_head *combined_query_batches,
-                               VkCommandBuffer *out_cmd_handle)
+vn_feedback_query_cmd_alloc(VkDevice dev_handle,
+                            struct vn_feedback_cmd_pool *feedback_pool,
+                            struct vn_query_feedback_cmd **out_feedback_cmd)
 {
    const VkCommandBufferAllocateInfo info = {
       .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
@@ -622,49 +609,83 @@ vn_feedback_query_batch_record(VkDevice dev_handle,
    };
    struct vn_command_pool *cmd_pool =
       vn_command_pool_from_handle(feedback_pool->pool);
-   VkCommandBuffer feedback_cmd_handle;
-   VkResult result;
+   struct vn_query_feedback_cmd *feedback_cmd = NULL;
 
    simple_mtx_lock(&feedback_pool->mutex);
+   if (!list_is_empty(&feedback_pool->free_query_feedback_cmds)) {
+      feedback_cmd =
+         list_first_entry(&feedback_pool->free_query_feedback_cmds,
+                          struct vn_query_feedback_cmd, head);
+      list_del(&feedback_cmd->head);
+   }
+   simple_mtx_unlock(&feedback_pool->mutex);
 
-   if (!list_is_empty(&cmd_pool->free_query_feedback_cmds)) {
-      struct vn_command_buffer *free_cmd =
-         list_first_entry(&cmd_pool->free_query_feedback_cmds,
-                          struct vn_command_buffer, feedback_head);
-      feedback_cmd_handle = vn_command_buffer_to_handle(free_cmd);
-      list_del(&free_cmd->feedback_head);
-   } else {
+   if (!feedback_cmd) {
+      VkCommandBuffer feedback_cmd_handle;
+      VkResult result;
+
+      feedback_cmd =
+         vk_alloc(&cmd_pool->allocator, sizeof(*feedback_cmd),
+                  VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      if (!feedback_cmd)
+         return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+      simple_mtx_lock(&feedback_pool->mutex);
      result = vn_AllocateCommandBuffers(dev_handle, &info,
                                         &feedback_cmd_handle);
-      if (result != VK_SUCCESS)
-         goto out_unlock;
+      simple_mtx_unlock(&feedback_pool->mutex);
+
+      if (result != VK_SUCCESS) {
+         vk_free(&cmd_pool->allocator, feedback_cmd);
+         return result;
+      }
+
+      feedback_cmd->pool = feedback_pool;
+      feedback_cmd->cmd = vn_command_buffer_from_handle(feedback_cmd_handle);
    }
 
+   *out_feedback_cmd = feedback_cmd;
+
+   return VK_SUCCESS;
+}
+
+VkResult
+vn_feedback_query_batch_record(VkDevice dev_handle,
+                               struct vn_query_feedback_cmd *feedback_cmd,
+                               struct list_head *combined_query_batches)
+{
    static const VkCommandBufferBeginInfo begin_info = {
       .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
    };
+   VkCommandBuffer feedback_cmd_handle =
+      vn_command_buffer_to_handle(feedback_cmd->cmd);
+   VkResult result;
+
+   simple_mtx_lock(&feedback_cmd->pool->mutex);
    result = vn_BeginCommandBuffer(feedback_cmd_handle, &begin_info);
    if (result != VK_SUCCESS) {
-      vn_FreeCommandBuffers(dev_handle, feedback_pool->pool, 1,
+      vn_FreeCommandBuffers(dev_handle, feedback_cmd->pool->pool, 1,
                             &feedback_cmd_handle);
       goto out_unlock;
    }
 
-   vn_cmd_record_batched_query_feedback(&feedback_cmd_handle,
-                                        combined_query_batches);
+   list_for_each_entry_safe(struct vn_feedback_query_batch, batch,
+                            combined_query_batches, head) {
+      vn_feedback_query_cmd_record(
+         feedback_cmd_handle, vn_query_pool_to_handle(batch->query_pool),
+         batch->query, batch->query_count, batch->copy);
+   }
    result = vn_EndCommandBuffer(feedback_cmd_handle);
    if (result != VK_SUCCESS) {
-      vn_FreeCommandBuffers(dev_handle, feedback_pool->pool, 1,
+      vn_FreeCommandBuffers(dev_handle, feedback_cmd->pool->pool, 1,
                             &feedback_cmd_handle);
       goto out_unlock;
    }
 
-   *out_cmd_handle = feedback_cmd_handle;
-
 out_unlock:
-   simple_mtx_unlock(&feedback_pool->mutex);
+   simple_mtx_unlock(&feedback_cmd->pool->mutex);
 
    return result;
 }
@@ -754,6 +775,7 @@ vn_feedback_cmd_pools_init(struct vn_device *dev)
       }
 
       simple_mtx_init(&pools[i].mutex, mtx_plain);
+      list_inithead(&pools[i].free_query_feedback_cmds);
    }
 
    dev->cmd_pools = pools;
@@ -771,6 +793,11 @@ vn_feedback_cmd_pools_fini(struct vn_device *dev)
       return;
 
    for (uint32_t i = 0; i < dev->queue_family_count; i++) {
+      list_for_each_entry_safe(struct vn_query_feedback_cmd, feedback_cmd,
+                               &dev->cmd_pools[i].free_query_feedback_cmds,
+                               head)
+         vk_free(alloc, feedback_cmd);
+
       vn_DestroyCommandPool(dev_handle, dev->cmd_pools[i].pool, alloc);
       simple_mtx_destroy(&dev->cmd_pools[i].mutex);
    }
diff --git a/src/virtio/vulkan/vn_feedback.h b/src/virtio/vulkan/vn_feedback.h
index 6355da9d5f4..ea7bed85d83 100644
--- a/src/virtio/vulkan/vn_feedback.h
+++ b/src/virtio/vulkan/vn_feedback.h
@@ -53,6 +53,7 @@ struct vn_feedback_cmd_pool {
    simple_mtx_t mutex;
    VkCommandPool pool;
+   struct list_head free_query_feedback_cmds;
 };
 
 /* coherent buffer with bound and mapped memory */
@@ -74,6 +75,13 @@ struct vn_feedback_query_batch {
    struct list_head head;
 };
 
+struct vn_query_feedback_cmd {
+   struct vn_feedback_cmd_pool *pool;
+   struct vn_command_buffer *cmd;
+
+   struct list_head head;
+};
+
 VkResult
 vn_feedback_pool_init(struct vn_device *dev,
                       struct vn_feedback_pool *pool,
@@ -146,11 +154,15 @@ vn_feedback_event_cmd_record(VkCommandBuffer cmd_handle,
                              VkResult status,
                              bool sync2);
 
+VkResult
+vn_feedback_query_cmd_alloc(VkDevice dev_handle,
+                            struct vn_feedback_cmd_pool *feedback_pool,
+                            struct vn_query_feedback_cmd **out_feedback_cmd);
+
 VkResult
 vn_feedback_query_batch_record(VkDevice dev_handle,
-                               struct vn_feedback_cmd_pool *feedback_pool,
-                               struct list_head *combined_query_batches,
-                               VkCommandBuffer *out_cmd_handle);
+                               struct vn_query_feedback_cmd *feedback_cmd,
+                               struct list_head *combined_query_batches);
 
 VkResult
 vn_feedback_cmd_alloc(VkDevice dev_handle,
diff --git a/src/virtio/vulkan/vn_queue.c b/src/virtio/vulkan/vn_queue.c
index 22d469bba2a..7d716788e5d 100644
--- a/src/virtio/vulkan/vn_queue.c
+++ b/src/virtio/vulkan/vn_queue.c
@@ -48,7 +48,7 @@ struct vn_queue_submission {
    const struct vn_device_memory *wsi_mem;
    uint32_t feedback_cmd_buffer_count;
    struct vn_sync_payload_external external_payload;
-   struct vn_command_buffer *recycle_query_feedback_cmd;
+   struct vn_query_feedback_cmd *recycle_query_feedback_cmd;
 
    /* Temporary storage allocation for submission
    * A single alloc for storage is performed and the offsets inside
@@ -519,7 +519,7 @@ vn_combine_query_feedback_batches_and_record(
    uint32_t cmd_count,
    uint32_t stride,
    struct vn_feedback_cmd_pool *feedback_cmd_pool,
-   VkCommandBuffer *out_feedback_cmd_handle)
+   struct vn_query_feedback_cmd **out_feedback_cmd)
 {
    struct vn_command_pool *cmd_pool =
       vn_command_pool_from_handle(feedback_cmd_pool->pool);
@@ -573,9 +573,15 @@ vn_combine_query_feedback_batches_and_record(
       cmd_handle_ptr += stride;
    }
 
-   result = vn_feedback_query_batch_record(dev_handle, feedback_cmd_pool,
-                                           &combined_batches,
-                                           out_feedback_cmd_handle);
+   struct vn_query_feedback_cmd *feedback_cmd;
+   result = vn_feedback_query_cmd_alloc(dev_handle, feedback_cmd_pool,
+                                        &feedback_cmd);
+   if (result == VK_SUCCESS) {
+      result = vn_feedback_query_batch_record(dev_handle, feedback_cmd,
+                                              &combined_batches);
+      if (result != VK_SUCCESS)
+         vk_free(&cmd_pool->allocator, feedback_cmd);
+   }
 
 recycle_combined_batches:
    simple_mtx_lock(&feedback_cmd_pool->mutex);
@@ -584,6 +590,8 @@ recycle_combined_batches:
       list_move_to(&batch_clone->head, &cmd_pool->free_query_batches);
    simple_mtx_unlock(&feedback_cmd_pool->mutex);
 
+   *out_feedback_cmd = feedback_cmd;
+
    return result;
 }
 
@@ -611,12 +619,15 @@ vn_queue_submission_add_query_feedback(struct vn_queue_submission *submit,
       }
    }
 
+   struct vn_query_feedback_cmd *feedback_cmd;
    VkResult result = vn_combine_query_feedback_batches_and_record(
       dev_handle, src_cmd_handles, cmd_count, stride, feedback_cmd_pool,
-      feedback_cmd_handle);
+      &feedback_cmd);
    if (result != VK_SUCCESS)
      return result;
 
+   *feedback_cmd_handle = vn_command_buffer_to_handle(feedback_cmd->cmd);
+
    /* link query feedback cmd lifecycle with a cmd in the original batch so
    * that the feedback cmd can be reset and recycled when that cmd gets
    * reset/freed.
@@ -654,8 +665,7 @@ vn_queue_submission_add_query_feedback(struct vn_queue_submission *submit,
 
    submit->recycle_query_feedback_cmd =
       linked_cmd->linked_query_feedback_cmd;
-   linked_cmd->linked_query_feedback_cmd =
-      vn_command_buffer_from_handle(*feedback_cmd_handle);
+   linked_cmd->linked_query_feedback_cmd = feedback_cmd;
 
    return VK_SUCCESS;
 }
@@ -990,9 +1000,10 @@ vn_queue_submission_cleanup(struct vn_queue_submission *submit)
 
    if (submit->recycle_query_feedback_cmd) {
      vn_ResetCommandBuffer(
-         vn_command_buffer_to_handle(submit->recycle_query_feedback_cmd), 0);
+         vn_command_buffer_to_handle(submit->recycle_query_feedback_cmd->cmd),
+         0);
      list_add(
-         &submit->recycle_query_feedback_cmd->feedback_head,
+         &submit->recycle_query_feedback_cmd->head,
          &submit->recycle_query_feedback_cmd->pool->free_query_feedback_cmds);
    }