venus: drop VkQueueBindSparse

Sparse binding is already disabled when fence feedback is enabled (the
default) because of the difficulty of getting both to work together.

Signed-off-by: Juston Li <justonli@google.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20088>
This commit is contained in:
Juston Li 2022-12-20 14:51:51 -08:00 committed by Marge Bot
parent 3b107962a2
commit 2889a0478b
2 changed files with 21 additions and 88 deletions

View file

@@ -218,9 +218,7 @@ vn_physical_device_init_features(struct vn_physical_device *physical_dev)
/* clang-format off */
/* TODO allow sparse resource along with sync feedback
*
* vkQueueBindSparse relies on explicit sync primitives. To intercept the
/* vkQueueBindSparse relies on explicit sync primitives. To intercept the
* timeline semaphores within each bind info to write the feedback buffer,
* we have to split the call into bindInfoCount number of calls while
* inserting vkQueueSubmit to wait on the signal timeline semaphores before
@@ -230,17 +228,15 @@ vn_physical_device_init_features(struct vn_physical_device *physical_dev)
* Those would make the code overly complex, so we disable sparse binding
* for simplicity.
*/
if (!VN_PERF(NO_FENCE_FEEDBACK)) {
VN_SET_CORE_VALUE(vk10_feats, sparseBinding, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidencyBuffer, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidencyImage2D, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidencyImage3D, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidency2Samples, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidency4Samples, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidency8Samples, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidency16Samples, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidencyAliased, false);
}
VN_SET_CORE_VALUE(vk10_feats, sparseBinding, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidencyBuffer, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidencyImage2D, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidencyImage3D, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidency2Samples, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidency4Samples, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidency8Samples, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidency16Samples, false);
VN_SET_CORE_VALUE(vk10_feats, sparseResidencyAliased, false);
if (renderer_version < VK_API_VERSION_1_2) {
/* Vulkan 1.1 */
@@ -540,11 +536,9 @@ vn_physical_device_init_properties(struct vn_physical_device *physical_dev)
/* clang-format off */
/* TODO allow sparse resource along with sync feedback */
if (!VN_PERF(NO_FENCE_FEEDBACK)) {
VN_SET_CORE_VALUE(vk10_props, limits.sparseAddressSpaceSize, 0);
VN_SET_CORE_VALUE(vk10_props, sparseProperties, (VkPhysicalDeviceSparseProperties){ 0 });
}
VN_SET_CORE_VALUE(vk10_props, limits.sparseAddressSpaceSize, 0);
VN_SET_CORE_VALUE(vk10_props, sparseProperties, (VkPhysicalDeviceSparseProperties){ 0 });
if (renderer_version < VK_API_VERSION_1_2) {
/* Vulkan 1.1 */
VN_SET_CORE_ARRAY(vk11_props, deviceUUID, local_props.id);
@@ -2122,24 +2116,13 @@ vn_GetPhysicalDeviceSparseImageFormatProperties2(
uint32_t *pPropertyCount,
VkSparseImageFormatProperties2 *pProperties)
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
/* TODO allow sparse resource along with sync feedback
*
* If VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT is not supported for the given
/* If VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT is not supported for the given
* arguments, pPropertyCount will be set to zero upon return, and no data
* will be written to pProperties.
*/
if (!VN_PERF(NO_FENCE_FEEDBACK)) {
*pPropertyCount = 0;
return;
}
/* TODO per-device cache */
vn_call_vkGetPhysicalDeviceSparseImageFormatProperties2(
physical_dev->instance, physicalDevice, pFormatInfo, pPropertyCount,
pProperties);
*pPropertyCount = 0;
return;
}
void

View file

@@ -53,7 +53,6 @@ struct vn_queue_submission {
union {
const void *batches;
const VkSubmitInfo *submit_batches;
const VkBindSparseInfo *bind_sparse_batches;
};
VkFence fence;
@@ -68,7 +67,6 @@ vn_queue_submission_fix_batch_semaphores(struct vn_queue_submission *submit,
{
union {
const VkSubmitInfo *submit_batch;
const VkBindSparseInfo *bind_sparse_batch;
} u;
uint32_t wait_count;
uint32_t signal_count;
@@ -83,13 +81,6 @@ vn_queue_submission_fix_batch_semaphores(struct vn_queue_submission *submit,
signal_count = u.submit_batch->signalSemaphoreCount;
signal_sems = u.submit_batch->pSignalSemaphores;
break;
case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO:
u.bind_sparse_batch = &submit->bind_sparse_batches[batch_index];
wait_count = u.bind_sparse_batch->waitSemaphoreCount;
wait_sems = u.bind_sparse_batch->pWaitSemaphores;
signal_count = u.bind_sparse_batch->signalSemaphoreCount;
signal_sems = u.bind_sparse_batch->pSignalSemaphores;
break;
default:
unreachable("unexpected batch type");
break;
@@ -191,27 +182,6 @@ vn_queue_submission_prepare_submit(struct vn_queue_submission *submit,
return result;
}
static VkResult
vn_queue_submission_prepare_bind_sparse(
struct vn_queue_submission *submit,
VkQueue queue,
uint32_t batch_count,
const VkBindSparseInfo *bind_sparse_batches,
VkFence fence)
{
submit->batch_type = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
submit->queue = queue;
submit->batch_count = batch_count;
submit->bind_sparse_batches = bind_sparse_batches;
submit->fence = fence;
VkResult result = vn_queue_submission_prepare(submit);
if (result != VK_SUCCESS)
return result;
return VK_SUCCESS;
}
static const VkCommandBuffer
vn_get_fence_feedback_cmd(struct vn_queue *queue, struct vn_fence *fence)
{
@@ -549,32 +519,12 @@ vn_QueueSubmit2(VkQueue queue_h,
}
VkResult
vn_QueueBindSparse(VkQueue _queue,
uint32_t bindInfoCount,
const VkBindSparseInfo *pBindInfo,
VkFence fence)
vn_QueueBindSparse(UNUSED VkQueue _queue,
UNUSED uint32_t bindInfoCount,
UNUSED const VkBindSparseInfo *pBindInfo,
UNUSED VkFence fence)
{
VN_TRACE_FUNC();
struct vn_queue *queue = vn_queue_from_handle(_queue);
struct vn_device *dev = queue->device;
/* TODO allow sparse resource along with sync feedback */
assert(VN_PERF(NO_FENCE_FEEDBACK));
struct vn_queue_submission submit;
VkResult result = vn_queue_submission_prepare_bind_sparse(
&submit, _queue, bindInfoCount, pBindInfo, fence);
if (result != VK_SUCCESS)
return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
result = vn_call_vkQueueBindSparse(
dev->instance, submit.queue, submit.batch_count,
submit.bind_sparse_batches, submit.fence);
if (result != VK_SUCCESS) {
return vn_error(dev->instance, result);
}
return VK_SUCCESS;
return VK_ERROR_DEVICE_LOST;
}
VkResult