mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-03 05:38:16 +02:00
venus: back out implicit fencing handling for NV proprietary
It is tech debt now, since the NV proprietary driver is on the sw wsi path, and rendering to the prime blit dst buffer may never be supported there. Later, if performance optimization is needed for venus on NV, we can downgrade the sw wsi device workaround to a venus dri config, so that setups with tiled explicit modifier support can be performance-optimal. Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35984>
This commit is contained in:
parent
054c8e117e
commit
0282ce8211
5 changed files with 4 additions and 123 deletions
|
|
@ -48,11 +48,6 @@ struct vn_buffer {
|
|||
struct vn_object_base base;
|
||||
|
||||
struct vn_buffer_memory_requirements requirements;
|
||||
|
||||
struct {
|
||||
/* buffer is prime blit dst */
|
||||
struct vn_device_memory *mem;
|
||||
} wsi;
|
||||
};
|
||||
VK_DEFINE_NONDISP_HANDLE_CASTS(vn_buffer,
|
||||
base.vk,
|
||||
|
|
|
|||
|
|
@ -1481,19 +1481,6 @@ vn_CmdCopyImageToBuffer(VkCommandBuffer commandBuffer,
|
|||
uint32_t regionCount,
|
||||
const VkBufferImageCopy *pRegions)
|
||||
{
|
||||
struct vn_image *img = vn_image_from_handle(srcImage);
|
||||
struct vn_buffer *buf = vn_buffer_from_handle(dstBuffer);
|
||||
|
||||
/* The prime blit dst buffer is internal to common wsi layer. Only the
|
||||
* corresponding wsi image can blit to it.
|
||||
*/
|
||||
if (buf->wsi.mem) {
|
||||
assert(img->wsi.is_wsi);
|
||||
assert(img->wsi.is_prime_blit_src);
|
||||
assert(!img->wsi.blit_mem || img->wsi.blit_mem == buf->wsi.mem);
|
||||
img->wsi.blit_mem = buf->wsi.mem;
|
||||
}
|
||||
|
||||
VN_CMD_ENQUEUE(vkCmdCopyImageToBuffer, commandBuffer, srcImage,
|
||||
srcImageLayout, dstBuffer, regionCount, pRegions);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -363,18 +363,14 @@ vn_AllocateMemory(VkDevice device,
|
|||
|
||||
const VkImportMemoryFdInfoKHR *import_fd_info = NULL;
|
||||
const VkMemoryDedicatedAllocateInfo *dedicated_info = NULL;
|
||||
const struct wsi_memory_allocate_info *wsi_info = NULL;
|
||||
vk_foreach_struct_const(pnext, pAllocateInfo->pNext) {
|
||||
switch ((uint32_t)pnext->sType) {
|
||||
switch (pnext->sType) {
|
||||
case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
|
||||
import_fd_info = (const void *)pnext;
|
||||
break;
|
||||
case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
|
||||
dedicated_info = (const void *)pnext;
|
||||
break;
|
||||
case VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA:
|
||||
wsi_info = (const void *)pnext;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
|
@ -395,16 +391,6 @@ vn_AllocateMemory(VkDevice device,
|
|||
import_fd_info->fd);
|
||||
} else {
|
||||
result = vn_device_memory_alloc(dev, mem, pAllocateInfo);
|
||||
|
||||
/* track prime blit dst buffer memory */
|
||||
if (wsi_info && result == VK_SUCCESS) {
|
||||
assert(dedicated_info);
|
||||
if (dedicated_info->buffer != VK_NULL_HANDLE) {
|
||||
struct vn_buffer *buf =
|
||||
vn_buffer_from_handle(dedicated_info->buffer);
|
||||
buf->wsi.mem = mem;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
vn_device_memory_emit_report(dev, mem, /* is_alloc */ true, result);
|
||||
|
|
|
|||
|
|
@ -76,9 +76,6 @@ struct vn_image {
|
|||
|
||||
struct vn_device_memory *memory;
|
||||
|
||||
/* memory backing the prime blit dst buffer */
|
||||
struct vn_device_memory *blit_mem;
|
||||
|
||||
/* For VK_ANDROID_native_buffer, the WSI image owns the memory. */
|
||||
bool memory_owned;
|
||||
} wsi;
|
||||
|
|
|
|||
|
|
@ -10,9 +10,6 @@
|
|||
|
||||
#include "vn_wsi.h"
|
||||
|
||||
#include <xf86drm.h>
|
||||
|
||||
#include "drm-uapi/dma-buf.h"
|
||||
#include "vk_enum_to_str.h"
|
||||
#include "wsi_common_entrypoints.h"
|
||||
|
||||
|
|
@ -163,39 +160,6 @@ vn_wsi_create_image(struct vn_device *dev,
|
|||
|
||||
/* swapchain commands */
|
||||
|
||||
static int
|
||||
vn_wsi_export_sync_file(struct vn_device *dev, struct vn_renderer_bo *bo)
|
||||
{
|
||||
/* Don't keep trying an IOCTL that doesn't exist. */
|
||||
static bool no_dma_buf_sync_file = false;
|
||||
if (no_dma_buf_sync_file)
|
||||
return -1;
|
||||
|
||||
/* For simplicity, export dma-buf here and rely on the dma-buf sync file
|
||||
* export api. On legacy kernels without the new uapi, for the record, we
|
||||
* do have the fallback option to track the wsi bo in the sync payload and
|
||||
* do DRM_IOCTL_VIRTGPU_WAIT where we do sync_wait.
|
||||
*/
|
||||
int dma_buf_fd = vn_renderer_bo_export_dma_buf(dev->renderer, bo);
|
||||
if (dma_buf_fd < 0)
|
||||
return -1;
|
||||
|
||||
struct dma_buf_export_sync_file export = {
|
||||
.flags = DMA_BUF_SYNC_RW,
|
||||
.fd = -1,
|
||||
};
|
||||
int ret = drmIoctl(dma_buf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &export);
|
||||
|
||||
close(dma_buf_fd);
|
||||
|
||||
if (ret && (errno == ENOTTY || errno == EBADF || errno == ENOSYS)) {
|
||||
no_dma_buf_sync_file = true;
|
||||
return -1;
|
||||
}
|
||||
|
||||
return export.fd;
|
||||
}
|
||||
|
||||
VkResult
|
||||
vn_AcquireNextImage2KHR(VkDevice device,
|
||||
const VkAcquireNextImageInfoKHR *pAcquireInfo,
|
||||
|
|
@ -216,57 +180,16 @@ vn_AcquireNextImage2KHR(VkDevice device,
|
|||
if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
|
||||
return vn_error(dev->instance, result);
|
||||
|
||||
/* Extract compositor implicit fence and resolve on the driver side upon
|
||||
* the acquire fence being submitted. Since we used to rely on renderer
|
||||
* side drivers being able to handle implicit in-fence, here we only opt-in
|
||||
* the new behavior for those known to be unable to handle it.
|
||||
*/
|
||||
int sync_fd = -1;
|
||||
if (dev->physical_device->renderer_driver_id ==
|
||||
VK_DRIVER_ID_NVIDIA_PROPRIETARY) {
|
||||
struct vn_image *wsi_img = vn_image_from_handle(
|
||||
wsi_common_get_image(pAcquireInfo->swapchain, *pImageIndex));
|
||||
assert(wsi_img->wsi.is_wsi);
|
||||
|
||||
struct vn_device_memory *wsi_mem = wsi_img->wsi.is_prime_blit_src
|
||||
? wsi_img->wsi.blit_mem
|
||||
: wsi_img->wsi.memory;
|
||||
if (wsi_mem)
|
||||
sync_fd = vn_wsi_export_sync_file(dev, wsi_mem->base_bo);
|
||||
}
|
||||
|
||||
int sem_fd = -1, fence_fd = -1;
|
||||
if (sync_fd >= 0) {
|
||||
if (pAcquireInfo->semaphore != VK_NULL_HANDLE &&
|
||||
pAcquireInfo->fence != VK_NULL_HANDLE) {
|
||||
sem_fd = sync_fd;
|
||||
fence_fd = dup(sync_fd);
|
||||
if (fence_fd < 0) {
|
||||
result = errno == EMFILE ? VK_ERROR_TOO_MANY_OBJECTS
|
||||
: VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
close(sync_fd);
|
||||
return vn_error(dev->instance, result);
|
||||
}
|
||||
} else if (pAcquireInfo->semaphore != VK_NULL_HANDLE) {
|
||||
sem_fd = sync_fd;
|
||||
} else {
|
||||
assert(pAcquireInfo->fence != VK_NULL_HANDLE);
|
||||
fence_fd = sync_fd;
|
||||
}
|
||||
}
|
||||
|
||||
/* XXX this relies on renderer side doing implicit fencing */
|
||||
if (pAcquireInfo->semaphore != VK_NULL_HANDLE) {
|
||||
/* venus waits on the driver side when this semaphore is submitted */
|
||||
const VkImportSemaphoreFdInfoKHR info = {
|
||||
.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
|
||||
.semaphore = pAcquireInfo->semaphore,
|
||||
.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
|
||||
.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
|
||||
.fd = sem_fd,
|
||||
.fd = -1,
|
||||
};
|
||||
result = vn_ImportSemaphoreFdKHR(device, &info);
|
||||
if (result == VK_SUCCESS)
|
||||
sem_fd = -1;
|
||||
}
|
||||
|
||||
if (result == VK_SUCCESS && pAcquireInfo->fence != VK_NULL_HANDLE) {
|
||||
|
|
@ -275,17 +198,10 @@ vn_AcquireNextImage2KHR(VkDevice device,
|
|||
.fence = pAcquireInfo->fence,
|
||||
.flags = VK_FENCE_IMPORT_TEMPORARY_BIT,
|
||||
.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
|
||||
.fd = fence_fd,
|
||||
.fd = -1,
|
||||
};
|
||||
result = vn_ImportFenceFdKHR(device, &info);
|
||||
if (result == VK_SUCCESS)
|
||||
fence_fd = -1;
|
||||
}
|
||||
|
||||
if (sem_fd >= 0)
|
||||
close(sem_fd);
|
||||
if (fence_fd >= 0)
|
||||
close(fence_fd);
|
||||
|
||||
return vn_result(dev->instance, result);
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue