v3dv: limit V3D_TFU_READAHEAD to buffers/images with USAGE_TRANSFER_SRC flag
We avoid unconditionally adding the 64-byte padding to every Vulkan memory allocation. The readahead padding is now only added for buffers/images with the USAGE_TRANSFER_SRC_BIT usage enabled, which is enough for a full vk-cts run without reported TFU MMU errors.

Note that vk-cts doesn't exercise the image memory requirements codepath added to handle this readahead. This is because of the 64-byte alignment already required for images with the VK_IMAGE_USAGE_TRANSFER_SRC_BIT flag. That alignment, however, doesn't cover the case where an image that is already 64-byte aligned ends exactly at the end of the memory page.

Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36159>
parent 4e033ffb27
commit f0b3a4fcaf

1 changed file with 28 additions and 24 deletions
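To make the failure mode concrete, here is a small worked sketch of the case the readahead padding guards against. The 4096-byte page size and the 64-byte V3D_TFU_READAHEAD_SIZE come from the patch; the image size is a made-up value for illustration.

    #include <stdio.h>

    #define PAGE_SIZE              4096u
    #define V3D_TFU_READAHEAD_SIZE 64u

    int main(void)
    {
       /* A TRANSFER_SRC image that is 64-byte aligned and whose data ends
        * exactly at the end of the last page of its allocation. */
       unsigned image_size = 2 * PAGE_SIZE;  /* hypothetical size             */
       unsigned alloc_end  = image_size;     /* old: no per-image padding     */
       unsigned tfu_reach  = image_size + V3D_TFU_READAHEAD_SIZE;

       printf("TFU may read up to byte %u but the mapping ends at %u: %s\n",
              tfu_reach, alloc_end, tfu_reach > alloc_end ? "MMU error" : "ok");

       /* With the patch, the reported image size becomes image_size + 64,
        * which the allocation then rounds up to the next page boundary. */
       return 0;
    }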
@@ -2182,15 +2182,7 @@ v3dv_AllocateMemory(VkDevice _device,
    assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

-   /* We always allocate device memory in multiples of a page, so round up
-    * requested size to that. We need to add a V3D_TFU_READAHEAD padding to
-    * avoid invalid reads done by the TFU unit after the end of the last page
-    * allocated.
-    */
-
-   const VkDeviceSize alloc_size = align64(pAllocateInfo->allocationSize +
-                                           V3D_TFU_READAHEAD_SIZE, 4096);
-
+   const VkDeviceSize alloc_size = align64(pAllocateInfo->allocationSize, 4096);
    if (unlikely(alloc_size > MAX_MEMORY_ALLOCATION_SIZE))
       return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
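Dropping the unconditional padding here is safe because the readahead space is now folded into the sizes reported by the memory-requirement queries, so an application that allocates what the driver reports already gets enough room. A minimal sketch of that flow, assuming a hypothetical allocate_for_buffer() helper and an already-created device and buffer:

    #include <vulkan/vulkan.h>

    /* Sketch: query the (now readahead-inclusive) requirements and allocate
     * exactly that much. */
    static VkResult
    allocate_for_buffer(VkDevice device, VkBuffer buffer, VkDeviceMemory *out_mem)
    {
       VkBufferMemoryRequirementsInfo2 info = {
          .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
          .buffer = buffer,
       };
       VkMemoryRequirements2 reqs = {
          .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
       };
       vkGetBufferMemoryRequirements2(device, &info, &reqs);

       /* The reported size already accounts for the TFU/ldunifa readahead when
        * the buffer usage requires it, so no extra padding is needed here. */
       VkMemoryAllocateInfo alloc_info = {
          .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
          .allocationSize = reqs.memoryRequirements.size,
          .memoryTypeIndex = 0,   /* v3dv advertises a single type (bits 0x1) */
       };
       return vkAllocateMemory(device, &alloc_info, NULL, out_mem);
    }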
@@ -2360,10 +2352,18 @@ get_image_memory_requirements(struct v3dv_image *image,
                               VkImageAspectFlagBits planeAspect,
                               VkMemoryRequirements2 *pMemoryRequirements)
 {
+   uint32_t readahead = 0;
+   /* The TFU unit has a 64-bytes readahead so we need to add a
+    * V3D_TFU_READAHEAD padding to avoid invalid reads done by the TFU after
+    * the end of the last allocated memory page causing MMU error.
+    */
+   if (image->vk.usage & (VK_IMAGE_USAGE_TRANSFER_SRC_BIT))
+      readahead = V3D_TFU_READAHEAD_SIZE;
+
    pMemoryRequirements->memoryRequirements = (VkMemoryRequirements) {
       .memoryTypeBits = 0x1,
       .alignment = image->planes[0].alignment,
-      .size = image->non_disjoint_size
+      .size = image->non_disjoint_size ? image->non_disjoint_size + readahead : 0
    };

    if (planeAspect != VK_IMAGE_ASPECT_NONE) {
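Seen from the API side, the effect of this hunk is that the reported size for a TRANSFER_SRC image grows by 64 bytes. A hypothetical query sketch for a single-plane, non-disjoint image (the query_image_size() helper is made up for the example):

    #include <vulkan/vulkan.h>

    /* Sketch: with this patch, the size reported for a non-disjoint image
     * created with VK_IMAGE_USAGE_TRANSFER_SRC_BIT includes the 64-byte
     * V3D_TFU_READAHEAD_SIZE, so binding the image at the very end of an
     * allocation no longer lets the TFU read past the last mapped page. */
    static VkDeviceSize
    query_image_size(VkDevice device, VkImage image)
    {
       VkImageMemoryRequirementsInfo2 info = {
          .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
          .image = image,
       };
       VkMemoryRequirements2 reqs = {
          .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
       };
       vkGetImageMemoryRequirements2(device, &info, &reqs);
       return reqs.memoryRequirements.size;
    }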
@@ -2376,7 +2376,7 @@ get_image_memory_requirements(struct v3dv_image *image,
       VkMemoryRequirements *mem_reqs =
          &pMemoryRequirements->memoryRequirements;
       mem_reqs->alignment = image->planes[plane].alignment;
-      mem_reqs->size = image->planes[plane].size;
+      mem_reqs->size = image->planes[plane].size + readahead;
    }

    vk_foreach_struct(ext, pMemoryRequirements->pNext) {
@@ -2580,24 +2580,28 @@ static void
 get_buffer_memory_requirements(struct v3dv_buffer *buffer,
                                VkMemoryRequirements2 *pMemoryRequirements)
 {
+   uint32_t readahead = 0;
+   /* UBO and SSBO may be read using ldunifa, which prefetches the next 4
+    * bytes after a read. If the buffer's size is exactly a multiple of a page
+    * size and the shader reads the last 4 bytes with ldunifa the prefetching
+    * would read out of bounds and cause an MMU error, so we allocate extra
+    * space to avoid kernel error spamming. The TFU unit has also a 64-bytes
+    * readahead so we need to add a V3D_TFU_READAHEAD padding to avoid invalid
+    * reads done by the TFU after the end of the last allocated memory page.
+    */
+   if (buffer->usage & (VK_BUFFER_USAGE_TRANSFER_SRC_BIT))
+      readahead = V3D_TFU_READAHEAD_SIZE;
+   else if (buffer->usage & (VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
+                             VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)) {
+      readahead = 4;
+   }
+
    pMemoryRequirements->memoryRequirements = (VkMemoryRequirements) {
       .memoryTypeBits = 0x1,
       .alignment = buffer->alignment,
-      .size = align64(buffer->size, buffer->alignment),
+      .size = align64(buffer->size + readahead, buffer->alignment),
    };

-   /* UBO and SSBO may be read using ldunifa, which prefetches the next
-    * 4 bytes after a read. If the buffer's size is exactly a multiple
-    * of a page size and the shader reads the last 4 bytes with ldunifa
-    * the prefetching would read out of bounds and cause an MMU error,
-    * so we allocate extra space to avoid kernel error spamming.
-    */
-   bool can_ldunifa = buffer->usage &
-      (VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
-       VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
-   if (can_ldunifa && (buffer->size % 4096 == 0))
-      pMemoryRequirements->memoryRequirements.size += buffer->alignment;
-
    vk_foreach_struct(ext, pMemoryRequirements->pNext) {
       switch (ext->sType) {
       case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
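One practical consequence of this last hunk: a buffer used only as a transfer source previously got no per-buffer padding at all (the TFU readahead was added at vkAllocateMemory time instead), whereas the new code folds the 64 bytes into the reported size. A small hypothetical calculation, assuming a 256-byte buffer alignment picked only for illustration:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define V3D_TFU_READAHEAD_SIZE 64u

    /* Power-of-two round-up, matching the semantics of the align64() helper
     * used in the hunk above. */
    static uint64_t
    align_up_64(uint64_t v, uint64_t a)
    {
       return (v + a - 1) & ~(a - 1);
    }

    int main(void)
    {
       const uint64_t alignment   = 256;   /* assumed alignment, illustration only  */
       const uint64_t buffer_size = 8192;  /* exact multiple of the 4096-byte page  */

       /* Old behaviour: no per-buffer padding for a pure TRANSFER_SRC buffer;
        * the TFU readahead used to be added unconditionally at allocation time. */
       uint64_t old_size = align_up_64(buffer_size, alignment);

       /* New behaviour: the 64-byte readahead is folded into the reported size. */
       uint64_t new_size = align_up_64(buffer_size + V3D_TFU_READAHEAD_SIZE, alignment);

       printf("old: %" PRIu64 ", new: %" PRIu64 "\n", old_size, new_size);
       return 0;
    }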