zink: zink_heap isn't 1-to-1 with memoryTypeIndex

Clarify the relationship between zink heaps and vulkan memory type
indices, and resolve the issues from mixing the two up.

Closes: #7588, #7813
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20264>
This commit is contained in:
Julia Tatz 2023-01-16 13:09:02 -05:00 committed by Marge Bot
parent 75276deebc
commit f6d3a5755f
2 changed files with 21 additions and 19 deletions

View file

@@ -246,7 +246,8 @@ static struct zink_bo *
bo_create_internal(struct zink_screen *screen, bo_create_internal(struct zink_screen *screen,
uint64_t size, uint64_t size,
unsigned alignment, unsigned alignment,
unsigned heap_idx, enum zink_heap heap,
unsigned mem_type_idx,
unsigned flags, unsigned flags,
const void *pNext) const void *pNext)
{ {
@@ -272,14 +273,14 @@ bo_create_internal(struct zink_screen *screen,
else else
mai.pNext = pNext; mai.pNext = pNext;
mai.allocationSize = size; mai.allocationSize = size;
mai.memoryTypeIndex = heap_idx; mai.memoryTypeIndex = mem_type_idx;
if (screen->info.mem_props.memoryTypes[mai.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) { if (screen->info.mem_props.memoryTypes[mai.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
alignment = MAX2(alignment, screen->info.props.limits.minMemoryMapAlignment); alignment = MAX2(alignment, screen->info.props.limits.minMemoryMapAlignment);
mai.allocationSize = align64(mai.allocationSize, screen->info.props.limits.minMemoryMapAlignment); mai.allocationSize = align64(mai.allocationSize, screen->info.props.limits.minMemoryMapAlignment);
} }
unsigned heap = screen->info.mem_props.memoryTypes[heap_idx].heapIndex; unsigned vk_heap_idx = screen->info.mem_props.memoryTypes[mem_type_idx].heapIndex;
if (mai.allocationSize > screen->info.mem_props.memoryHeaps[heap].size) { if (mai.allocationSize > screen->info.mem_props.memoryHeaps[vk_heap_idx].size) {
mesa_loge("zink: can't allocate %"PRIu64" bytes from heap that's only %"PRIu64" bytes!\n", mai.allocationSize, screen->info.mem_props.memoryHeaps[heap].size); mesa_loge("zink: can't allocate %"PRIu64" bytes from heap that's only %"PRIu64" bytes!\n", mai.allocationSize, screen->info.mem_props.memoryHeaps[vk_heap_idx].size);
return NULL; return NULL;
} }
@@ -294,13 +295,13 @@ bo_create_internal(struct zink_screen *screen,
VkResult ret = VKSCR(AllocateMemory)(screen->dev, &mai, NULL, &bo->mem); VkResult ret = VKSCR(AllocateMemory)(screen->dev, &mai, NULL, &bo->mem);
if (!zink_screen_handle_vkresult(screen, ret)) { if (!zink_screen_handle_vkresult(screen, ret)) {
mesa_loge("zink: couldn't allocate memory: heap=%u size=%" PRIu64, heap_idx, size); mesa_loge("zink: couldn't allocate memory: heap=%u size=%" PRIu64, heap, size);
goto fail; goto fail;
} }
if (init_pb_cache) { if (init_pb_cache) {
bo->u.real.use_reusable_pool = true; bo->u.real.use_reusable_pool = true;
pb_cache_init_entry(&screen->pb.bo_cache, bo->cache_entry, &bo->base, heap_idx); pb_cache_init_entry(&screen->pb.bo_cache, bo->cache_entry, &bo->base, heap);
} else { } else {
#ifdef ZINK_USE_DMABUF #ifdef ZINK_USE_DMABUF
list_inithead(&bo->u.real.exports); list_inithead(&bo->u.real.exports);
@@ -314,7 +315,7 @@ bo_create_internal(struct zink_screen *screen,
bo->base.alignment_log2 = util_logbase2(alignment); bo->base.alignment_log2 = util_logbase2(alignment);
bo->base.size = mai.allocationSize; bo->base.size = mai.allocationSize;
bo->base.vtbl = &bo_vtbl; bo->base.vtbl = &bo_vtbl;
bo->base.placement = heap_idx; bo->base.placement = mem_type_idx;
bo->base.usage = flags; bo->base.usage = flags;
bo->unique_id = p_atomic_inc_return(&screen->pb.next_bo_unique_id); bo->unique_id = p_atomic_inc_return(&screen->pb.next_bo_unique_id);
@@ -608,14 +609,15 @@ zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, en
unsigned low_bound = 128 * 1024 * 1024; //128MB is a very small BAR unsigned low_bound = 128 * 1024 * 1024; //128MB is a very small BAR
if (screen->info.driver_props.driverID == VK_DRIVER_ID_NVIDIA_PROPRIETARY) if (screen->info.driver_props.driverID == VK_DRIVER_ID_NVIDIA_PROPRIETARY)
low_bound *= 2; //nvidia has fat textures or something low_bound *= 2; //nvidia has fat textures or something
reclaim_all = screen->info.mem_props.memoryHeaps[heap_idx].size <= low_bound; unsigned vk_heap_idx = screen->info.mem_props.memoryTypes[heap_idx].heapIndex;
reclaim_all = screen->info.mem_props.memoryHeaps[vk_heap_idx].size <= low_bound;
} }
entry = pb_slab_alloc_reclaimed(slabs, alloc_size, heap_idx, reclaim_all); entry = pb_slab_alloc_reclaimed(slabs, alloc_size, heap, reclaim_all);
if (!entry) { if (!entry) {
/* Clean up buffer managers and try again. */ /* Clean up buffer managers and try again. */
clean_up_buffer_managers(screen); clean_up_buffer_managers(screen);
entry = pb_slab_alloc_reclaimed(slabs, alloc_size, heap_idx, true); entry = pb_slab_alloc_reclaimed(slabs, alloc_size, heap, true);
} }
if (!entry) if (!entry)
return NULL; return NULL;
@@ -649,18 +651,18 @@ no_slab:
if (use_reusable_pool) { if (use_reusable_pool) {
/* Get a buffer from the cache. */ /* Get a buffer from the cache. */
bo = (struct zink_bo*) bo = (struct zink_bo*)
pb_cache_reclaim_buffer(&screen->pb.bo_cache, size, alignment, 0, heap_idx); pb_cache_reclaim_buffer(&screen->pb.bo_cache, size, alignment, 0, heap);
if (bo) if (bo)
return &bo->base; return &bo->base;
} }
/* Create a new one. */ /* Create a new one. */
bo = bo_create_internal(screen, size, alignment, heap_idx, flags, pNext); bo = bo_create_internal(screen, size, alignment, heap, heap_idx, flags, pNext);
if (!bo) { if (!bo) {
/* Clean up buffer managers and try again. */ /* Clean up buffer managers and try again. */
clean_up_buffer_managers(screen); clean_up_buffer_managers(screen);
bo = bo_create_internal(screen, size, alignment, heap_idx, flags, pNext); bo = bo_create_internal(screen, size, alignment, heap, heap_idx, flags, pNext);
if (!bo) if (!bo)
return NULL; return NULL;
} }
@@ -1212,9 +1214,8 @@ bo_slab_alloc(void *priv, unsigned heap, unsigned entry_size, unsigned group_ind
} }
assert(slab_size != 0); assert(slab_size != 0);
slab->buffer = zink_bo(zink_bo_create(screen, slab_size, slab_size, slab->buffer = zink_bo(zink_bo_create(screen, slab_size, slab_size, heap,
zink_heap_from_domain_flags(screen->info.mem_props.memoryTypes[heap].propertyFlags, 0), 0, screen->heap_map[heap][0], NULL));
0, heap, NULL));
if (!slab->buffer) if (!slab->buffer)
goto fail; goto fail;

View file

@@ -192,6 +192,7 @@ enum zink_resource_access {
}; };
/* zink heaps are based off of vulkan memory types, but are not a 1-to-1 mapping to vulkan memory type indices and have no direct relation to vulkan memory heaps */
enum zink_heap { enum zink_heap {
ZINK_HEAP_DEVICE_LOCAL, ZINK_HEAP_DEVICE_LOCAL,
ZINK_HEAP_DEVICE_LOCAL_SPARSE, ZINK_HEAP_DEVICE_LOCAL_SPARSE,
@@ -1256,8 +1257,8 @@ struct zink_screen {
unsigned min_alloc_size; unsigned min_alloc_size;
uint32_t next_bo_unique_id; uint32_t next_bo_unique_id;
} pb; } pb;
uint8_t heap_map[ZINK_HEAP_MAX][VK_MAX_MEMORY_TYPES]; uint8_t heap_map[ZINK_HEAP_MAX][VK_MAX_MEMORY_TYPES]; // mapping from zink heaps to memory type indices
uint8_t heap_count[ZINK_HEAP_MAX]; uint8_t heap_count[ZINK_HEAP_MAX]; // number of memory types per zink heap
bool resizable_bar; bool resizable_bar;
uint64_t total_video_mem; uint64_t total_video_mem;