zink: allocate/place memory using memoryTypeIndex directly

It's possible for drivers to have multiple heaps with identical flags,
so this will enable passing the heap that should actually be used for
allocation

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/19281>
This commit is contained in:
Mike Blumenkrantz 2022-10-24 13:26:31 -04:00 committed by Marge Bot
parent a3552048c7
commit f9515d9376
3 changed files with 24 additions and 23 deletions

View file

@@ -246,7 +246,7 @@ static struct zink_bo *
bo_create_internal(struct zink_screen *screen, bo_create_internal(struct zink_screen *screen,
uint64_t size, uint64_t size,
unsigned alignment, unsigned alignment,
enum zink_heap heap, unsigned heap_idx,
unsigned flags, unsigned flags,
const void *pNext) const void *pNext)
{ {
@@ -272,14 +272,14 @@ bo_create_internal(struct zink_screen *screen,
else else
mai.pNext = pNext; mai.pNext = pNext;
mai.allocationSize = size; mai.allocationSize = size;
mai.memoryTypeIndex = screen->heap_map[heap]; mai.memoryTypeIndex = heap_idx;
if (screen->info.mem_props.memoryTypes[mai.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) { if (screen->info.mem_props.memoryTypes[mai.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
alignment = MAX2(alignment, screen->info.props.limits.minMemoryMapAlignment); alignment = MAX2(alignment, screen->info.props.limits.minMemoryMapAlignment);
mai.allocationSize = align64(mai.allocationSize, screen->info.props.limits.minMemoryMapAlignment); mai.allocationSize = align64(mai.allocationSize, screen->info.props.limits.minMemoryMapAlignment);
} }
unsigned heap_idx = screen->info.mem_props.memoryTypes[screen->heap_map[heap]].heapIndex; unsigned heap = screen->info.mem_props.memoryTypes[heap_idx].heapIndex;
if (mai.allocationSize > screen->info.mem_props.memoryHeaps[heap_idx].size) { if (mai.allocationSize > screen->info.mem_props.memoryHeaps[heap].size) {
mesa_loge("zink: can't allocate %"PRIu64" bytes from heap that's only %"PRIu64" bytes!\n", mai.allocationSize, screen->info.mem_props.memoryHeaps[heap_idx].size); mesa_loge("zink: can't allocate %"PRIu64" bytes from heap that's only %"PRIu64" bytes!\n", mai.allocationSize, screen->info.mem_props.memoryHeaps[heap].size);
return NULL; return NULL;
} }
@@ -294,13 +294,13 @@ bo_create_internal(struct zink_screen *screen,
VkResult ret = VKSCR(AllocateMemory)(screen->dev, &mai, NULL, &bo->mem); VkResult ret = VKSCR(AllocateMemory)(screen->dev, &mai, NULL, &bo->mem);
if (!zink_screen_handle_vkresult(screen, ret)) { if (!zink_screen_handle_vkresult(screen, ret)) {
mesa_loge("zink: couldn't allocate memory: heap=%u size=%" PRIu64, heap, size); mesa_loge("zink: couldn't allocate memory: heap=%u size=%" PRIu64, heap_idx, size);
goto fail; goto fail;
} }
if (init_pb_cache) { if (init_pb_cache) {
bo->u.real.use_reusable_pool = true; bo->u.real.use_reusable_pool = true;
pb_cache_init_entry(&screen->pb.bo_cache, bo->cache_entry, &bo->base, heap); pb_cache_init_entry(&screen->pb.bo_cache, bo->cache_entry, &bo->base, heap_idx);
} else { } else {
#ifdef ZINK_USE_DMABUF #ifdef ZINK_USE_DMABUF
list_inithead(&bo->u.real.exports); list_inithead(&bo->u.real.exports);
@@ -314,7 +314,7 @@ bo_create_internal(struct zink_screen *screen,
bo->base.alignment_log2 = util_logbase2(alignment); bo->base.alignment_log2 = util_logbase2(alignment);
bo->base.size = mai.allocationSize; bo->base.size = mai.allocationSize;
bo->base.vtbl = &bo_vtbl; bo->base.vtbl = &bo_vtbl;
bo->base.placement = screen->heap_flags[heap]; bo->base.placement = heap_idx;
bo->base.usage = flags; bo->base.usage = flags;
bo->unique_id = p_atomic_inc_return(&screen->pb.next_bo_unique_id); bo->unique_id = p_atomic_inc_return(&screen->pb.next_bo_unique_id);
@@ -381,7 +381,7 @@ sparse_backing_alloc(struct zink_screen *screen, struct zink_bo *bo,
size = MAX2(size, ZINK_SPARSE_BUFFER_PAGE_SIZE); size = MAX2(size, ZINK_SPARSE_BUFFER_PAGE_SIZE);
buf = zink_bo_create(screen, size, ZINK_SPARSE_BUFFER_PAGE_SIZE, buf = zink_bo_create(screen, size, ZINK_SPARSE_BUFFER_PAGE_SIZE,
ZINK_HEAP_DEVICE_LOCAL, 0, NULL); ZINK_HEAP_DEVICE_LOCAL, 0, screen->heap_map[ZINK_HEAP_DEVICE_LOCAL], NULL);
if (!buf) { if (!buf) {
FREE(best_backing->chunks); FREE(best_backing->chunks);
FREE(best_backing); FREE(best_backing);
@@ -559,7 +559,7 @@ error_alloc_commitments:
} }
struct pb_buffer * struct pb_buffer *
zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, enum zink_heap heap, enum zink_alloc_flag flags, const void *pNext) zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, enum zink_heap heap, enum zink_alloc_flag flags, unsigned heap_idx, const void *pNext)
{ {
struct zink_bo *bo; struct zink_bo *bo;
/* pull in sparse flag */ /* pull in sparse flag */
@@ -608,15 +608,14 @@ zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, en
unsigned low_bound = 128 * 1024 * 1024; //128MB is a very small BAR unsigned low_bound = 128 * 1024 * 1024; //128MB is a very small BAR
if (screen->info.driver_props.driverID == VK_DRIVER_ID_NVIDIA_PROPRIETARY) if (screen->info.driver_props.driverID == VK_DRIVER_ID_NVIDIA_PROPRIETARY)
low_bound *= 2; //nvidia has fat textures or something low_bound *= 2; //nvidia has fat textures or something
unsigned heapidx = screen->info.mem_props.memoryTypes[screen->heap_map[heap]].heapIndex; reclaim_all = screen->info.mem_props.memoryHeaps[heap_idx].size <= low_bound;
reclaim_all = screen->info.mem_props.memoryHeaps[heapidx].size <= low_bound;
} }
entry = pb_slab_alloc_reclaimed(slabs, alloc_size, heap, reclaim_all); entry = pb_slab_alloc_reclaimed(slabs, alloc_size, heap_idx, reclaim_all);
if (!entry) { if (!entry) {
/* Clean up buffer managers and try again. */ /* Clean up buffer managers and try again. */
clean_up_buffer_managers(screen); clean_up_buffer_managers(screen);
entry = pb_slab_alloc_reclaimed(slabs, alloc_size, heap, true); entry = pb_slab_alloc_reclaimed(slabs, alloc_size, heap_idx, true);
} }
if (!entry) if (!entry)
return NULL; return NULL;
@@ -650,18 +649,18 @@ no_slab:
if (use_reusable_pool) { if (use_reusable_pool) {
/* Get a buffer from the cache. */ /* Get a buffer from the cache. */
bo = (struct zink_bo*) bo = (struct zink_bo*)
pb_cache_reclaim_buffer(&screen->pb.bo_cache, size, alignment, 0, heap); pb_cache_reclaim_buffer(&screen->pb.bo_cache, size, alignment, 0, heap_idx);
if (bo) if (bo)
return &bo->base; return &bo->base;
} }
/* Create a new one. */ /* Create a new one. */
bo = bo_create_internal(screen, size, alignment, heap, flags, pNext); bo = bo_create_internal(screen, size, alignment, heap_idx, flags, pNext);
if (!bo) { if (!bo) {
/* Clean up buffer managers and try again. */ /* Clean up buffer managers and try again. */
clean_up_buffer_managers(screen); clean_up_buffer_managers(screen);
bo = bo_create_internal(screen, size, alignment, heap, flags, pNext); bo = bo_create_internal(screen, size, alignment, heap_idx, flags, pNext);
if (!bo) if (!bo)
return NULL; return NULL;
} }
@@ -1213,7 +1212,9 @@ bo_slab_alloc(void *priv, unsigned heap, unsigned entry_size, unsigned group_ind
} }
assert(slab_size != 0); assert(slab_size != 0);
slab->buffer = zink_bo(zink_bo_create(screen, slab_size, slab_size, heap, 0, NULL)); slab->buffer = zink_bo(zink_bo_create(screen, slab_size, slab_size,
zink_heap_from_domain_flags(screen->info.mem_props.memoryTypes[heap].propertyFlags, 0),
0, heap, NULL));
if (!slab->buffer) if (!slab->buffer)
goto fail; goto fail;

View file

@@ -100,7 +100,7 @@ void
zink_bo_deinit(struct zink_screen *screen); zink_bo_deinit(struct zink_screen *screen);
struct pb_buffer * struct pb_buffer *
zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, enum zink_heap heap, enum zink_alloc_flag flags, const void *pNext); zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, enum zink_heap heap, enum zink_alloc_flag flags, unsigned heap_idx, const void *pNext);
bool bool
zink_bo_get_kms_handle(struct zink_screen *screen, struct zink_bo *bo, int fd, uint32_t *handle); zink_bo_get_kms_handle(struct zink_screen *screen, struct zink_bo *bo, int fd, uint32_t *handle);

View file

@@ -1035,7 +1035,7 @@ resource_object_create(struct zink_screen *screen, const struct pipe_resource *t
alignment = MAX2(alignment, screen->info.props.limits.minMemoryMapAlignment); alignment = MAX2(alignment, screen->info.props.limits.minMemoryMapAlignment);
obj->alignment = alignment; obj->alignment = alignment;
retry: retry:
obj->bo = zink_bo(zink_bo_create(screen, reqs.size, alignment, heap, mai.pNext ? ZINK_ALLOC_NO_SUBALLOC : 0, mai.pNext)); obj->bo = zink_bo(zink_bo_create(screen, reqs.size, alignment, heap, mai.pNext ? ZINK_ALLOC_NO_SUBALLOC : 0, mai.memoryTypeIndex, mai.pNext));
if (!obj->bo) { if (!obj->bo) {
if (heap == ZINK_HEAP_DEVICE_LOCAL_VISIBLE) { if (heap == ZINK_HEAP_DEVICE_LOCAL_VISIBLE) {
if (templ->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT || templ->usage == PIPE_USAGE_DYNAMIC) if (templ->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT || templ->usage == PIPE_USAGE_DYNAMIC)
@@ -1053,9 +1053,9 @@ retry:
obj->size = zink_bo_get_size(obj->bo); obj->size = zink_bo_get_size(obj->bo);
} }
obj->coherent = obj->bo->base.placement & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; obj->coherent = screen->info.mem_props.memoryTypes[obj->bo->base.placement].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
if (!(templ->flags & PIPE_RESOURCE_FLAG_SPARSE)) { if (!(templ->flags & PIPE_RESOURCE_FLAG_SPARSE)) {
obj->host_visible = obj->bo->base.placement & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; obj->host_visible = screen->info.mem_props.memoryTypes[obj->bo->base.placement].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
} }
if (templ->target == PIPE_BUFFER) { if (templ->target == PIPE_BUFFER) {
@@ -1854,7 +1854,7 @@ zink_buffer_map(struct pipe_context *pctx,
usage |= PIPE_MAP_UNSYNCHRONIZED; usage |= PIPE_MAP_UNSYNCHRONIZED;
} else if (!(usage & PIPE_MAP_UNSYNCHRONIZED) && } else if (!(usage & PIPE_MAP_UNSYNCHRONIZED) &&
(((usage & PIPE_MAP_READ) && !(usage & PIPE_MAP_PERSISTENT) && (((usage & PIPE_MAP_READ) && !(usage & PIPE_MAP_PERSISTENT) &&
((res->obj->bo->base.placement & VK_STAGING_RAM) != VK_STAGING_RAM)) || ((screen->info.mem_props.memoryTypes[res->obj->bo->base.placement].propertyFlags & VK_STAGING_RAM) != VK_STAGING_RAM)) ||
!res->obj->host_visible)) { !res->obj->host_visible)) {
assert(!(usage & (TC_TRANSFER_MAP_THREADED_UNSYNC | PIPE_MAP_THREAD_SAFE))); assert(!(usage & (TC_TRANSFER_MAP_THREADED_UNSYNC | PIPE_MAP_THREAD_SAFE)));
if (!res->obj->host_visible || !(usage & PIPE_MAP_ONCE)) { if (!res->obj->host_visible || !(usage & PIPE_MAP_ONCE)) {