zink: stop unmapping resources

it turns out there's not actually a requirement that resources be unmapped,
which means that a ton of overhead can be saved both in the unmap codepath
(the cpu overhead here is pretty insane) and then also when mapping cached
resource memory, as the map can now be added to the cache for immediate reuse

as seen in radeonsi

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9980>
This commit is contained in:
Mike Blumenkrantz 2021-01-26 18:10:13 -05:00 committed by Marge Bot
parent eab985d070
commit f19946ca6e
4 changed files with 26 additions and 31 deletions

View file

@@ -125,8 +125,9 @@ cache_or_free_mem(struct zink_screen *screen, struct zink_resource_object *obj)
util_dynarray_init(array, screen->resource_mem_cache);
_mesa_hash_table_insert_pre_hashed(screen->resource_mem_cache, obj->mem_hash, mkey, array);
}
if (util_dynarray_num_elements(array, VkDeviceMemory) < 5) {
util_dynarray_append(array, VkDeviceMemory, obj->mem);
if (util_dynarray_num_elements(array, struct mem_cache_entry) < 5) {
struct mem_cache_entry mc = { obj->mem, obj->map };
util_dynarray_append(array, struct mem_cache_entry, mc);
simple_mtx_unlock(&screen->mem_cache_mtx);
return;
}
@@ -138,7 +139,6 @@ cache_or_free_mem(struct zink_screen *screen, struct zink_resource_object *obj)
void
zink_destroy_resource_object(struct zink_screen *screen, struct zink_resource_object *obj)
{
assert(!obj->map_count);
if (obj->is_buffer) {
if (obj->sbuffer)
vkDestroyBuffer(screen->dev, obj->sbuffer, NULL);
@@ -146,7 +146,6 @@ zink_destroy_resource_object(struct zink_screen *screen, struct zink_resource_ob
} else {
vkDestroyImage(screen->dev, obj->image, NULL);
}
simple_mtx_destroy(&obj->map_mtx);
zink_descriptor_set_refs_clear(&obj->desc_set_refs, obj);
cache_or_free_mem(screen, obj);
@@ -363,7 +362,6 @@ resource_object_create(struct zink_screen *screen, const struct pipe_resource *t
pipe_reference_init(&obj->reference, 1);
util_dynarray_init(&obj->desc_set_refs.refs, NULL);
simple_mtx_init(&obj->map_mtx, mtx_plain);
if (templ->target == PIPE_BUFFER) {
VkBufferCreateInfo bci = create_bci(screen, templ, templ->bind);
@@ -503,8 +501,10 @@ resource_object_create(struct zink_screen *screen, const struct pipe_resource *t
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(screen->resource_mem_cache, obj->mem_hash, &obj->mkey);
struct util_dynarray *array = he ? (void*)he->data : NULL;
if (array && util_dynarray_num_elements(array, VkDeviceMemory)) {
obj->mem = util_dynarray_pop(array, VkDeviceMemory);
if (array && util_dynarray_num_elements(array, struct mem_cache_entry)) {
struct mem_cache_entry mc = util_dynarray_pop(array, struct mem_cache_entry);
obj->mem = mc.mem;
obj->map = mc.map;
}
simple_mtx_unlock(&screen->mem_cache_mtx);
}
@@ -776,26 +776,19 @@ get_most_recent_access(struct zink_resource *res, enum zink_resource_access flag
static void *
map_resource(struct zink_screen *screen, struct zink_resource *res)
{
simple_mtx_lock(&res->obj->map_mtx);
VkResult result = VK_SUCCESS;
if (!res->obj->map_count)
result = vkMapMemory(screen->dev, res->obj->mem, res->obj->offset,
res->obj->size, 0, &res->obj->map);
res->obj->map_count++;
simple_mtx_unlock(&res->obj->map_mtx);
if (res->obj->map)
return res->obj->map;
result = vkMapMemory(screen->dev, res->obj->mem, res->obj->offset,
res->obj->size, 0, &res->obj->map);
return result == VK_SUCCESS ? res->obj->map : NULL;
}
static void
unmap_resource(struct zink_screen *screen, struct zink_resource *res)
{
simple_mtx_lock(&res->obj->map_mtx);
res->obj->map_count--;
if (!res->obj->map_count) {
res->obj->map = NULL;
vkUnmapMemory(screen->dev, res->obj->mem);
}
simple_mtx_unlock(&res->obj->map_mtx);
res->obj->map = NULL;
vkUnmapMemory(screen->dev, res->obj->mem);
}
static void *
@@ -805,6 +798,9 @@ buffer_transfer_map(struct zink_context *ctx, struct zink_resource *res, unsigne
struct zink_screen *screen = zink_screen(ctx->base.screen);
void *ptr = NULL;
if (res->base.is_user_ptr)
usage |= PIPE_MAP_PERSISTENT;
/* See if the buffer range being mapped has never been initialized,
* in which case it can be mapped unsynchronized. */
if (!(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
@@ -859,10 +855,6 @@ buffer_transfer_map(struct zink_context *ctx, struct zink_resource *res, unsigne
(struct pipe_resource **)&trans->staging_res, (void **)&ptr);
res = zink_resource(trans->staging_res);
trans->offset = offset;
/* replacing existing map, still need to increment refcount for tracking since
* unmaps will still occur
*/
p_atomic_inc(&res->obj->map_count);
res->obj->map = ptr;
} else {
/* At this point, the buffer is always idle (we checked it above). */
@@ -1091,9 +1083,7 @@ zink_transfer_unmap(struct pipe_context *pctx,
zink_transfer_flush_region(pctx, ptrans, &ptrans->box);
}
if (trans->staging_res) {
unmap_resource(screen, zink_resource(trans->staging_res));
} else
if (trans->base.b.usage & PIPE_MAP_ONCE && !trans->staging_res)
unmap_resource(screen, res);
if ((trans->base.b.usage & PIPE_MAP_PERSISTENT) && !(trans->base.b.usage & PIPE_MAP_COHERENT))
res->obj->persistent_maps--;

View file

@@ -73,8 +73,6 @@ struct zink_resource_object {
struct zink_batch_usage reads;
struct zink_batch_usage writes;
simple_mtx_t map_mtx;
unsigned map_count;
void *map;
bool is_buffer;
bool host_visible;

View file

@@ -903,8 +903,9 @@ static void
resource_cache_entry_destroy(struct zink_screen *screen, struct hash_entry *he)
{
struct util_dynarray *array = (void*)he->data;
util_dynarray_foreach(array, VkDeviceMemory, mem)
vkFreeMemory(screen->dev, *mem, NULL);
util_dynarray_foreach(array, struct mem_cache_entry, mc) {
vkFreeMemory(screen->dev, mc->mem, NULL);
}
util_dynarray_fini(array);
}

View file

@@ -197,6 +197,12 @@ zink_screen(struct pipe_screen *pipe)
return (struct zink_screen *)pipe;
}
struct mem_cache_entry {
VkDeviceMemory mem;
void *map;
};
VkFormat
zink_get_format(struct zink_screen *screen, enum pipe_format format);