zink: move batch usage to substruct on zink_bo objects

no functional changes

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23035>
This commit is contained in:
Mike Blumenkrantz 2023-05-12 12:25:41 -04:00 committed by Marge Bot
parent 143da5f2e4
commit 5e1943db7f
7 changed files with 34 additions and 30 deletions

View file

@@ -58,8 +58,8 @@ reset_obj(struct zink_screen *screen, struct zink_batch_state *bs, struct zink_r
 /* prune all existing views */
 obj->view_prune_count = util_dynarray_num_elements(&obj->views, VkBufferView);
 /* prune them when the views will definitely not be in use */
-obj->view_prune_timeline = MAX2(obj->bo->reads ? obj->bo->reads->usage : 0,
-obj->bo->writes ? obj->bo->writes->usage : 0);
+obj->view_prune_timeline = MAX2(obj->bo->reads.u ? obj->bo->reads.u->usage : 0,
+obj->bo->writes.u ? obj->bo->writes.u->usage : 0);
 }
 simple_mtx_unlock(&obj->view_lock);
 }

View file

@@ -157,7 +157,7 @@ bo_can_reclaim(struct zink_screen *screen, struct pb_buffer *pbuf)
 {
 struct zink_bo *bo = zink_bo(pbuf);
-return zink_screen_usage_check_completion(screen, bo->reads) && zink_screen_usage_check_completion(screen, bo->writes);
+return zink_screen_usage_check_completion(screen, bo->reads.u) && zink_screen_usage_check_completion(screen, bo->writes.u);
 }
 static bool
@@ -227,8 +227,8 @@ bo_destroy_or_cache(struct zink_screen *screen, struct pb_buffer *pbuf)
 struct zink_bo *bo = zink_bo(pbuf);
 assert(bo->mem); /* slab buffers have a separate vtbl */
-bo->reads = NULL;
-bo->writes = NULL;
+bo->reads.u = NULL;
+bo->writes.u = NULL;
 if (bo->u.real.use_reusable_pool)
 pb_cache_add_buffer(bo->cache_entry);

View file

@@ -145,30 +145,30 @@ zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, unsigned l
 static inline bool
 zink_bo_has_unflushed_usage(const struct zink_bo *bo)
 {
-return zink_batch_usage_is_unflushed(bo->reads) ||
-zink_batch_usage_is_unflushed(bo->writes);
+return zink_batch_usage_is_unflushed(bo->reads.u) ||
+zink_batch_usage_is_unflushed(bo->writes.u);
 }
 static inline bool
 zink_bo_has_usage(const struct zink_bo *bo)
 {
-return zink_batch_usage_exists(bo->reads) ||
-zink_batch_usage_exists(bo->writes);
+return zink_batch_usage_exists(bo->reads.u) ||
+zink_batch_usage_exists(bo->writes.u);
 }
 static inline bool
 zink_bo_usage_matches(const struct zink_bo *bo, const struct zink_batch_state *bs)
 {
-return zink_batch_usage_matches(bo->reads, bs) ||
-zink_batch_usage_matches(bo->writes, bs);
+return zink_batch_usage_matches(bo->reads.u, bs) ||
+zink_batch_usage_matches(bo->writes.u, bs);
 }
 static inline bool
 zink_bo_usage_check_completion(struct zink_screen *screen, struct zink_bo *bo, enum zink_resource_access access)
 {
-if (access & ZINK_RESOURCE_ACCESS_READ && !zink_screen_usage_check_completion(screen, bo->reads))
+if (access & ZINK_RESOURCE_ACCESS_READ && !zink_screen_usage_check_completion(screen, bo->reads.u))
 return false;
-if (access & ZINK_RESOURCE_ACCESS_WRITE && !zink_screen_usage_check_completion(screen, bo->writes))
+if (access & ZINK_RESOURCE_ACCESS_WRITE && !zink_screen_usage_check_completion(screen, bo->writes.u))
 return false;
 return true;
 }
@@ -176,9 +176,9 @@ zink_bo_usage_check_completion(struct zink_screen *screen, struct zink_bo *bo, e
 static inline bool
 zink_bo_usage_check_completion_fast(struct zink_screen *screen, struct zink_bo *bo, enum zink_resource_access access)
 {
-if (access & ZINK_RESOURCE_ACCESS_READ && !zink_screen_usage_check_completion_fast(screen, bo->reads))
+if (access & ZINK_RESOURCE_ACCESS_READ && !zink_screen_usage_check_completion_fast(screen, bo->reads.u))
 return false;
-if (access & ZINK_RESOURCE_ACCESS_WRITE && !zink_screen_usage_check_completion_fast(screen, bo->writes))
+if (access & ZINK_RESOURCE_ACCESS_WRITE && !zink_screen_usage_check_completion_fast(screen, bo->writes.u))
 return false;
 return true;
 }
@@ -187,35 +187,35 @@ static inline void
 zink_bo_usage_wait(struct zink_context *ctx, struct zink_bo *bo, enum zink_resource_access access)
 {
 if (access & ZINK_RESOURCE_ACCESS_READ)
-zink_batch_usage_wait(ctx, bo->reads);
+zink_batch_usage_wait(ctx, bo->reads.u);
 if (access & ZINK_RESOURCE_ACCESS_WRITE)
-zink_batch_usage_wait(ctx, bo->writes);
+zink_batch_usage_wait(ctx, bo->writes.u);
 }
 static inline void
 zink_bo_usage_try_wait(struct zink_context *ctx, struct zink_bo *bo, enum zink_resource_access access)
 {
 if (access & ZINK_RESOURCE_ACCESS_READ)
-zink_batch_usage_try_wait(ctx, bo->reads);
+zink_batch_usage_try_wait(ctx, bo->reads.u);
 if (access & ZINK_RESOURCE_ACCESS_WRITE)
-zink_batch_usage_try_wait(ctx, bo->writes);
+zink_batch_usage_try_wait(ctx, bo->writes.u);
 }
 static inline void
 zink_bo_usage_set(struct zink_bo *bo, struct zink_batch_state *bs, bool write)
 {
 if (write)
-zink_batch_usage_set(&bo->writes, bs);
+zink_batch_usage_set(&bo->writes.u, bs);
 else
-zink_batch_usage_set(&bo->reads, bs);
+zink_batch_usage_set(&bo->reads.u, bs);
 }
 static inline bool
 zink_bo_usage_unset(struct zink_bo *bo, struct zink_batch_state *bs)
 {
-zink_batch_usage_unset(&bo->reads, bs);
-zink_batch_usage_unset(&bo->writes, bs);
-return bo->reads || bo->writes;
+zink_batch_usage_unset(&bo->reads.u, bs);
+zink_batch_usage_unset(&bo->writes.u, bs);
+return bo->reads.u || bo->writes.u;
 }

View file

@@ -102,7 +102,7 @@ check_resource_for_batch_ref(struct zink_context *ctx, struct zink_resource *res
 * TODO: somehow fix this for perf because it's an extra hash lookup
 */
 if (!res->obj->dt && zink_resource_has_usage(res))
-zink_batch_reference_resource_rw(&ctx->batch, res, !!res->obj->bo->writes);
+zink_batch_reference_resource_rw(&ctx->batch, res, !!res->obj->bo->writes.u);
 else
 zink_batch_reference_resource(&ctx->batch, res);
 }

View file

@@ -113,7 +113,7 @@ zink_resource_usage_is_unflushed(const struct zink_resource *res)
 static inline bool
 zink_resource_usage_is_unflushed_write(const struct zink_resource *res)
 {
-return zink_batch_usage_is_unflushed(res->obj->bo->writes);
+return zink_batch_usage_is_unflushed(res->obj->bo->writes.u);
 }

View file

@@ -258,10 +258,10 @@ unordered_res_exec(const struct zink_context *ctx, const struct zink_resource *r
 if (res->obj->unordered_read && res->obj->unordered_write)
 return true;
 /* if testing write access but have any ordered read access, cannot promote */
-if (is_write && zink_batch_usage_matches(res->obj->bo->reads, ctx->batch.state) && !res->obj->unordered_read)
+if (is_write && zink_batch_usage_matches(res->obj->bo->reads.u, ctx->batch.state) && !res->obj->unordered_read)
 return false;
 /* if write access is unordered or nonexistent, always promote */
-return res->obj->unordered_write || !zink_batch_usage_matches(res->obj->bo->writes, ctx->batch.state);
+return res->obj->unordered_write || !zink_batch_usage_matches(res->obj->bo->writes.u, ctx->batch.state);
 }
 VkCommandBuffer

View file

@@ -549,6 +549,10 @@ struct zink_batch_usage {
 bool unflushed;
 };
+struct zink_bo_usage {
+struct zink_batch_usage *u;
+};
 struct zink_batch_obj_list {
 unsigned max_buffers;
 unsigned num_buffers;
@@ -682,8 +686,8 @@ struct zink_bo {
 simple_mtx_t lock;
-struct zink_batch_usage *reads;
-struct zink_batch_usage *writes;
+struct zink_bo_usage reads;
+struct zink_bo_usage writes;
 struct pb_cache_entry cache_entry[];
 };