zink: change descriptor flushing to assert

There should never be flushing due to pool depletion; instead, trigger an
OOM flush and stall to replenish the pool after the draw/compute.

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/12012>
This commit is contained in:
Mike Blumenkrantz 2021-05-21 16:54:46 -04:00 committed by Marge Bot
parent 8ba44103b3
commit 4bd6a0299b
5 changed files with 21 additions and 53 deletions

View file

@ -806,15 +806,13 @@ skip_hash_tables:
}
}
if (pool->num_sets_allocated == ZINK_DEFAULT_MAX_DESCS) {
simple_mtx_unlock(&pool->mtx);
zink_fence_wait(&ctx->base);
zink_batch_reference_program(batch, pg);
return zink_descriptor_set_get(ctx, type, is_compute, cache_hit);
}
assert(pool->num_sets_allocated < ZINK_DEFAULT_MAX_DESCS);
zds = allocate_desc_set(ctx, pg, type, descs_used, is_compute);
out:
if (unlikely(pool->num_sets_allocated >= ZINK_DEFAULT_DESC_CLAMP &&
_mesa_hash_table_num_entries(pool->free_desc_sets) < ZINK_DEFAULT_MAX_DESCS - ZINK_DEFAULT_DESC_CLAMP))
ctx->oom_flush = ctx->oom_stall = true;
zds->hash = hash;
populate_zds_key(ctx, type, is_compute, &zds->key, pg->dd->push_usage);
zds->recycled = false;
@ -1326,11 +1324,10 @@ update_descriptors_internal(struct zink_context *ctx, struct zink_descriptor_set
static void
zink_context_update_descriptor_states(struct zink_context *ctx, struct zink_program *pg);
bool
void
zink_descriptors_update(struct zink_context *ctx, bool is_compute)
{
struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
struct zink_batch_state *bs = ctx->batch.state;
zink_context_update_descriptor_states(ctx, pg);
bool cache_hit[ZINK_DESCRIPTOR_TYPES + 1];
@ -1379,7 +1376,6 @@ zink_descriptors_update(struct zink_context *ctx, bool is_compute)
pg->layout, 0, pg->num_dsl, sets,
dynamic_offset_idx, dynamic_offsets);
ctx->dd->pg[is_compute] = pg;
return bs != batch->state;
}
void

View file

@ -227,7 +227,7 @@ zink_descriptor_program_init(struct zink_context *ctx, struct zink_program *pg);
void
zink_descriptor_program_deinit(struct zink_screen *screen, struct zink_program *pg);
bool
void
zink_descriptors_update(struct zink_context *ctx, bool is_compute);
@ -261,7 +261,7 @@ zink_descriptor_program_init_lazy(struct zink_context *ctx, struct zink_program
void
zink_descriptor_program_deinit_lazy(struct zink_screen *screen, struct zink_program *pg);
bool
void
zink_descriptors_update_lazy(struct zink_context *ctx, bool is_compute);

View file

@ -414,23 +414,15 @@ populate_sets(struct zink_context *ctx, struct zink_program *pg, uint8_t *change
return false;
} else
sets[0] = VK_NULL_HANDLE;
/* may have flushed */
if (bs != ctx->batch.state)
*changed_sets = pg->dd->binding_usage;
/* no flushing allowed */
assert(ctx->batch.state == bs);
bs = ctx->batch.state;
u_foreach_bit(type, *changed_sets) {
if (pg->dd->layout_key[type]) {
struct zink_descriptor_pool *pool = get_descriptor_pool_lazy(ctx, pg, type, bs, pg->is_compute);
sets[type + 1] = get_descriptor_set_lazy(ctx, pg, type, pool, pg->is_compute);
if (ctx->batch.state != bs && (sets[0] || type != ffs(*changed_sets))) {
/* sets are allocated by batch state, so if flush occurs on anything
* but the first set that has been fetched here, get all new sets
*/
*changed_sets = pg->dd->binding_usage;
if (pg->dd->push_usage)
need_push = true;
return populate_sets(ctx, pg, changed_sets, need_push, sets);
}
/* no flushing allowed */
assert(ctx->batch.state == bs);
} else
sets[type + 1] = ctx->dd->dummy_set;
if (!sets[type + 1])
@ -446,14 +438,13 @@ zink_descriptor_set_update_lazy(struct zink_context *ctx, struct zink_program *p
screen->vk.UpdateDescriptorSetWithTemplate(screen->dev, set, pg->dd->layouts[type + 1]->desc_template, ctx);
}
bool
void
zink_descriptors_update_lazy(struct zink_context *ctx, bool is_compute)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
struct zink_batch *batch = &ctx->batch;
struct zink_batch_state *bs = ctx->batch.state;
struct zink_program *pg = is_compute ? &ctx->curr_compute->base : &ctx->curr_program->base;
bool ret = false;
bool batch_changed = !bdd_lazy(bs)->pg[is_compute];
if (batch_changed) {
@ -485,16 +476,10 @@ zink_descriptors_update_lazy(struct zink_context *ctx, bool is_compute)
(dd_lazy(ctx)->push_state_changed[is_compute] || batch_changed);
if (!populate_sets(ctx, pg, &changed_sets, need_push, desc_sets)) {
debug_printf("ZINK: couldn't get descriptor sets!\n");
return false;
}
if (ctx->batch.state != bs) {
/* recheck: populate may have overflowed the pool and triggered a flush */
batch_changed = true;
ret = true;
dd_lazy(ctx)->state_changed[is_compute] = pg->dd->binding_usage;
changed_sets = pg->dd->binding_usage & dd_lazy(ctx)->state_changed[is_compute];
dd_lazy(ctx)->push_state_changed[is_compute] = !!pg->dd->push_usage;
return;
}
/* no flushing allowed */
assert(ctx->batch.state == bs);
bs = ctx->batch.state;
if (pg->dd->binding_usage && changed_sets) {
@ -534,7 +519,6 @@ zink_descriptors_update_lazy(struct zink_context *ctx, bool is_compute)
/* set again in case of flushing */
bdd_lazy(bs)->pg[is_compute] = pg;
ctx->dd->pg[is_compute] = pg;
return ret;
}
void

View file

@ -455,15 +455,6 @@ zink_draw_vbo(struct pipe_context *pctx,
ctx->gfx_prim_mode = mode;
update_gfx_program(ctx);
if (zink_program_has_descriptors(&ctx->curr_program->base)) {
if (screen->descriptors_update(ctx, false)) {
/* descriptors have flushed the batch */
zink_select_draw_vbo(ctx);
pctx->draw_vbo(pctx, dinfo, drawid_offset, dindirect, draws, num_draws);
return;
}
}
if (ctx->gfx_pipeline_state.primitive_restart != dinfo->primitive_restart)
ctx->gfx_pipeline_state.dirty = true;
ctx->gfx_pipeline_state.primitive_restart = dinfo->primitive_restart;
@ -493,6 +484,9 @@ zink_draw_vbo(struct pipe_context *pctx,
vkCmdBindIndexBuffer(batch->state->cmdbuf, res->obj->buffer, index_offset, index_type[index_size >> 1]);
}
if (zink_program_has_descriptors(&ctx->curr_program->base))
screen->descriptors_update(ctx, false);
bool have_streamout = !!ctx->num_so_targets;
if (have_streamout) {
if (ctx->xfb_barrier)
@ -781,14 +775,8 @@ zink_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
update_compute_program(ctx);
if (zink_program_has_descriptors(&ctx->curr_compute->base)) {
if (screen->descriptors_update(ctx, true)) {
/* descriptors have flushed the batch */
zink_select_launch_grid(ctx);
pctx->launch_grid(pctx, info);
return;
}
}
if (zink_program_has_descriptors(&ctx->curr_compute->base))
screen->descriptors_update(ctx, true);
zink_program_update_compute_pipeline_state(ctx, ctx->curr_compute, info->block);
VkPipeline prev_pipeline = ctx->compute_pipeline_state.pipeline;

View file

@ -136,7 +136,7 @@ struct zink_screen {
bool (*descriptor_program_init)(struct zink_context *ctx, struct zink_program *pg);
void (*descriptor_program_deinit)(struct zink_screen *screen, struct zink_program *pg);
bool (*descriptors_update)(struct zink_context *ctx, bool is_compute);
void (*descriptors_update)(struct zink_context *ctx, bool is_compute);
void (*context_update_descriptor_states)(struct zink_context *ctx, bool is_compute);
void (*context_invalidate_descriptor_state)(struct zink_context *ctx, enum pipe_shader_type shader,
enum zink_descriptor_type type,