zink: even better handling for descriptor oom

In addition to ensuring that all our batches stay under the max size by cycling
them whenever too many active descriptors accumulate, we now use per-program
descriptor pools, so we can apply some limiting there as well and ensure that
no single program hogs all the resources.

Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9348>
This commit is contained in:
Mike Blumenkrantz 2020-09-16 15:38:38 -04:00 committed by Marge Bot
parent 4ec1d5e70c
commit d21e88719a
5 changed files with 30 additions and 52 deletions

View file

@ -48,7 +48,6 @@ struct zink_batch {
VkCommandPool cmdpool;
VkCommandBuffer cmdbuf;
unsigned short max_descs; //set if the device gives oom when allocating a new desc set
unsigned short descs_used; //number of descriptors currently allocated
struct zink_fence *fence;

View file

@ -1795,7 +1795,6 @@ init_batch(struct zink_context *ctx, struct zink_batch *batch, unsigned idx)
if (!batch->fence)
return false;
batch->max_descs = 1500;
return true;
}

View file

@ -223,6 +223,21 @@ get_gfx_program(struct zink_context *ctx)
return ctx->curr_program;
}
/* Reserve a descriptor set for the currently-bound program.
 *
 * Picks the compute or gfx program/batch pair based on is_compute, cycles
 * (flushes) the batch as many times as needed to keep its running descriptor
 * count below ZINK_BATCH_DESC_SIZE, then references the program on the batch
 * and hands the actual allocation off to the program's descriptor pool.
 */
static struct zink_descriptor_set *
get_descriptor_set(struct zink_context *ctx, bool is_compute)
{
   struct zink_program *prog;
   struct zink_batch *target;

   if (is_compute) {
      prog = (struct zink_program *)ctx->curr_compute;
      target = &ctx->compute_batch;
   } else {
      prog = (struct zink_program *)ctx->curr_program;
      target = zink_curr_batch(ctx);
   }

   unsigned needed = prog->num_descriptors;
   /* cycle batches until this allocation fits under the per-batch cap */
   while (target->descs_used + needed >= ZINK_BATCH_DESC_SIZE)
      target = zink_flush_batch(ctx, target);

   zink_batch_reference_program(target, prog);
   return zink_program_allocate_desc_set(ctx, target, prog);
}
struct zink_transition {
struct zink_resource *res;
VkImageLayout layout;
@ -483,55 +498,10 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
}
}
unsigned num_descriptors;
if (is_compute) {
num_descriptors = ctx->curr_compute->base.num_descriptors;
batch = &ctx->compute_batch;
} else {
batch = zink_curr_batch(ctx);
num_descriptors = ctx->curr_program->base.num_descriptors;
}
if (batch->descs_used + num_descriptors >= batch->max_descs) {
batch->descs_used += num_descriptors;
if (is_compute)
zink_wait_on_batch(ctx, ZINK_COMPUTE_BATCH_ID);
else {
ctx->base.flush(&ctx->base, NULL, 0);
batch = zink_curr_batch(ctx);
}
}
struct zink_program *pg = is_compute ? &ctx->curr_compute->base : &ctx->curr_program->base;
zink_batch_reference_program(batch, pg);
assert(pg->num_descriptors == num_descriptors);
struct zink_descriptor_set *zds = zink_program_allocate_desc_set(screen, batch, pg);
/* probably oom, so we need to stall until we free up some descriptors */
if (!zds) {
/* update our max descriptor count so we can try and avoid this happening again */
unsigned short max_descs = 0;
for (int i = 0; i < ZINK_COMPUTE_BATCH_ID; i++)
max_descs += ctx->batches[i].descs_used;
if (ctx->compute_batch.descs_used) {
max_descs += ctx->compute_batch.descs_used;
/* try to split evenly between number of batches */
max_descs /= ZINK_COMPUTE_BATCH_ID;
}
for (int i = 0; i < ZINK_COMPUTE_BATCH_ID; i++)
ctx->batches[i].max_descs = MIN2(max_descs, ctx->batches[i].max_descs);
ctx->compute_batch.max_descs = MIN2(max_descs, ctx->compute_batch.max_descs);
zink_wait_on_batch(ctx, batch->batch_id);
if (!is_compute) {
batch = zink_curr_batch(ctx);
for (int i = 0; i < ZINK_COMPUTE_BATCH_ID; i++) {
zink_reset_batch(ctx, &ctx->batches[i]);
}
}
zink_batch_reference_program(batch, pg);
zds = zink_program_allocate_desc_set(screen, batch, pg);
}
struct zink_descriptor_set *zds = get_descriptor_set(ctx, is_compute);
assert(zds != VK_NULL_HANDLE);
batch = is_compute ? &ctx->compute_batch : zink_curr_batch(ctx);
unsigned check_flush_id = is_compute ? 0 : ZINK_COMPUTE_BATCH_ID;
bool need_flush = false;
@ -553,9 +523,11 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
if (is_compute)
vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_COMPUTE,
ctx->curr_compute->layout, 0, 1, &zds->desc_set, 0, NULL);
else
else {
batch = zink_batch_rp(ctx);
vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
ctx->curr_program->layout, 0, 1, &zds->desc_set, 0, NULL);
}
for (int i = 0; i < num_stages; i++) {
struct zink_shader *shader = stages[i];

View file

@ -675,11 +675,12 @@ fail:
}
struct zink_descriptor_set *
zink_program_allocate_desc_set(struct zink_screen *screen,
zink_program_allocate_desc_set(struct zink_context *ctx,
struct zink_batch *batch,
struct zink_program *pg)
{
struct zink_descriptor_set *zds;
struct zink_screen *screen = zink_screen(ctx->base.screen);
if (util_dynarray_num_elements(&pg->alloc_desc_sets, struct zink_descriptor_set *)) {
/* grab one off the allocated array */
@ -687,6 +688,12 @@ zink_program_allocate_desc_set(struct zink_screen *screen,
goto out;
}
unsigned descs_used = pg->descs_used;
if (descs_used + pg->num_descriptors > ZINK_DEFAULT_MAX_DESCS) {
batch = zink_flush_batch(ctx, batch);
return zink_program_allocate_desc_set(ctx, batch, pg);
}
VkDescriptorSetAllocateInfo dsai;
memset((void *)&dsai, 0, sizeof(dsai));
dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
@ -704,6 +711,7 @@ zink_program_allocate_desc_set(struct zink_screen *screen,
assert(zds);
pipe_reference_init(&zds->reference, 1);
zds->desc_set = desc_set;
pg->descs_used++;
out:
if (zink_batch_add_desc_set(batch, pg, zds))
batch->descs_used += pg->num_descriptors;

View file

@ -170,7 +170,7 @@ void
zink_program_invalidate_desc_set(struct zink_program *pg, struct zink_descriptor_set *zds);
struct zink_descriptor_set *
zink_program_allocate_desc_set(struct zink_screen *screen,
zink_program_allocate_desc_set(struct zink_context *ctx,
struct zink_batch *batch,
struct zink_program *pg);
#endif