zink: clamp to 500 max batch states on nvidia

I've been advised that leaving this unclamped will use up all the fds
allotted to a process

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13844>
Mike Blumenkrantz, 2021-11-17 14:15:18 -05:00 (committed by Marge Bot)
parent a3be30665f
commit 35ffadb9e7
3 changed files with 12 additions and 2 deletions
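
For context on the motivation: the "fds allotted to a process" are bounded by the RLIMIT_NOFILE soft limit, which commonly defaults to 1024 on desktop Linux, so if every live fence holds an open file descriptor on the NVIDIA proprietary driver (as the commit message implies), a few thousand unreclaimed batch states are enough to exhaust the budget. A standalone way to inspect that limit, illustrative only and not part of the commit:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
   struct rlimit rl;

   /* RLIMIT_NOFILE is the per-process cap on open file descriptors;
    * the soft limit commonly defaults to 1024 on desktop Linux. */
   if (getrlimit(RLIMIT_NOFILE, &rl) != 0) {
      perror("getrlimit");
      return 1;
   }
   printf("fd soft limit: %llu, hard limit: %llu\n",
          (unsigned long long)rl.rlim_cur,
          (unsigned long long)rl.rlim_max);
   return 0;
}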


@@ -339,8 +339,8 @@ post_submit(void *data, void *gdata, int thread_index)
       if (bs->ctx->reset.reset)
          bs->ctx->reset.reset(bs->ctx->reset.data, PIPE_GUILTY_CONTEXT_RESET);
       screen->device_lost = true;
-   } else if (bs->ctx->batch_states_count > 5000) {
-      zink_screen_batch_id_wait(screen, bs->fence.batch_id - 2500, PIPE_TIMEOUT_INFINITE);
+   } else if (bs->ctx->batch_states_count > screen->max_fences) {
+      zink_screen_batch_id_wait(screen, bs->fence.batch_id - (screen->max_fences / 2), PIPE_TIMEOUT_INFINITE);
    }
 }

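The hunk above is the throttle itself: post_submit() now compares the live batch-state count against the per-driver cap instead of a hardcoded 5000, and on overflow it blocks until the batch sitting max_fences / 2 submissions behind the newest one has signaled, so roughly half a cap's worth of states (and their fences) can be reclaimed in one wait. A minimal compilable sketch of that policy, with wait_for_batch() as a hypothetical stand-in for zink_screen_batch_id_wait():

#include <stdint.h>

/* Hypothetical stand-in for zink_screen_batch_id_wait(), stubbed out so the
 * sketch compiles; the real function blocks until the given batch id has
 * signaled. */
static void
wait_for_batch(uint32_t batch_id)
{
   (void)batch_id;
}

/* Mirrors the post_submit() throttle above: once more batch states are live
 * than the cap allows, wait on the batch half a cap behind the newest one,
 * so roughly max_fences / 2 states (and their fds) become reclaimable. */
static void
throttle(unsigned live_states, uint32_t newest_batch_id, unsigned max_fences)
{
   if (live_states > max_fences)
      wait_for_batch(newest_batch_id - max_fences / 2);
}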

@@ -2028,6 +2028,15 @@ zink_internal_create_screen(const struct pipe_screen_config *config)
    if (!os_get_total_physical_memory(&screen->total_mem))
       goto fail;
 
+   switch (screen->info.driver_props.driverID) {
+   case VK_DRIVER_ID_NVIDIA_PROPRIETARY:
+      screen->max_fences = 500;
+      break;
+   default:
+      screen->max_fences = 5000;
+      break;
+   }
+
    if (debug_get_bool_option("ZINK_NO_TIMELINES", false))
       screen->info.have_KHR_timeline_semaphore = false;
    if (screen->info.have_KHR_timeline_semaphore)

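The driverID switched on above comes from VkPhysicalDeviceDriverProperties, which zink caches as screen->info.driver_props during screen creation. A self-contained sketch of the same selection against the raw Vulkan 1.2 API; pick_max_fences() is hypothetical, as zink does this inline rather than through a helper:

#include <vulkan/vulkan.h>

/* Query VkPhysicalDeviceDriverProperties and derive a fence cap from the
 * reported driver, mirroring the switch in the hunk above. `pdev` is assumed
 * to be a valid VkPhysicalDevice from a Vulkan 1.2 instance. */
static unsigned
pick_max_fences(VkPhysicalDevice pdev)
{
   VkPhysicalDeviceDriverProperties driver_props = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES,
   };
   VkPhysicalDeviceProperties2 props2 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
      .pNext = &driver_props,
   };
   vkGetPhysicalDeviceProperties2(pdev, &props2);

   switch (driver_props.driverID) {
   case VK_DRIVER_ID_NVIDIA_PROPRIETARY:
      return 500;  /* keep fence-backed fds well under the process limit */
   default:
      return 5000;
   }
}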

@@ -134,6 +134,7 @@ struct zink_screen {
    uint32_t gfx_queue;
    uint32_t max_queues;
    uint32_t timestamp_valid_bits;
+   unsigned max_fences;
    VkDevice dev;
    VkQueue queue; //gfx+compute
    VkQueue thread_queue; //gfx+compute