zink: move queue submit thread to screen

this needs to be a screen-based queue in order to serialize properly
with multiple contexts

Acked-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11437>
This commit is contained in:
Mike Blumenkrantz 2021-05-09 12:55:39 -04:00 committed by Marge Bot
parent d4159963e3
commit 478f129ee7
5 changed files with 12 additions and 13 deletions

View file

@@ -300,9 +300,6 @@ zink_start_batch(struct zink_context *ctx, struct zink_batch *batch)
 if (ctx->last_fence) {
 struct zink_batch_state *last_state = zink_batch_state(ctx->last_fence);
 batch->last_batch_usage = &last_state->usage;
-} else {
-if (zink_screen(ctx->base.screen)->threaded)
-util_queue_init(&batch->flush_queue, "zfq", 8, 1, UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL);
 }
if (!ctx->queries_disabled)
zink_resume_queries(ctx, batch);
@@ -546,9 +543,9 @@ zink_end_batch(struct zink_context *ctx, struct zink_batch *batch)
 if (screen->device_lost)
 return;
-if (util_queue_is_initialized(&batch->flush_queue)) {
+if (screen->threaded) {
 batch->state->queue = screen->thread_queue;
-util_queue_add_job(&batch->flush_queue, batch->state, &batch->state->flush_completed,
+util_queue_add_job(&screen->flush_queue, batch->state, &batch->state->flush_completed,
 submit_queue, post_submit, 0);
 } else {
 batch->state->queue = screen->queue;

View file

@@ -101,7 +101,6 @@ struct zink_batch {
 struct zink_batch_state *state;
 struct zink_batch_usage *last_batch_usage;
-struct util_queue flush_queue; //TODO: move to wsi
 bool has_work;
 bool in_rp; //renderpass is currently active

View file

@@ -90,9 +90,6 @@ zink_context_destroy(struct pipe_context *pctx)
 zink_surface_reference(screen, (struct zink_surface**)&ctx->dummy_surface, NULL);
 zink_buffer_view_reference(screen, &ctx->dummy_bufferview, NULL);
-if (ctx->tc)
-util_queue_destroy(&ctx->batch.flush_queue);
 simple_mtx_destroy(&ctx->batch_mtx);
 zink_clear_batch_state(ctx, ctx->batch.state);
 zink_batch_state_reference(screen, &ctx->batch.state, NULL);
@@ -187,7 +184,7 @@ zink_set_context_param(struct pipe_context *pctx, enum pipe_context_param param,
 switch (param) {
 case PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE:
-util_set_thread_affinity(ctx->batch.flush_queue.threads[0],
+util_set_thread_affinity(zink_screen(ctx->base.screen)->flush_queue.threads[0],
 util_get_cpu_caps()->L3_affinity_mask[value],
 NULL, util_get_cpu_caps()->num_cpu_mask_bits);
 break;
@@ -1688,7 +1685,7 @@ zink_end_render_pass(struct zink_context *ctx, struct zink_batch *batch)
 static void
 sync_flush(struct zink_context *ctx, struct zink_batch_state *bs)
 {
-if (util_queue_is_initialized(&ctx->batch.flush_queue))
+if (zink_screen(ctx->base.screen)->threaded)
 util_queue_fence_wait(&bs->flush_completed);
 }
@@ -2560,7 +2557,7 @@ zink_check_batch_completion(struct zink_context *ctx, uint32_t batch_id)
 }
 simple_mtx_unlock(&ctx->batch_mtx);
 assert(fence);
-if (util_queue_is_initialized(&ctx->batch.flush_queue) &&
+if (zink_screen(ctx->base.screen)->threaded &&
 !util_queue_fence_is_signalled(&zink_batch_state(fence)->flush_completed))
 return false;
 return zink_vkfence_wait(zink_screen(ctx->base.screen), fence, 0);
@@ -3177,7 +3174,7 @@ zink_resource_commit(struct pipe_context *pctx, struct pipe_resource *pres, unsi
 mem_bind.memoryOffset = box->x;
 mem_bind.flags = 0;
 sparse_bind.pBinds = &mem_bind;
-VkQueue queue = util_queue_is_initialized(&ctx->batch.flush_queue) ? screen->thread_queue : screen->queue;
+VkQueue queue = screen->threaded ? screen->thread_queue : screen->queue;
 VkResult ret = vkQueueBindSparse(queue, 1, &sparse, VK_NULL_HANDLE);
 if (!zink_screen_handle_vkresult(screen, ret)) {

View file

@@ -1020,6 +1020,8 @@ zink_destroy_screen(struct pipe_screen *pscreen)
 if (screen->prev_sem)
 vkDestroySemaphore(screen->dev, screen->prev_sem, NULL);
+if (screen->threaded)
+util_queue_destroy(&screen->flush_queue);
 vkDestroyDevice(screen->dev, NULL);
 vkDestroyInstance(screen->instance, NULL);
@@ -1596,6 +1598,8 @@ zink_internal_create_screen(const struct pipe_screen_config *config)
 util_cpu_detect();
 screen->threaded = util_get_cpu_caps()->nr_cpus > 1 && debug_get_bool_option("GALLIUM_THREAD", util_get_cpu_caps()->nr_cpus > 1);
+if (screen->threaded)
+util_queue_init(&screen->flush_queue, "zfq", 8, 1, UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL);
 zink_debug = debug_get_option_zink_debug();
 screen->descriptor_mode = debug_get_option_zink_descriptor_mode();

View file

@@ -35,6 +35,7 @@
 #include "util/disk_cache.h"
 #include "util/log.h"
 #include "util/simple_mtx.h"
+#include "util/u_queue.h"
 #include "util/u_live_shader_cache.h"
 #include <vulkan/vulkan.h>
@@ -72,6 +73,7 @@ struct zink_screen {
 uint32_t last_finished; //this is racy but ultimately doesn't matter
 VkSemaphore sem;
 VkSemaphore prev_sem;
+struct util_queue flush_queue;
 bool device_lost;
 struct sw_winsys *winsys;