gallium/u_threaded: Add helper to assert driver thread

Useful for drivers to add sanity checks that avoid or detect threading
issues caused by things that might be called (indirectly) from the
frontend thread.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9323>
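
For illustration, intended driver-side usage of the new helper might look
like the sketch below. The driver names (my_context, my_emit_cmdstream) are
hypothetical and not part of this patch; only threaded_context and
tc_assert_driver_thread() come from it. Note that the helper accepts a NULL
context, so drivers can call it unconditionally even when threading is
disabled:

    #include "util/u_threaded_context.h"

    struct my_context {
       struct pipe_context base;
       /* Back-pointer to the wrapping threaded_context; NULL when
        * threading is disabled (tc_assert_driver_thread() handles NULL). */
       struct threaded_context *tc;
    };

    /* A path that must only run on the driver thread, e.g. command
     * stream emission.  If a pipe entry point reaches this indirectly
     * from the frontend thread, the assert fires in debug builds. */
    static void
    my_emit_cmdstream(struct my_context *ctx)
    {
       tc_assert_driver_thread(ctx->tc);
       /* ... emit hardware commands ... */
    }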
--- a/src/gallium/auxiliary/util/u_threaded_context.c
+++ b/src/gallium/auxiliary/util/u_threaded_context.c
@@ -82,10 +82,26 @@ tc_debug_check(struct threaded_context *tc)
 {
    for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
       tc_batch_check(&tc->batch_slots[i]);
-      tc_assert(tc->batch_slots[i].pipe == tc->pipe);
+      tc_assert(tc->batch_slots[i].tc == tc);
    }
 }
 
+static void
+tc_set_driver_thread(struct threaded_context *tc)
+{
+#ifndef NDEBUG
+   tc->driver_thread = util_get_thread_id();
+#endif
+}
+
+static void
+tc_clear_driver_thread(struct threaded_context *tc)
+{
+#ifndef NDEBUG
+   memset(&tc->driver_thread, 0, sizeof(tc->driver_thread));
+#endif
+}
+
 /* We don't want to read or write min_index and max_index, because
  * it shouldn't be needed by drivers at this point.
  */
@@ -148,10 +164,11 @@ static void
 tc_batch_execute(void *job, UNUSED int thread_index)
 {
    struct tc_batch *batch = job;
-   struct pipe_context *pipe = batch->pipe;
+   struct pipe_context *pipe = batch->tc->pipe;
    struct tc_call *last = &batch->call[batch->num_total_call_slots];
 
    tc_batch_check(batch);
+   tc_set_driver_thread(batch->tc);
 
    assert(!batch->token);
@@ -210,6 +227,7 @@ tc_batch_execute(void *job, UNUSED int thread_index)
       iter += iter->num_call_slots;
    }
 
+   tc_clear_driver_thread(batch->tc);
    tc_batch_check(batch);
    batch->num_total_call_slots = 0;
 }
@@ -544,12 +562,18 @@ tc_get_query_result(struct pipe_context *_pipe,
    struct threaded_context *tc = threaded_context(_pipe);
    struct threaded_query *tq = threaded_query(query);
    struct pipe_context *pipe = tc->pipe;
+   bool flushed = tq->flushed;
 
-   if (!tq->flushed)
+   if (!flushed) {
       tc_sync_msg(tc, wait ? "wait" : "nowait");
+      tc_set_driver_thread(tc);
+   }
 
    bool success = pipe->get_query_result(pipe, query, wait, result);
 
+   if (!flushed)
+      tc_clear_driver_thread(tc);
+
    if (success) {
       tq->flushed = true;
       if (list_is_linked(&tq->head_unflushed)) {
@@ -1691,12 +1715,18 @@ tc_transfer_map(struct pipe_context *_pipe,
       tc_sync_msg(tc, resource->target != PIPE_BUFFER ? " texture" :
                       usage & PIPE_MAP_DISCARD_RANGE ? " discard_range" :
                       usage & PIPE_MAP_READ ? " read" : " staging conflict");
+      tc_set_driver_thread(tc);
    }
 
    tc->bytes_mapped_estimate += box->width;
-   return pipe->transfer_map(pipe, tres->latest ? tres->latest : resource,
-                             level, usage, box, transfer);
+   void *ret = pipe->transfer_map(pipe, tres->latest ? tres->latest : resource,
+                                  level, usage, box, transfer);
+
+   if (!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC))
+      tc_clear_driver_thread(tc);
+
+   return ret;
 }
 
 struct tc_transfer_flush_region {
@@ -1991,8 +2021,10 @@ tc_texture_subdata(struct pipe_context *_pipe,
       struct pipe_context *pipe = tc->pipe;
 
       tc_sync(tc);
+      tc_set_driver_thread(tc);
       pipe->texture_subdata(pipe, resource, level, usage, box, data,
                             stride, layer_stride);
+      tc_clear_driver_thread(tc);
    }
 }
@@ -2319,7 +2351,9 @@ out_of_memory:
    if (!(flags & PIPE_FLUSH_DEFERRED))
       tc_flush_queries(tc);
 
+   tc_set_driver_thread(tc);
    pipe->flush(pipe, fence, flags);
+   tc_clear_driver_thread(tc);
 }
 
 static void
@@ -3034,7 +3068,7 @@ threaded_context_create(struct pipe_context *pipe,
    for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
       tc->batch_slots[i].sentinel = TC_SENTINEL;
-      tc->batch_slots[i].pipe = pipe;
+      tc->batch_slots[i].tc = tc;
       util_queue_fence_init(&tc->batch_slots[i].fence);
    }

--- a/src/gallium/auxiliary/util/u_threaded_context.h
+++ b/src/gallium/auxiliary/util/u_threaded_context.h
@@ -181,11 +181,13 @@
 #ifndef U_THREADED_CONTEXT_H
 #define U_THREADED_CONTEXT_H
 
+#include "c11/threads.h"
 #include "pipe/p_context.h"
 #include "pipe/p_state.h"
 #include "util/u_inlines.h"
 #include "util/u_queue.h"
 #include "util/u_range.h"
+#include "util/u_thread.h"
 #include "util/slab.h"
 
 struct threaded_context;
@@ -341,7 +343,7 @@ struct tc_unflushed_batch_token {
 };
 
 struct tc_batch {
-   struct pipe_context *pipe;
+   struct threaded_context *tc;
    unsigned sentinel;
    unsigned num_total_call_slots;
    struct tc_unflushed_batch_token *token;
@@ -376,6 +378,15 @@ struct threaded_context {
    struct util_queue queue;
    struct util_queue_fence *fence;
 
+#ifndef NDEBUG
+   /**
+    * The driver thread is normally the queue thread, but
+    * there are cases where the queue is flushed directly
+    * from the frontend thread
+    */
+   thread_id driver_thread;
+#endif
+
    unsigned last, next;
 
    struct tc_batch batch_slots[TC_MAX_BATCHES];
 };
@@ -435,4 +446,20 @@ tc_unflushed_batch_token_reference(struct tc_unflushed_batch_token **dst,
    *dst = src;
 }
 
+/**
+ * Helper for !NDEBUG builds to assert that it is called from driver
+ * thread.  This is to help drivers ensure that various code-paths
+ * are not hit indirectly from pipe entry points that are called from
+ * front-end/state-tracker thread.
+ */
+static inline void
+tc_assert_driver_thread(struct threaded_context *tc)
+{
+   if (!tc)
+      return;
+#ifndef NDEBUG
+   assert(util_thread_id_equal(tc->driver_thread, util_get_thread_id()));
+#endif
+}
+
 #endif
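
As a side note, util_get_thread_id() and util_thread_id_equal() used by this
patch are, to my understanding, thin wrappers over the C11 threads API
(thrd_current()/thrd_equal() in util/u_thread.h, which is not part of this
diff). A standalone sketch of the identity check the assert performs, using
only the C11 API:

    #include <assert.h>
    #include <threads.h>

    static thrd_t driver_thread;   /* plays the role of tc->driver_thread */

    static int
    driver_main(void *arg)
    {
       driver_thread = thrd_current();                      /* tc_set_driver_thread() */
       assert(thrd_equal(driver_thread, thrd_current()));   /* passes on this thread */
       return 0;
    }

    int
    main(void)
    {
       thrd_t t;
       if (thrd_create(&t, driver_main, NULL) != thrd_success)
          return 1;
       thrd_join(t, NULL);
       /* The frontend (main) thread is not the recorded driver thread,
        * so the equivalent of tc_assert_driver_thread() would fail here: */
       assert(!thrd_equal(driver_thread, thrd_current()));
       return 0;
    }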