diff --git a/src/gallium/drivers/freedreno/a4xx/fd4_query.c b/src/gallium/drivers/freedreno/a4xx/fd4_query.c index 73a5af1268c..1f379167f2a 100644 --- a/src/gallium/drivers/freedreno/a4xx/fd4_query.c +++ b/src/gallium/drivers/freedreno/a4xx/fd4_query.c @@ -115,10 +115,11 @@ time_elapsed_enable(struct fd_context *ctx, struct fd_ringbuffer *ring) * just hard coded. If we start exposing more countables than we * have counters, we will need to be more clever. */ - struct fd_batch *batch = fd_context_batch(ctx); + struct fd_batch *batch = fd_context_batch_locked(ctx); fd_wfi(batch, ring); OUT_PKT0(ring, REG_A4XX_CP_PERFCTR_CP_SEL_0, 1); OUT_RING(ring, CP_ALWAYS_COUNT); + fd_batch_unlock_submit(batch); fd_batch_reference(&batch, NULL); } diff --git a/src/gallium/drivers/freedreno/a6xx/fd6_blitter.c b/src/gallium/drivers/freedreno/a6xx/fd6_blitter.c index c1019dc114a..e60de0bb46c 100644 --- a/src/gallium/drivers/freedreno/a6xx/fd6_blitter.c +++ b/src/gallium/drivers/freedreno/a6xx/fd6_blitter.c @@ -808,6 +808,9 @@ handle_rgba_blit(struct fd_context *ctx, const struct pipe_blit_info *info) fd_screen_unlock(ctx->screen); + bool ret = fd_batch_lock_submit(batch); + assert(ret); + /* Clearing last_fence must come after the batch dependency tracking * (resource_read()/resource_write()), as that can trigger a flush, * re-populating last_fence @@ -841,6 +844,8 @@ handle_rgba_blit(struct fd_context *ctx, const struct pipe_blit_info *info) fd6_event_write(batch, batch->draw, CACHE_FLUSH_TS, true); fd6_cache_inv(batch, batch->draw); + fd_batch_unlock_submit(batch); + fd_resource(info->dst.resource)->valid = true; batch->needs_flush = true; diff --git a/src/gallium/drivers/freedreno/freedreno_batch.c b/src/gallium/drivers/freedreno/freedreno_batch.c index fadd4f32816..d9017b5c73a 100644 --- a/src/gallium/drivers/freedreno/freedreno_batch.c +++ b/src/gallium/drivers/freedreno/freedreno_batch.c @@ -127,6 +127,8 @@ fd_batch_create(struct fd_context *ctx, bool nondraw) batch->ctx = ctx; 
batch->nondraw = nondraw; + simple_mtx_init(&batch->submit_lock, mtx_plain); + batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal); @@ -294,6 +296,9 @@ __fd_batch_destroy(struct fd_batch *batch) util_copy_framebuffer_state(&batch->framebuffer, NULL); batch_fini(batch); + + simple_mtx_destroy(&batch->submit_lock); + free(batch); fd_screen_lock(ctx->screen); } @@ -319,7 +324,7 @@ batch_flush(struct fd_batch *batch) { DBG("%p: needs_flush=%d", batch, batch->needs_flush); - if (batch->flushed) + if (!fd_batch_lock_submit(batch)) return; batch->needs_flush = false; @@ -332,6 +337,8 @@ batch_flush(struct fd_batch *batch) batch_flush_reset_dependencies(batch, true); batch->flushed = true; + if (batch == batch->ctx->batch) + fd_batch_reference(&batch->ctx->batch, NULL); fd_fence_ref(&batch->ctx->last_fence, batch->fence); @@ -341,8 +348,14 @@ batch_flush(struct fd_batch *batch) debug_assert(batch->reference.count > 0); fd_screen_lock(batch->ctx->screen); + /* NOTE: remove=false removes the batch from the hashtable, so future + * lookups won't cache-hit a flushed batch, but leaves the weak reference + * to the batch to avoid having multiple batches with same batch->idx, as + * that causes all sorts of hilarity. 
+ */ fd_bc_invalidate_batch(batch, false); fd_screen_unlock(batch->ctx->screen); + fd_batch_unlock_submit(batch); } /* NOTE: could drop the last ref to batch @@ -357,13 +370,7 @@ fd_batch_flush(struct fd_batch *batch) * up used_resources */ fd_batch_reference(&tmp, batch); - batch_flush(tmp); - - if (batch == batch->ctx->batch) { - fd_batch_reference(&batch->ctx->batch, NULL); - } - fd_batch_reference(&tmp, NULL); } diff --git a/src/gallium/drivers/freedreno/freedreno_batch.h b/src/gallium/drivers/freedreno/freedreno_batch.h index aae3471d032..a08b7826d06 100644 --- a/src/gallium/drivers/freedreno/freedreno_batch.h +++ b/src/gallium/drivers/freedreno/freedreno_batch.h @@ -30,6 +30,7 @@ #include "util/u_inlines.h" #include "util/u_queue.h" #include "util/list.h" +#include "util/simple_mtx.h" #include "freedreno_context.h" #include "freedreno_util.h" @@ -57,6 +58,11 @@ struct fd_batch { struct fd_context *ctx; + /* submit_lock serializes cmdstream emission and flush. Acquire before + * screen->lock. + */ + simple_mtx_t submit_lock; + /* do we need to mem2gmem before rendering. We don't, if for example, * there was a glClear() that invalidated the entire previous buffer * contents. Keep track of which buffer(s) are cleared, or needs @@ -297,6 +303,26 @@ fd_batch_reference(struct fd_batch **ptr, struct fd_batch *batch) fd_screen_unlock(ctx->screen); } +static inline void +fd_batch_unlock_submit(struct fd_batch *batch) +{ + simple_mtx_unlock(&batch->submit_lock); +} + +/** + * Returns true if submit-lock was acquired, false if failed to acquire lock, + * ie. batch already flushed. 
+ */ +static inline bool MUST_CHECK +fd_batch_lock_submit(struct fd_batch *batch) +{ + simple_mtx_lock(&batch->submit_lock); + bool ret = !batch->flushed; + if (!ret) + fd_batch_unlock_submit(batch); + return ret; +} + static inline void fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage) { diff --git a/src/gallium/drivers/freedreno/freedreno_context.c b/src/gallium/drivers/freedreno/freedreno_context.c index 9393965e22a..b8149513d7a 100644 --- a/src/gallium/drivers/freedreno/freedreno_context.c +++ b/src/gallium/drivers/freedreno/freedreno_context.c @@ -202,13 +202,18 @@ fd_emit_string_marker(struct pipe_context *pctx, const char *string, int len) if (!ctx->batch) return; + struct fd_batch *batch = fd_context_batch_locked(ctx); + ctx->batch->needs_flush = true; if (ctx->screen->gpu_id >= 500) { - fd_emit_string5(ctx->batch->draw, string, len); + fd_emit_string5(batch->draw, string, len); } else { - fd_emit_string(ctx->batch->draw, string, len); + fd_emit_string(batch->draw, string, len); } + + fd_batch_unlock_submit(batch); + fd_batch_reference(&batch, NULL); } /** @@ -256,12 +261,32 @@ fd_context_batch(struct fd_context *ctx) fd_batch_reference(&ctx->batch, batch); fd_context_all_dirty(ctx); } - fd_context_switch_to(ctx, batch); return batch; } +/** + * Return a locked reference to the current batch. A batch with submit + * lock held is protected against flushing while the lock is held. + * The submit-lock should be acquired before screen-lock. The submit-lock + * should be held while emitting cmdstream. 
+ */ +struct fd_batch * +fd_context_batch_locked(struct fd_context *ctx) +{ + struct fd_batch *batch = NULL; + + while (!batch) { + batch = fd_context_batch(ctx); + if (!fd_batch_lock_submit(batch)) { + fd_batch_reference(&batch, NULL); + } + } + + return batch; +} + void fd_context_destroy(struct pipe_context *pctx) { diff --git a/src/gallium/drivers/freedreno/freedreno_context.h b/src/gallium/drivers/freedreno/freedreno_context.h index 7637f0b4788..e2064c0d5d4 100644 --- a/src/gallium/drivers/freedreno/freedreno_context.h +++ b/src/gallium/drivers/freedreno/freedreno_context.h @@ -504,6 +504,7 @@ fd_supported_prim(struct fd_context *ctx, unsigned prim) void fd_context_switch_from(struct fd_context *ctx); void fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch); struct fd_batch * fd_context_batch(struct fd_context *ctx); +struct fd_batch * fd_context_batch_locked(struct fd_context *ctx); void fd_context_setup_common_vbos(struct fd_context *ctx); void fd_context_cleanup_common_vbos(struct fd_context *ctx); diff --git a/src/gallium/drivers/freedreno/freedreno_draw.c b/src/gallium/drivers/freedreno/freedreno_draw.c index 5db8391b55f..9437c7d1eae 100644 --- a/src/gallium/drivers/freedreno/freedreno_draw.c +++ b/src/gallium/drivers/freedreno/freedreno_draw.c @@ -268,7 +268,7 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info) batch_draw_tracking(batch, info); - if (unlikely(ctx->batch != batch)) { + while (unlikely(!fd_batch_lock_submit(batch))) { /* The current batch was flushed in batch_draw_tracking() * so start anew. 
We know this won't happen a second time * since we are dealing with a fresh batch: @@ -330,6 +330,7 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info) fd_context_all_dirty(ctx); fd_batch_check_size(batch); + fd_batch_unlock_submit(batch); fd_batch_reference(&batch, NULL); if (info == &new_info) @@ -406,7 +407,7 @@ fd_clear(struct pipe_context *pctx, unsigned buffers, batch_clear_tracking(batch, buffers); - if (unlikely(ctx->batch != batch)) { + while (unlikely(!fd_batch_lock_submit(batch))) { /* The current batch was flushed in batch_clear_tracking() * so start anew. We know this won't happen a second time * since we are dealing with a fresh batch: @@ -445,11 +446,13 @@ fd_clear(struct pipe_context *pctx, unsigned buffers, } } + fd_batch_check_size(batch); + fd_batch_unlock_submit(batch); + if (fallback) { fd_blitter_clear(pctx, buffers, color, depth, stencil); } - fd_batch_check_size(batch); fd_batch_reference(&batch, NULL); } diff --git a/src/gallium/drivers/freedreno/freedreno_query_acc.c b/src/gallium/drivers/freedreno/freedreno_query_acc.c index f02e22c4f43..b06de302656 100644 --- a/src/gallium/drivers/freedreno/freedreno_query_acc.c +++ b/src/gallium/drivers/freedreno/freedreno_query_acc.c @@ -113,8 +113,9 @@ fd_acc_begin_query(struct fd_context *ctx, struct fd_query *q) * need to just emit the capture at this moment. 
*/ if (skip_begin_query(q->type)) { - struct fd_batch *batch = fd_context_batch(ctx); + struct fd_batch *batch = fd_context_batch_locked(ctx); fd_acc_query_resume(aq, batch); + fd_batch_unlock_submit(batch); fd_batch_reference(&batch, NULL); } } diff --git a/src/gallium/drivers/freedreno/freedreno_query_hw.c b/src/gallium/drivers/freedreno/freedreno_query_hw.c index 28d270adb2d..43d1739cec6 100644 --- a/src/gallium/drivers/freedreno/freedreno_query_hw.c +++ b/src/gallium/drivers/freedreno/freedreno_query_hw.c @@ -135,7 +135,7 @@ fd_hw_destroy_query(struct fd_context *ctx, struct fd_query *q) static void fd_hw_begin_query(struct fd_context *ctx, struct fd_query *q) { - struct fd_batch *batch = fd_context_batch(ctx); + struct fd_batch *batch = fd_context_batch_locked(ctx); struct fd_hw_query *hq = fd_hw_query(q); DBG("%p", q); @@ -150,13 +150,14 @@ fd_hw_begin_query(struct fd_context *ctx, struct fd_query *q) assert(list_is_empty(&hq->list)); list_addtail(&hq->list, &ctx->hw_active_queries); + fd_batch_unlock_submit(batch); fd_batch_reference(&batch, NULL); } static void fd_hw_end_query(struct fd_context *ctx, struct fd_query *q) { - struct fd_batch *batch = fd_context_batch(ctx); + struct fd_batch *batch = fd_context_batch_locked(ctx); struct fd_hw_query *hq = fd_hw_query(q); DBG("%p", q); @@ -167,6 +168,7 @@ fd_hw_end_query(struct fd_context *ctx, struct fd_query *q) /* remove from active list: */ list_delinit(&hq->list); + fd_batch_unlock_submit(batch); fd_batch_reference(&batch, NULL); }