freedreno: Drop fd_context_lock() and friends

These were actually just wrappers for the screen->lock, left over from
moving things around a long time ago.  Let's drop them to make things
more explicit (that we are locking the screen, not the context).

Involves a bit of shuffling things around to untangle header deps, but
no functional change.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/7342>
This commit is contained in:
Rob Clark 2020-10-27 16:44:38 -07:00 committed by Marge Bot
parent 0d007349f9
commit 57a2a5db81
10 changed files with 96 additions and 114 deletions

View file

@ -29,6 +29,7 @@
#include "pipe/p_context.h"
#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "fd3_format.h"
#include "fd3_program.h"

View file

@ -25,6 +25,7 @@
*/
#include "freedreno_query_hw.h"
#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_util.h"

View file

@ -29,6 +29,7 @@
#include "pipe/p_context.h"
#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "fd5_context.h"
#include "fd5_format.h"

View file

@ -276,7 +276,7 @@ __fd_batch_destroy(struct fd_batch *batch)
DBG("%p", batch);
fd_context_assert_locked(batch->ctx);
fd_screen_assert_locked(batch->ctx->screen);
if (BATCH_DEBUG) {
_mesa_set_remove_key(ctx->screen->live_batches, batch);
@ -288,14 +288,14 @@ __fd_batch_destroy(struct fd_batch *batch)
debug_assert(batch->resources->entries == 0);
_mesa_set_destroy(batch->resources, NULL);
fd_context_unlock(ctx);
fd_screen_unlock(ctx->screen);
batch_flush_reset_dependencies(batch, false);
debug_assert(batch->dependents_mask == 0);
util_copy_framebuffer_state(&batch->framebuffer, NULL);
batch_fini(batch);
free(batch);
fd_context_lock(ctx);
fd_screen_lock(ctx->screen);
}
void

View file

@ -31,6 +31,7 @@
#include "util/u_queue.h"
#include "util/list.h"
#include "freedreno_context.h"
#include "freedreno_util.h"
#ifdef DEBUG
@ -39,31 +40,7 @@
# define BATCH_DEBUG 0
#endif
struct fd_context;
struct fd_resource;
enum fd_resource_status;
/* Bitmask of stages in rendering that a particular query query is
* active. Queries will be automatically started/stopped (generating
* additional fd_hw_sample_period's) on entrance/exit from stages that
* are applicable to the query.
*
* NOTE: set the stage to NULL at end of IB to ensure no query is still
* active. Things aren't going to work out the way you want if a query
* is active across IB's (or between tile IB and draw IB)
*/
enum fd_render_stage {
FD_STAGE_NULL = 0x00,
FD_STAGE_DRAW = 0x01,
FD_STAGE_CLEAR = 0x02,
/* used for driver internal draws (ie. util_blitter_blit()): */
FD_STAGE_BLIT = 0x04,
FD_STAGE_ALL = 0xff,
};
#define MAX_HW_SAMPLE_PROVIDERS 7
struct fd_hw_sample_provider;
struct fd_hw_sample;
/* A batch tracks everything about a cmdstream batch/submit, including the
* ringbuffers used for binning, draw, and gmem cmds, list of associated
@ -289,11 +266,6 @@ void __fd_batch_destroy(struct fd_batch *batch);
* you.
*/
/* fwd-decl prototypes to untangle header dependency :-/ */
static inline void fd_context_assert_locked(struct fd_context *ctx);
static inline void fd_context_lock(struct fd_context *ctx);
static inline void fd_context_unlock(struct fd_context *ctx);
static inline void
fd_batch_reference_locked(struct fd_batch **ptr, struct fd_batch *batch)
{
@ -301,7 +273,7 @@ fd_batch_reference_locked(struct fd_batch **ptr, struct fd_batch *batch)
/* only need lock if a reference is dropped: */
if (old_batch)
fd_context_assert_locked(old_batch->ctx);
fd_screen_assert_locked(old_batch->ctx->screen);
if (pipe_reference_described(&(*ptr)->reference, &batch->reference,
(debug_reference_descriptor)__fd_batch_describe))
@ -317,15 +289,24 @@ fd_batch_reference(struct fd_batch **ptr, struct fd_batch *batch)
struct fd_context *ctx = old_batch ? old_batch->ctx : NULL;
if (ctx)
fd_context_lock(ctx);
fd_screen_lock(ctx->screen);
fd_batch_reference_locked(ptr, batch);
if (ctx)
fd_context_unlock(ctx);
fd_screen_unlock(ctx->screen);
}
#include "freedreno_context.h"
/* Record the render stage the batch is entering, giving the context's
 * per-generation query_set_stage() hook (if any) a chance to react
 * first -- presumably to start/stop queries that are applicable to the
 * new stage (see fd_render_stage).  NOTE(review): hook semantics
 * inferred from the fd_render_stage comment; confirm against the query
 * implementation.
 */
static inline void
fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
struct fd_context *ctx = batch->ctx;
/* optional per-gen hook; may be NULL */
if (ctx->query_set_stage)
ctx->query_set_stage(batch, stage);
batch->stage = stage;
}
static inline void
fd_reset_wfi(struct fd_batch *batch)

View file

@ -138,7 +138,7 @@ bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
struct fd_batch *batch;
unsigned n = 0;
fd_context_lock(ctx);
fd_screen_lock(ctx->screen);
foreach_batch(batch, cache, cache->batch_mask) {
if (batch->ctx == ctx) {
@ -156,9 +156,9 @@ bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
}
}
fd_context_unlock(ctx);
fd_screen_unlock(ctx->screen);
} else {
fd_context_unlock(ctx);
fd_screen_unlock(ctx->screen);
for (unsigned i = 0; i < n; i++) {
fd_batch_flush(batches[i]);
@ -261,7 +261,7 @@ fd_bc_invalidate_batch(struct fd_batch *batch, bool remove)
struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
struct key *key = (struct key *)batch->key;
fd_context_assert_locked(batch->ctx);
fd_screen_assert_locked(batch->ctx->screen);
if (remove) {
cache->batches[batch->idx] = NULL;

View file

@ -203,6 +203,49 @@ fd_emit_string_marker(struct pipe_context *pctx, const char *string, int len)
}
}
/**
* If we have a pending fence_server_sync() (GPU side sync), flush now.
* The alternative to try to track this with batch dependencies gets
* hairy quickly.
*
* Call this before switching to a different batch, to handle this case.
*/
void
fd_context_switch_from(struct fd_context *ctx)
{
/* batch->in_fence_fd != -1 means the current batch carries an imported
 * fence it must wait on; flush it now instead of tracking that
 * dependency across batches:
 */
if (ctx->batch && (ctx->batch->in_fence_fd != -1))
fd_batch_flush(ctx->batch);
}
/**
* If there is a pending fence-fd that we need to sync on, this will
* transfer the reference to the next batch we are going to render
* to.
*/
void
fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch)
{
if (ctx->in_fence_fd != -1) {
/* merge the context's pending fence into the batch's in-fence,
 * then close our fd -- ownership of the sync obligation moves
 * to the batch:
 */
sync_accumulate("freedreno", &batch->in_fence_fd, ctx->in_fence_fd);
close(ctx->in_fence_fd);
ctx->in_fence_fd = -1;
}
}
/**
* Get the current batch for the context, lazily creating one (looked up
* from the batch-cache by the current framebuffer state) if there is
* none.  Any pending fence-fd the context holds is transferred to the
* returned batch via fd_context_switch_to().
*/
struct fd_batch *
fd_context_batch(struct fd_context *ctx)
{
if (unlikely(!ctx->batch)) {
struct fd_batch *batch =
fd_batch_from_fb(&ctx->screen->batch_cache, ctx, &ctx->framebuffer);
/* the new batch takes a snapshot of the current fb state: */
util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
ctx->batch = batch;
/* all state must be re-emitted into the fresh cmdstream: */
fd_context_all_dirty(ctx);
}
fd_context_switch_to(ctx, ctx->batch);
return ctx->batch;
}
void
fd_context_destroy(struct pipe_context *pctx)
{

View file

@ -35,7 +35,6 @@
#include "util/slab.h"
#include "util/u_string.h"
#include "freedreno_batch.h"
#include "freedreno_screen.h"
#include "freedreno_gmem.h"
#include "freedreno_util.h"
@ -43,6 +42,7 @@
#define BORDER_COLOR_UPLOAD_SIZE (2 * PIPE_MAX_SAMPLERS * BORDERCOLOR_SIZE)
struct fd_vertex_stateobj;
struct fd_batch;
struct fd_texture_stateobj {
struct pipe_sampler_view *textures[PIPE_MAX_SAMPLERS];
@ -164,6 +164,28 @@ enum fd_dirty_shader_state {
FD_DIRTY_SHADER_IMAGE = BIT(4),
};
/* Bitmask of stages in rendering that a particular query is active.
* Queries will be automatically started/stopped (generating additional
* fd_hw_sample_period's) on entrance/exit from stages that are
* applicable to the query.
*
* NOTE: set the stage to NULL at end of IB to ensure no query is still
* active. Things aren't going to work out the way you want if a query
* is active across IB's (or between tile IB and draw IB)
*/
enum fd_render_stage {
FD_STAGE_NULL = 0x00,
FD_STAGE_DRAW = 0x01,
FD_STAGE_CLEAR = 0x02,
/* used for driver internal draws (ie. util_blitter_blit()): */
FD_STAGE_BLIT = 0x04,
FD_STAGE_ALL = 0xff,
};
#define MAX_HW_SAMPLE_PROVIDERS 7
struct fd_hw_sample_provider;
struct fd_hw_sample;
struct fd_context {
struct pipe_context base;
@ -437,24 +459,6 @@ fd_context(struct pipe_context *pctx)
return (struct fd_context *)pctx;
}
static inline void
fd_context_assert_locked(struct fd_context *ctx)
{
fd_screen_assert_locked(ctx->screen);
}
static inline void
fd_context_lock(struct fd_context *ctx)
{
fd_screen_lock(ctx->screen);
}
static inline void
fd_context_unlock(struct fd_context *ctx)
{
fd_screen_unlock(ctx->screen);
}
/* mark all state dirty: */
static inline void
fd_context_all_dirty(struct fd_context *ctx)
@ -494,59 +498,9 @@ fd_supported_prim(struct fd_context *ctx, unsigned prim)
return (1 << prim) & ctx->primtype_mask;
}
/**
* If we have a pending fence_server_sync() (GPU side sync), flush now.
* The alternative to try to track this with batch dependencies gets
* hairy quickly.
*
* Call this before switching to a different batch, to handle this case.
*/
static inline void
fd_context_switch_from(struct fd_context *ctx)
{
if (ctx->batch && (ctx->batch->in_fence_fd != -1))
fd_batch_flush(ctx->batch);
}
/**
* If there is a pending fence-fd that we need to sync on, this will
* transfer the reference to the next batch we are going to render
* to.
*/
static inline void
fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch)
{
if (ctx->in_fence_fd != -1) {
sync_accumulate("freedreno", &batch->in_fence_fd, ctx->in_fence_fd);
close(ctx->in_fence_fd);
ctx->in_fence_fd = -1;
}
}
static inline struct fd_batch *
fd_context_batch(struct fd_context *ctx)
{
if (unlikely(!ctx->batch)) {
struct fd_batch *batch =
fd_batch_from_fb(&ctx->screen->batch_cache, ctx, &ctx->framebuffer);
util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
ctx->batch = batch;
fd_context_all_dirty(ctx);
}
fd_context_switch_to(ctx, ctx->batch);
return ctx->batch;
}
static inline void
fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
struct fd_context *ctx = batch->ctx;
if (ctx->query_set_stage)
ctx->query_set_stage(batch, stage);
batch->stage = stage;
}
void fd_context_switch_from(struct fd_context *ctx);
void fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch);
struct fd_batch * fd_context_batch(struct fd_context *ctx);
void fd_context_setup_common_vbos(struct fd_context *ctx);
void fd_context_cleanup_common_vbos(struct fd_context *ctx);

View file

@ -28,6 +28,7 @@
#include "util/u_inlines.h"
#include "freedreno_fence.h"
#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_util.h"
/* TODO: Use the interface drm/freedreno_drmif.h instead of calling directly */

View file

@ -692,9 +692,9 @@ fd_resource_transfer_map(struct pipe_context *pctx,
struct fd_batch *write_batch = NULL;
/* hold a reference, so it doesn't disappear under us: */
fd_context_lock(ctx);
fd_screen_lock(ctx->screen);
fd_batch_reference_locked(&write_batch, rsc->write_batch);
fd_context_unlock(ctx);
fd_screen_unlock(ctx->screen);
if ((usage & PIPE_MAP_WRITE) && write_batch &&
write_batch->back_blit) {