freedreno: Rename fd_fence -> fd_pipe_fence

So we don't have namespace clashes in the next commit.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20263>
This commit is contained in:
Rob Clark 2022-12-14 10:49:34 -08:00 committed by Marge Bot
parent c1a621813b
commit 7469574814
7 changed files with 71 additions and 69 deletions

View file

@@ -84,7 +84,7 @@ batch_init(struct fd_batch *batch)
* immediately:
*/
if (ctx->screen->gen < 6)
batch->fence = fd_fence_create(batch);
batch->fence = fd_pipe_fence_create(batch);
batch->cleared = 0;
batch->fast_cleared = 0;
@@ -199,9 +199,9 @@ batch_fini(struct fd_batch *batch)
/* in case batch wasn't flushed but fence was created: */
if (batch->fence)
fd_fence_set_batch(batch->fence, NULL);
fd_pipe_fence_set_batch(batch->fence, NULL);
fd_fence_ref(&batch->fence, NULL);
fd_pipe_fence_ref(&batch->fence, NULL);
cleanup_submit(batch);
@@ -368,7 +368,7 @@ batch_flush(struct fd_batch *batch) assert_dt
fd_screen_unlock(batch->ctx->screen);
if (batch->fence)
fd_fence_ref(&batch->ctx->last_fence, batch->fence);
fd_pipe_fence_ref(&batch->ctx->last_fence, batch->fence);
fd_gmem_render_tiles(batch);

View file

@@ -359,7 +359,7 @@ static inline void
fd_batch_needs_flush(struct fd_batch *batch)
{
batch->needs_flush = true;
fd_fence_ref(&batch->ctx->last_fence, NULL);
fd_pipe_fence_ref(&batch->ctx->last_fence, NULL);
}
/* Since we reorder batches and can pause/resume queries (notably for disabling

View file

@@ -78,15 +78,15 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
*/
assert(!(flags & PIPE_FLUSH_FENCE_FD));
fd_fence_set_batch(*fencep, batch);
fd_fence_ref(&batch->fence, *fencep);
fd_pipe_fence_set_batch(*fencep, batch);
fd_pipe_fence_ref(&batch->fence, *fencep);
/* If we have nothing to flush, update the pre-created unflushed
* fence with the current state of the last-fence:
*/
if (ctx->last_fence) {
fd_fence_repopulate(*fencep, ctx->last_fence);
fd_fence_ref(&fence, *fencep);
fd_pipe_fence_repopulate(*fencep, ctx->last_fence);
fd_pipe_fence_ref(&fence, *fencep);
fd_bc_dump(ctx, "%p: (deferred) reuse last_fence, remaining:\n", ctx);
goto out;
}
@@ -97,7 +97,7 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
*/
flags &= ~PIPE_FLUSH_DEFERRED;
} else if (!batch->fence) {
batch->fence = fd_fence_create(batch);
batch->fence = fd_pipe_fence_create(batch);
}
/* In some sequence of events, we can end up with a last_fence that is
@@ -105,20 +105,20 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
* errors.
*/
if ((flags & PIPE_FLUSH_FENCE_FD) && ctx->last_fence &&
!fd_fence_is_fd(ctx->last_fence))
fd_fence_ref(&ctx->last_fence, NULL);
!fd_pipe_fence_is_fd(ctx->last_fence))
fd_pipe_fence_ref(&ctx->last_fence, NULL);
/* if no rendering since last flush, ie. app just decided it needed
* a fence, re-use the last one:
*/
if (ctx->last_fence) {
fd_fence_ref(&fence, ctx->last_fence);
fd_pipe_fence_ref(&fence, ctx->last_fence);
fd_bc_dump(ctx, "%p: reuse last_fence, remaining:\n", ctx);
goto out;
}
/* Take a ref to the batch's fence (batch can be unref'd when flushed: */
fd_fence_ref(&fence, batch->fence);
fd_pipe_fence_ref(&fence, batch->fence);
if (flags & PIPE_FLUSH_FENCE_FD)
fence->submit_fence.use_fence_fd = true;
@@ -141,11 +141,11 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
out:
if (fencep)
fd_fence_ref(fencep, fence);
fd_pipe_fence_ref(fencep, fence);
fd_fence_ref(&ctx->last_fence, fence);
fd_pipe_fence_ref(&ctx->last_fence, fence);
fd_fence_ref(&fence, NULL);
fd_pipe_fence_ref(&fence, NULL);
fd_batch_reference(&batch, NULL);
@@ -338,7 +338,7 @@ fd_context_destroy(struct pipe_context *pctx)
list_del(&ctx->node);
fd_screen_unlock(ctx->screen);
fd_fence_ref(&ctx->last_fence, NULL);
fd_pipe_fence_ref(&ctx->last_fence, NULL);
if (ctx->in_fence_fd != -1)
close(ctx->in_fence_fd);
@@ -629,9 +629,9 @@ fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
pctx->emit_string_marker = fd_emit_string_marker;
pctx->set_debug_callback = fd_set_debug_callback;
pctx->get_device_reset_status = fd_get_device_reset_status;
pctx->create_fence_fd = fd_create_fence_fd;
pctx->fence_server_sync = fd_fence_server_sync;
pctx->fence_server_signal = fd_fence_server_signal;
pctx->create_fence_fd = fd_create_pipe_fence_fd;
pctx->fence_server_sync = fd_pipe_fence_server_sync;
pctx->fence_server_signal = fd_pipe_fence_server_signal;
pctx->texture_barrier = fd_texture_barrier;
pctx->memory_barrier = fd_memory_barrier;
@@ -694,7 +694,7 @@ fd_context_init_tc(struct pipe_context *pctx, unsigned flags)
pctx, &ctx->screen->transfer_pool,
fd_replace_buffer_storage,
&(struct threaded_context_options){
.create_fence = fd_fence_create_unflushed,
.create_fence = fd_pipe_fence_create_unflushed,
.is_resource_busy = fd_resource_busy,
.unsynchronized_get_device_reset_status = true,
.unsynchronized_create_fence_fd = true,

View file

@@ -79,10 +79,11 @@ fence_flush(struct pipe_context *pctx, struct pipe_fence_handle *fence,
}
void
fd_fence_repopulate(struct pipe_fence_handle *fence, struct pipe_fence_handle *last_fence)
fd_pipe_fence_repopulate(struct pipe_fence_handle *fence,
struct pipe_fence_handle *last_fence)
{
if (last_fence->last_fence)
fd_fence_repopulate(fence, last_fence->last_fence);
fd_pipe_fence_repopulate(fence, last_fence->last_fence);
/* The fence we are re-populating must not be an fd-fence (but last_fence
* might have been)
@@ -90,18 +91,18 @@ fd_fence_repopulate(struct pipe_fence_handle *fence, struct pipe_fence_handle *l
assert(!fence->submit_fence.use_fence_fd);
assert(!last_fence->batch);
fd_fence_ref(&fence->last_fence, last_fence);
fd_pipe_fence_ref(&fence->last_fence, last_fence);
/* We have nothing to flush, so nothing will clear the batch reference
* (which is normally done when the batch is flushed), so do it now:
*/
fd_fence_set_batch(fence, NULL);
fd_pipe_fence_set_batch(fence, NULL);
}
static void
fd_fence_destroy(struct pipe_fence_handle *fence)
{
fd_fence_ref(&fence->last_fence, NULL);
fd_pipe_fence_ref(&fence->last_fence, NULL);
tc_unflushed_batch_token_reference(&fence->tc_token, NULL);
@@ -120,7 +121,8 @@ fd_fence_destroy(struct pipe_fence_handle *fence)
}
void
fd_fence_ref(struct pipe_fence_handle **ptr, struct pipe_fence_handle *pfence)
fd_pipe_fence_ref(struct pipe_fence_handle **ptr,
struct pipe_fence_handle *pfence)
{
if (pipe_reference(&(*ptr)->reference, &pfence->reference))
fd_fence_destroy(*ptr);
@@ -129,8 +131,8 @@ fd_fence_ref(struct pipe_fence_handle **ptr, struct pipe_fence_handle *pfence)
}
bool
fd_fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
struct pipe_fence_handle *fence, uint64_t timeout)
fd_pipe_fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
struct pipe_fence_handle *fence, uint64_t timeout)
{
/* Note: for TC deferred fence, pctx->flush() may not have been called
* yet, so always do fence_flush() *first* before delegating to
@@ -140,7 +142,7 @@ fd_fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
return false;
if (fence->last_fence)
return fd_fence_finish(pscreen, pctx, fence->last_fence, timeout);
return fd_pipe_fence_finish(pscreen, pctx, fence->last_fence, timeout);
if (fence->last_fence)
fence = fence->last_fence;
@@ -171,7 +173,7 @@ fence_create(struct fd_context *ctx, struct fd_batch *batch, int fence_fd,
util_queue_fence_init(&fence->submit_fence.ready);
fence->ctx = ctx;
fd_fence_set_batch(fence, batch);
fd_pipe_fence_set_batch(fence, batch);
fence->pipe = fd_pipe_ref(ctx->pipe);
fence->screen = ctx->screen;
fence->submit_fence.fence_fd = fence_fd;
@@ -182,8 +184,8 @@
}
void
fd_create_fence_fd(struct pipe_context *pctx, struct pipe_fence_handle **pfence,
int fd, enum pipe_fd_type type)
fd_create_pipe_fence_fd(struct pipe_context *pctx, struct pipe_fence_handle **pfence,
int fd, enum pipe_fd_type type)
{
struct fd_context *ctx = fd_context(pctx);
@@ -210,7 +212,7 @@ fd_create_fence_fd(struct pipe_context *pctx, struct pipe_fence_handle **pfence,
}
void
fd_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *fence)
fd_pipe_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *fence)
{
struct fd_context *ctx = fd_context(pctx);
@@ -220,7 +222,7 @@ fd_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *fence)
fence_flush(pctx, fence, 0);
if (fence->last_fence) {
fd_fence_server_sync(pctx, fence->last_fence);
fd_pipe_fence_server_sync(pctx, fence->last_fence);
return;
}
@@ -234,8 +236,8 @@ fd_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *fence)
}
void
fd_fence_server_signal(struct pipe_context *pctx,
struct pipe_fence_handle *fence)
fd_pipe_fence_server_signal(struct pipe_context *pctx,
struct pipe_fence_handle *fence)
{
struct fd_context *ctx = fd_context(pctx);
@@ -245,7 +247,7 @@ fd_fence_server_signal(struct pipe_context *pctx,
}
int
fd_fence_get_fd(struct pipe_screen *pscreen, struct pipe_fence_handle *fence)
fd_pipe_fence_get_fd(struct pipe_screen *pscreen, struct pipe_fence_handle *fence)
{
/* We don't expect deferred flush to be combined with fence-fd: */
assert(!fence->last_fence);
@@ -261,19 +263,19 @@ fd_fence_get_fd(struct pipe_screen *pscreen, struct pipe_fence_handle *fence)
}
bool
fd_fence_is_fd(struct pipe_fence_handle *fence)
fd_pipe_fence_is_fd(struct pipe_fence_handle *fence)
{
return fence->submit_fence.use_fence_fd;
}
struct pipe_fence_handle *
fd_fence_create(struct fd_batch *batch)
fd_pipe_fence_create(struct fd_batch *batch)
{
return fence_create(batch->ctx, batch, -1, 0);
}
void
fd_fence_set_batch(struct pipe_fence_handle *fence, struct fd_batch *batch)
fd_pipe_fence_set_batch(struct pipe_fence_handle *fence, struct fd_batch *batch)
{
if (batch) {
assert(!fence->batch);
@@ -293,8 +295,8 @@ fd_fence_set_batch(struct pipe_fence_handle *fence, struct fd_batch *batch)
}
struct pipe_fence_handle *
fd_fence_create_unflushed(struct pipe_context *pctx,
struct tc_unflushed_batch_token *tc_token)
fd_pipe_fence_create_unflushed(struct pipe_context *pctx,
struct tc_unflushed_batch_token *tc_token)
{
struct pipe_fence_handle *fence =
fence_create(fd_context(pctx), NULL, -1, 0);

View file

@@ -77,32 +77,32 @@ struct pipe_fence_handle {
uint32_t syncobj;
};
void fd_fence_repopulate(struct pipe_fence_handle *fence,
struct pipe_fence_handle *last_fence);
void fd_fence_ref(struct pipe_fence_handle **ptr,
struct pipe_fence_handle *pfence);
bool fd_fence_finish(struct pipe_screen *pscreen, struct pipe_context *ctx,
struct pipe_fence_handle *pfence, uint64_t timeout);
void fd_create_fence_fd(struct pipe_context *pctx,
struct pipe_fence_handle **pfence, int fd,
enum pipe_fd_type type);
void fd_fence_server_sync(struct pipe_context *pctx,
struct pipe_fence_handle *fence);
void fd_fence_server_signal(struct pipe_context *ctx,
struct pipe_fence_handle *fence);
int fd_fence_get_fd(struct pipe_screen *pscreen,
struct pipe_fence_handle *pfence);
bool fd_fence_is_fd(struct pipe_fence_handle *fence);
void fd_pipe_fence_repopulate(struct pipe_fence_handle *fence,
struct pipe_fence_handle *last_fence);
void fd_pipe_fence_ref(struct pipe_fence_handle **ptr,
struct pipe_fence_handle *pfence);
bool fd_pipe_fence_finish(struct pipe_screen *pscreen, struct pipe_context *ctx,
struct pipe_fence_handle *pfence, uint64_t timeout);
void fd_create_pipe_fence_fd(struct pipe_context *pctx,
struct pipe_fence_handle **pfence, int fd,
enum pipe_fd_type type);
void fd_pipe_fence_server_sync(struct pipe_context *pctx,
struct pipe_fence_handle *fence);
void fd_pipe_fence_server_signal(struct pipe_context *ctx,
struct pipe_fence_handle *fence);
int fd_pipe_fence_get_fd(struct pipe_screen *pscreen,
struct pipe_fence_handle *pfence);
bool fd_pipe_fence_is_fd(struct pipe_fence_handle *fence);
struct fd_batch;
struct pipe_fence_handle *fd_fence_create(struct fd_batch *batch);
struct pipe_fence_handle *fd_pipe_fence_create(struct fd_batch *batch);
void fd_fence_set_batch(struct pipe_fence_handle *fence,
struct fd_batch *batch);
void fd_pipe_fence_set_batch(struct pipe_fence_handle *fence,
struct fd_batch *batch);
struct tc_unflushed_batch_token;
struct pipe_fence_handle *
fd_fence_create_unflushed(struct pipe_context *pctx,
struct tc_unflushed_batch_token *tc_token);
fd_pipe_fence_create_unflushed(struct pipe_context *pctx,
struct tc_unflushed_batch_token *tc_token);
#endif /* FREEDRENO_FENCE_H_ */

View file

@@ -675,7 +675,7 @@ flush_ring(struct fd_batch *batch)
batch->fence ? &batch->fence->submit_fence : NULL);
if (batch->fence)
fd_fence_set_batch(batch->fence, NULL);
fd_pipe_fence_set_batch(batch->fence, NULL);
}
void

View file

@@ -964,7 +964,7 @@ static void
_fd_fence_ref(struct pipe_screen *pscreen, struct pipe_fence_handle **ptr,
struct pipe_fence_handle *pfence)
{
fd_fence_ref(ptr, pfence);
fd_pipe_fence_ref(ptr, pfence);
}
static void
@@ -1184,8 +1184,8 @@ fd_screen_create(struct fd_device *dev, struct renderonly *ro,
pscreen->get_timestamp = fd_screen_get_timestamp;
pscreen->fence_reference = _fd_fence_ref;
pscreen->fence_finish = fd_fence_finish;
pscreen->fence_get_fd = fd_fence_get_fd;
pscreen->fence_finish = fd_pipe_fence_finish;
pscreen->fence_get_fd = fd_pipe_fence_get_fd;
pscreen->query_dmabuf_modifiers = fd_screen_query_dmabuf_modifiers;
pscreen->is_dmabuf_modifier_supported =