iris: Rename iris_seqno to iris_fine_fence

Rename iris_seqno to iris_fine_fence (a name borrowed from
si_fine_fence) to avoid confusion with the other seqnos used for
tracking pipelines.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5233>
Author: Chris Wilson, 2020-05-27 22:19:52 +01:00 (committed by Kenneth Graunke)
parent 682e14d3ea
commit 034329128b
8 changed files with 145 additions and 143 deletions
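
The rename is purely mechanical; the mechanism itself is unchanged: each batch owns a small GPU-visible buffer, each fence records the sequence number that a PIPE_CONTROL write is expected to deposit there, and the fence counts as signaled once the deposited value catches up with it. As background for the diff below, here is a minimal standalone sketch of that comparison (illustrative names only, not the driver's code), mirroring iris_fine_fence_signaled():

   #include <stdbool.h>
   #include <stdint.h>
   #include <stdio.h>

   /* Stand-in for struct iris_fine_fence: a pointer into the seqno buffer
    * plus the value the GPU flush is expected to write for this fence. */
   struct fine_fence {
      const uint32_t *map;
      uint32_t seqno;
   };

   static bool
   fine_fence_signaled(const struct fine_fence *fence)
   {
      /* NULL counts as signaled, as in iris_fine_fence_signaled(); the real
       * code reads the buffer with READ_ONCE() since the GPU writes it. */
      return !fence || *fence->map >= fence->seqno;
   }

   int
   main(void)
   {
      uint32_t seqno_buffer = 0;                      /* stands in for the mapped BO */
      struct fine_fence fence = { &seqno_buffer, 3 }; /* signaled once buffer >= 3 */

      printf("before flush lands: %d\n", fine_fence_signaled(&fence)); /* 0 */
      seqno_buffer = 3;                               /* pretend the PIPE_CONTROL wrote it */
      printf("after flush lands:  %d\n", fine_fence_signaled(&fence)); /* 1 */
      return 0;
   }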

--- a/src/gallium/drivers/iris/Makefile.sources
+++ b/src/gallium/drivers/iris/Makefile.sources

@@ -41,6 +41,8 @@ IRIS_C_SOURCES = \
 	iris_draw.c \
 	iris_fence.c \
 	iris_fence.h \
+	iris_fine_fence.c \
+	iris_fine_fence.h \
 	iris_formats.c \
 	iris_genx_macros.h \
 	iris_genx_protos.h \
@@ -56,5 +58,3 @@ IRIS_C_SOURCES = \
 	iris_resource.h \
 	iris_screen.c \
 	iris_screen.h \
-	iris_seqno.c \
-	iris_seqno.h

--- a/src/gallium/drivers/iris/iris_batch.c
+++ b/src/gallium/drivers/iris/iris_batch.c

@@ -181,10 +181,10 @@ iris_init_batch(struct iris_context *ice,
    batch->state_sizes = ice->state.sizes;
    batch->name = name;
 
-   batch->seqno.uploader =
+   batch->fine_fences.uploader =
       u_upload_create(&ice->ctx, 4096, PIPE_BIND_CUSTOM,
                       PIPE_USAGE_STAGING, 0);
-   iris_seqno_init(batch);
+   iris_fine_fence_init(batch);
 
    batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
    assert(batch->hw_ctx_id);
@@ -320,7 +320,7 @@ iris_use_pinned_bo(struct iris_batch *batch,
           ((other_entry->flags & EXEC_OBJECT_WRITE) || writable)) {
          iris_batch_flush(batch->other_batches[b]);
          iris_batch_add_syncobj(batch,
-                                batch->other_batches[b]->last_seqno->syncobj,
+                                batch->other_batches[b]->last_fence->syncobj,
                                 I915_EXEC_FENCE_WAIT);
       }
    }
@@ -420,14 +420,14 @@ iris_batch_free(struct iris_batch *batch)
    ralloc_free(batch->exec_fences.mem_ctx);
 
-   pipe_resource_reference(&batch->seqno.ref.res, NULL);
+   pipe_resource_reference(&batch->fine_fences.ref.res, NULL);
 
    util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
       iris_syncobj_reference(screen, s, NULL);
    ralloc_free(batch->syncobjs.mem_ctx);
 
-   iris_seqno_reference(batch->screen, &batch->last_seqno, NULL);
+   iris_fine_fence_reference(batch->screen, &batch->last_fence, NULL);
 
-   u_upload_destroy(batch->seqno.uploader);
+   u_upload_destroy(batch->fine_fences.uploader);
 
    iris_bo_unreference(batch->bo);
    batch->bo = NULL;
@@ -515,12 +515,12 @@ add_aux_map_bos_to_batch(struct iris_batch *batch)
 static void
 finish_seqno(struct iris_batch *batch)
 {
-   struct iris_seqno *sq = iris_seqno_new(batch, IRIS_SEQNO_END);
+   struct iris_fine_fence *sq = iris_fine_fence_new(batch, IRIS_FENCE_END);
    if (!sq)
       return;
 
-   iris_seqno_reference(batch->screen, &batch->last_seqno, sq);
-   iris_seqno_reference(batch->screen, &sq, NULL);
+   iris_fine_fence_reference(batch->screen, &batch->last_fence, sq);
+   iris_fine_fence_reference(batch->screen, &sq, NULL);
 }
 
 /**
/** /**

--- a/src/gallium/drivers/iris/iris_batch.h
+++ b/src/gallium/drivers/iris/iris_batch.h

@@ -34,7 +34,7 @@
 #include "common/gen_decoder.h"
 #include "iris_fence.h"
-#include "iris_seqno.h"
+#include "iris_fine_fence.h"
 
 struct iris_context;
@@ -117,10 +117,10 @@ struct iris_batch {
       /** The sequence number to write the next time we add a fence. */
       uint32_t next;
-   } seqno;
+   } fine_fences;
 
    /** A seqno (and syncobj) for the last batch that was submitted. */
-   struct iris_seqno *last_seqno;
+   struct iris_fine_fence *last_fence;
 
    /** List of other batches which we might need to flush to use a BO */
    struct iris_batch *other_batches[IRIS_BATCH_COUNT - 1];

--- a/src/gallium/drivers/iris/iris_fence.c
+++ b/src/gallium/drivers/iris/iris_fence.c

@@ -118,7 +118,7 @@ struct pipe_fence_handle {
    struct pipe_context *unflushed_ctx;
 
-   struct iris_seqno *seqno[IRIS_BATCH_COUNT];
+   struct iris_fine_fence *fine[IRIS_BATCH_COUNT];
 };
 
 static void
@@ -127,8 +127,8 @@ iris_fence_destroy(struct pipe_screen *p_screen,
 {
    struct iris_screen *screen = (struct iris_screen *)p_screen;
 
-   for (unsigned i = 0; i < ARRAY_SIZE(fence->seqno); i++)
-      iris_seqno_reference(screen, &fence->seqno[i], NULL);
+   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++)
+      iris_fine_fence_reference(screen, &fence->fine[i], NULL);
 
    free(fence);
 }
@@ -214,19 +214,19 @@ iris_fence_flush(struct pipe_context *ctx,
       struct iris_batch *batch = &ice->batches[b];
 
       if (deferred && iris_batch_bytes_used(batch) > 0) {
-         struct iris_seqno *seqno =
-            iris_seqno_new(batch, IRIS_SEQNO_BOTTOM_OF_PIPE);
-         iris_seqno_reference(screen, &fence->seqno[b], seqno);
-         iris_seqno_reference(screen, &seqno, NULL);
+         struct iris_fine_fence *fine =
+            iris_fine_fence_new(batch, IRIS_FENCE_BOTTOM_OF_PIPE);
+         iris_fine_fence_reference(screen, &fence->fine[b], fine);
+         iris_fine_fence_reference(screen, &fine, NULL);
       } else {
          /* This batch has no commands queued up (perhaps we just flushed,
           * or all the commands are on the other batch). Wait for the last
           * syncobj on this engine - unless it's already finished by now.
           */
-         if (iris_seqno_signaled(batch->last_seqno))
+         if (iris_fine_fence_signaled(batch->last_fence))
             continue;
 
-         iris_seqno_reference(screen, &fence->seqno[b], batch->last_seqno);
+         iris_fine_fence_reference(screen, &fence->fine[b], batch->last_fence);
       }
    }
@@ -263,14 +263,14 @@ iris_fence_await(struct pipe_context *ctx,
    for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
       struct iris_batch *batch = &ice->batches[b];
 
-      for (unsigned i = 0; i < ARRAY_SIZE(fence->seqno); i++) {
-         struct iris_seqno *seqno = fence->seqno[i];
+      for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
+         struct iris_fine_fence *fine = fence->fine[i];
 
-         if (iris_seqno_signaled(seqno))
+         if (iris_fine_fence_signaled(fine))
            continue;
 
         iris_batch_flush(batch);
-         iris_batch_add_syncobj(batch, seqno->syncobj, I915_EXEC_FENCE_WAIT);
+         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
      }
   }
 }
@@ -320,12 +320,12 @@ iris_fence_finish(struct pipe_screen *p_screen,
    */
   if (ctx && ctx == fence->unflushed_ctx) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
-         struct iris_seqno *seqno = fence->seqno[i];
+         struct iris_fine_fence *fine = fence->fine[i];
 
-         if (iris_seqno_signaled(seqno))
+         if (iris_fine_fence_signaled(fine))
            continue;
 
-         if (seqno->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
+         if (fine->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
            iris_batch_flush(&ice->batches[i]);
      }
@@ -334,14 +334,14 @@ iris_fence_finish(struct pipe_screen *p_screen,
   }
 
   unsigned int handle_count = 0;
-   uint32_t handles[ARRAY_SIZE(fence->seqno)];
-   for (unsigned i = 0; i < ARRAY_SIZE(fence->seqno); i++) {
-      struct iris_seqno *seqno = fence->seqno[i];
+   uint32_t handles[ARRAY_SIZE(fence->fine)];
+   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
+      struct iris_fine_fence *fine = fence->fine[i];
 
-      if (iris_seqno_signaled(seqno))
+      if (iris_fine_fence_signaled(fine))
         continue;
 
-      handles[handle_count++] = seqno->syncobj->handle;
+      handles[handle_count++] = fine->syncobj->handle;
   }
 
   if (handle_count == 0)
@@ -401,14 +401,14 @@ iris_fence_get_fd(struct pipe_screen *p_screen,
   if (fence->unflushed_ctx)
      return -1;
 
-   for (unsigned i = 0; i < ARRAY_SIZE(fence->seqno); i++) {
-      struct iris_seqno *seqno = fence->seqno[i];
+   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
+      struct iris_fine_fence *fine = fence->fine[i];
 
-      if (iris_seqno_signaled(seqno))
+      if (iris_fine_fence_signaled(fine))
         continue;
 
      struct drm_syncobj_handle args = {
-         .handle = seqno->syncobj->handle,
+         .handle = fine->syncobj->handle,
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
         .fd = -1,
      };
@@ -466,8 +466,8 @@ iris_fence_create_fd(struct pipe_context *ctx,
   syncobj->handle = args.handle;
   pipe_reference_init(&syncobj->ref, 1);
 
-   struct iris_seqno *seqno = calloc(1, sizeof(*seqno));
-   if (!seqno) {
+   struct iris_fine_fence *fine = calloc(1, sizeof(*fine));
+   if (!fine) {
      free(syncobj);
      *out = NULL;
      return;
@@ -475,25 +475,25 @@ iris_fence_create_fd(struct pipe_context *ctx,
   static const uint32_t zero = 0;
 
-   /* Fences work in terms of iris_seqno, but we don't actually have a
+   /* Fences work in terms of iris_fine_fence, but we don't actually have a
    * seqno for an imported fence. So, create a fake one which always
    * returns as 'not signaled' so we fall back to using the sync object.
    */
-   seqno->seqno = UINT32_MAX;
-   seqno->map = &zero;
-   seqno->syncobj = syncobj;
-   seqno->flags = IRIS_SEQNO_END;
-   pipe_reference_init(&seqno->reference, 1);
+   fine->seqno = UINT32_MAX;
+   fine->map = &zero;
+   fine->syncobj = syncobj;
+   fine->flags = IRIS_FENCE_END;
+   pipe_reference_init(&fine->reference, 1);
 
   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence) {
-      free(seqno);
+      free(fine);
      free(syncobj);
      *out = NULL;
      return;
   }
 
   pipe_reference_init(&fence->ref, 1);
-   fence->seqno[0] = seqno;
+   fence->fine[0] = fine;
 
   *out = fence;
 }
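
To recap the structure this file relies on: a pipe_fence_handle is just one (possibly NULL) fine fence per batch, and iris_fence_finish can return early once every entry has signaled; otherwise it collects the unsignaled entries' syncobj handles and waits on those. A small runnable sketch of that aggregation with hypothetical names (the real types are the ones in the hunks above):

   #include <stdbool.h>
   #include <stdint.h>
   #include <stdio.h>

   #define BATCH_COUNT 2   /* illustration; iris sizes this with IRIS_BATCH_COUNT */

   struct fine_fence {
      const uint32_t *map;
      uint32_t seqno;
   };

   /* Counterpart of struct pipe_fence_handle above: one (possibly NULL)
    * fine fence per batch. */
   struct screen_fence {
      struct fine_fence *fine[BATCH_COUNT];
   };

   static bool
   fine_fence_signaled(const struct fine_fence *f)
   {
      return !f || *f->map >= f->seqno;
   }

   /* Simplified version of the check iris_fence_finish() performs: done only
    * when every entry has signaled; the driver otherwise falls back to
    * waiting on the remaining entries' syncobjs. */
   static bool
   screen_fence_finished(const struct screen_fence *fence)
   {
      for (unsigned i = 0; i < BATCH_COUNT; i++) {
         if (!fine_fence_signaled(fence->fine[i]))
            return false;
      }
      return true;
   }

   int
   main(void)
   {
      uint32_t buf0 = 1, buf1 = 0;
      struct fine_fence a = { &buf0, 1 };
      struct fine_fence b = { &buf1, 1 };
      struct screen_fence fence = { { &a, &b } };

      printf("finished: %d\n", screen_fence_finished(&fence)); /* 0: b not written yet */
      buf1 = 1;
      printf("finished: %d\n", screen_fence_finished(&fence)); /* 1 */
      return 0;
   }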

--- /dev/null
+++ b/src/gallium/drivers/iris/iris_fine_fence.c

@@ -0,0 +1,78 @@
+#include "iris_context.h"
+#include "iris_fine_fence.h"
+#include "util/u_upload_mgr.h"
+
+static void
+iris_fine_fence_reset(struct iris_batch *batch)
+{
+   u_upload_alloc(batch->fine_fences.uploader,
+                  0, sizeof(uint64_t), sizeof(uint64_t),
+                  &batch->fine_fences.ref.offset, &batch->fine_fences.ref.res,
+                  (void **)&batch->fine_fences.map);
+   WRITE_ONCE(*batch->fine_fences.map, 0);
+   batch->fine_fences.next++;
+}
+
+void
+iris_fine_fence_init(struct iris_batch *batch)
+{
+   batch->fine_fences.ref.res = NULL;
+   batch->fine_fences.next = 0;
+   iris_fine_fence_reset(batch);
+}
+
+static uint32_t
+iris_fine_fence_next(struct iris_batch *batch)
+{
+   uint32_t seqno = batch->fine_fences.next++;
+
+   if (batch->fine_fences.next == 0)
+      iris_fine_fence_reset(batch);
+
+   return seqno;
+}
+
+void
+iris_fine_fence_destroy(struct iris_screen *screen,
+                        struct iris_fine_fence *fine)
+{
+   iris_syncobj_reference(screen, &fine->syncobj, NULL);
+   pipe_resource_reference(&fine->ref.res, NULL);
+   free(fine);
+}
+
+struct iris_fine_fence *
+iris_fine_fence_new(struct iris_batch *batch, unsigned flags)
+{
+   struct iris_fine_fence *fine = calloc(1, sizeof(*fine));
+   if (!fine)
+      return NULL;
+
+   pipe_reference_init(&fine->reference, 1);
+   fine->seqno = iris_fine_fence_next(batch);
+
+   iris_syncobj_reference(batch->screen, &fine->syncobj,
+                          iris_batch_get_signal_syncobj(batch));
+
+   pipe_resource_reference(&fine->ref.res, batch->fine_fences.ref.res);
+   fine->ref.offset = batch->fine_fences.ref.offset;
+   fine->map = batch->fine_fences.map;
+   fine->flags = flags;
+
+   unsigned pc;
+   if (flags & IRIS_FENCE_TOP_OF_PIPE) {
+      pc = PIPE_CONTROL_WRITE_IMMEDIATE | PIPE_CONTROL_CS_STALL;
+   } else {
+      pc = PIPE_CONTROL_WRITE_IMMEDIATE |
+           PIPE_CONTROL_RENDER_TARGET_FLUSH |
+           PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+           PIPE_CONTROL_DATA_CACHE_FLUSH;
+   }
+
+   iris_emit_pipe_control_write(batch, "fence: fine", pc,
+                                iris_resource_bo(fine->ref.res),
+                                fine->ref.offset,
+                                fine->seqno);
+
+   return fine;
+}
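
One detail of iris_fine_fence_next()/iris_fine_fence_reset() above that is easy to miss: sequence numbers come from a plain 32-bit counter, and when it wraps to zero a fresh slot is allocated in the upload buffer, zeroed, and the counter bumped past zero again, presumably so that no fence is ever handed seqno 0 (which a zeroed slot would already satisfy). Fences created earlier keep their own reference to the previous slot, so they are unaffected. A standalone sketch of just that counter handling (hypothetical names):

   #include <stdint.h>
   #include <stdio.h>

   /* Hypothetical stand-in for the per-batch state in struct iris_batch:
    * 'value' plays the role of the freshly mapped seqno slot, 'next' is
    * the next sequence number to hand out. */
   struct seqno_pool {
      uint32_t value;
      uint32_t next;
   };

   static void
   seqno_pool_reset(struct seqno_pool *pool)
   {
      pool->value = 0;   /* new slot starts out unsignaled */
      pool->next++;      /* skip seqno 0, which a zeroed slot would satisfy */
   }

   static uint32_t
   seqno_pool_next(struct seqno_pool *pool)
   {
      uint32_t seqno = pool->next++;

      if (pool->next == 0)       /* 32-bit wrap: switch to a fresh slot */
         seqno_pool_reset(pool);

      return seqno;
   }

   int
   main(void)
   {
      struct seqno_pool pool = { .value = 0, .next = 0 };

      seqno_pool_reset(&pool);   /* like iris_fine_fence_init(): first seqno is 1 */
      uint32_t first = seqno_pool_next(&pool);
      uint32_t second = seqno_pool_next(&pool);
      printf("%u %u\n", first, second);   /* 1 2 */
      return 0;
   }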

--- a/src/gallium/drivers/iris/iris_seqno.h
+++ b/src/gallium/drivers/iris/iris_fine_fence.h

@@ -21,8 +21,8 @@
  * IN THE SOFTWARE.
  */
 
-#ifndef IRIS_SEQNO_DOT_H
-#define IRIS_SEQNO_DOT_H
+#ifndef IRIS_FINE_FENCE_DOT_H
+#define IRIS_FINE_FENCE_DOT_H
 
 #include <stdbool.h>
 #include <stdint.h>
@@ -43,7 +43,7 @@
  * (relying on mid-batch preemption to switch GPU execution to the
  * batch that writes it).
  */
-struct iris_seqno {
+struct iris_fine_fence {
    struct pipe_reference reference;
 
    /** Buffer where the seqno lives */
@@ -59,34 +59,34 @@ struct iris_seqno {
    */
    struct iris_syncobj *syncobj;
 
-#define IRIS_SEQNO_BOTTOM_OF_PIPE 0x0 /**< Written by bottom-of-pipe flush */
-#define IRIS_SEQNO_TOP_OF_PIPE 0x1 /**< Written by top-of-pipe flush */
-#define IRIS_SEQNO_END 0x2 /**< Written at the end of a batch */
+#define IRIS_FENCE_BOTTOM_OF_PIPE 0x0 /**< Written by bottom-of-pipe flush */
+#define IRIS_FENCE_TOP_OF_PIPE 0x1 /**< Written by top-of-pipe flush */
+#define IRIS_FENCE_END 0x2 /**< Written at the end of a batch */
 
-   /** Information about the type of flush involved (see IRIS_SEQNO_*) */
+   /** Information about the type of flush involved (see IRIS_FENCE_*) */
    uint32_t flags;
 
    /**
    * Sequence number expected to be written by the flush we inserted
-    * when creating this fence. The iris_seqno is 'signaled' when *@map
+    * when creating this fence. The iris_fine_fence is 'signaled' when *@map
    * (written by the flush on the GPU) is greater-than-or-equal to @seqno.
    */
    uint32_t seqno;
 };
 
-void iris_seqno_init(struct iris_batch *batch);
+void iris_fine_fence_init(struct iris_batch *batch);
 
-struct iris_seqno *iris_seqno_new(struct iris_batch *batch, unsigned flags);
+struct iris_fine_fence *iris_fine_fence_new(struct iris_batch *batch, unsigned flags);
 
-void iris_seqno_destroy(struct iris_screen *screen, struct iris_seqno *sq);
+void iris_fine_fence_destroy(struct iris_screen *screen, struct iris_fine_fence *sq);
 
 static inline void
-iris_seqno_reference(struct iris_screen *screen,
-                     struct iris_seqno **dst,
-                     struct iris_seqno *src)
+iris_fine_fence_reference(struct iris_screen *screen,
+                          struct iris_fine_fence **dst,
+                          struct iris_fine_fence *src)
 {
    if (pipe_reference(&(*dst)->reference, &src->reference))
-      iris_seqno_destroy(screen, *dst);
+      iris_fine_fence_destroy(screen, *dst);
 
    *dst = src;
 }
@@ -97,7 +97,7 @@ iris_seqno_reference(struct iris_screen *screen,
  * NULL is considered signaled.
  */
 static inline bool
-iris_seqno_signaled(const struct iris_seqno *sq)
+iris_fine_fence_signaled(const struct iris_fine_fence *sq)
 {
    return !sq || (READ_ONCE(*sq->map) >= sq->seqno);
 }

--- a/src/gallium/drivers/iris/iris_seqno.c
+++ /dev/null

@@ -1,76 +0,0 @@
-#include "iris_context.h"
-#include "iris_seqno.h"
-#include "util/u_upload_mgr.h"
-
-static void
-iris_seqno_reset(struct iris_batch *batch)
-{
-   u_upload_alloc(batch->seqno.uploader, 0, sizeof(uint64_t), sizeof(uint64_t),
-                  &batch->seqno.ref.offset, &batch->seqno.ref.res,
-                  (void **)&batch->seqno.map);
-   WRITE_ONCE(*batch->seqno.map, 0);
-   batch->seqno.next++;
-}
-
-void
-iris_seqno_init(struct iris_batch *batch)
-{
-   batch->seqno.ref.res = NULL;
-   batch->seqno.next = 0;
-   iris_seqno_reset(batch);
-}
-
-static uint32_t
-iris_seqno_next(struct iris_batch *batch)
-{
-   uint32_t seqno = batch->seqno.next++;
-
-   if (batch->seqno.next == 0)
-      iris_seqno_reset(batch);
-
-   return seqno;
-}
-
-void
-iris_seqno_destroy(struct iris_screen *screen, struct iris_seqno *sq)
-{
-   iris_syncobj_reference(screen, &sq->syncobj, NULL);
-   pipe_resource_reference(&sq->ref.res, NULL);
-   free(sq);
-}
-
-struct iris_seqno *
-iris_seqno_new(struct iris_batch *batch, unsigned flags)
-{
-   struct iris_seqno *sq = calloc(1, sizeof(*sq));
-   if (!sq)
-      return NULL;
-
-   pipe_reference_init(&sq->reference, 1);
-   sq->seqno = iris_seqno_next(batch);
-
-   iris_syncobj_reference(batch->screen, &sq->syncobj,
-                          iris_batch_get_signal_syncobj(batch));
-
-   pipe_resource_reference(&sq->ref.res, batch->seqno.ref.res);
-   sq->ref.offset = batch->seqno.ref.offset;
-   sq->map = batch->seqno.map;
-   sq->flags = flags;
-
-   unsigned pc;
-   if (flags & IRIS_SEQNO_TOP_OF_PIPE) {
-      pc = PIPE_CONTROL_WRITE_IMMEDIATE | PIPE_CONTROL_CS_STALL;
-   } else {
-      pc = PIPE_CONTROL_WRITE_IMMEDIATE |
-           PIPE_CONTROL_RENDER_TARGET_FLUSH |
-           PIPE_CONTROL_DEPTH_CACHE_FLUSH |
-           PIPE_CONTROL_DATA_CACHE_FLUSH;
-   }
-
-   iris_emit_pipe_control_write(batch, "fence: seqno", pc,
-                                iris_resource_bo(sq->ref.res),
-                                sq->ref.offset,
-                                sq->seqno);
-
-   return sq;
-}

--- a/src/gallium/drivers/iris/meson.build
+++ b/src/gallium/drivers/iris/meson.build

@@ -34,6 +34,8 @@ files_libiris = files(
   'iris_draw.c',
   'iris_fence.c',
   'iris_fence.h',
+  'iris_fine_fence.c',
+  'iris_fine_fence.h',
   'iris_formats.c',
   'iris_genx_macros.h',
   'iris_genx_protos.h',
@@ -50,8 +52,6 @@ files_libiris = files(
   'iris_resource.h',
   'iris_screen.c',
   'iris_screen.h',
-  'iris_seqno.c',
-  'iris_seqno.h',
   'iris_disk_cache.c',
 )