gallium: Add PIPE_BARRIER_UPDATE_BUFFER and UPDATE_TEXTURE bits.

The glMemoryBarrier() function makes shader memory stores ordered with
respect to the operations selected by the given bits.  Until now, st/mesa
has ignored GL_TEXTURE_UPDATE_BARRIER_BIT and GL_BUFFER_UPDATE_BARRIER_BIT,
on the assumption that drivers would implicitly perform the needed flushing.

This seems like a pretty big assumption to make.  Instead, this commit
opts to translate them to new PIPE_BARRIER bits, and adjusts existing
drivers to continue ignoring them (preserving the current behavior).
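
To make the risk concrete, here is the kind of application pattern these
bits are supposed to order (a hedged sketch; assume `tex` is an existing
64x64 GL_RGBA8 texture bound to GL_TEXTURE_2D, written by a compute
shader via imageStore()):

    GLubyte pixels[64 * 64 * 4];

    /* Shader stores to the texture through an image unit. */
    glBindImageTexture(0, tex, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA8);
    glDispatchCompute(64 / 8, 64 / 8, 1);

    /* Order the image stores against later "texture update" operations:
     * CPU transfers, blits, and FBO rendering.  If a driver silently
     * ignores this bit and never flushes its shader-store path, the
     * readback below may see stale data. */
    glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT);

    glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);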

The i965 driver takes real action on these memory barriers.  Shader
memory stores go through a "data cache" which is separate from the
render cache and other read caches (like the texture cache).  All
memory barriers need to flush the data cache (to make shader memory
stores visible), and possibly invalidate read caches (so stale data is
no longer visible).  The driver implicitly flushes most caches, but not
the data cache, since ARB_shader_image_load_store introduced
MemoryBarrier() precisely to let applications order these stores
explicitly.

I would like to follow i965's approach in iris, flushing the data cache
on any MemoryBarrier() call, so I need st/mesa to actually call the
pipe->memory_barrier() callback.
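
As a rough illustration only (the xyz_* names and flush flags below are
invented, not the actual iris code), such a hook might look like:

    static void
    xyz_memory_barrier(struct pipe_context *pctx, unsigned flags)
    {
       unsigned bits = 0;

       /* Every memory barrier must make prior shader memory stores
        * visible, so always flush the (hypothetical) data cache. */
       bits |= XYZ_FLUSH_DATA_CACHE;

       /* If texture contents may be re-read or re-uploaded, also drop
        * stale lines from the sampler/read caches. */
       if (flags & (PIPE_BARRIER_TEXTURE | PIPE_BARRIER_UPDATE_TEXTURE))
          bits |= XYZ_INVALIDATE_TEXTURE_CACHE;

       xyz_emit_flush(xyz_context(pctx), bits);
    }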

Fixes KHR-GL45.shader_image_load_store.advanced-sync-textureUpdate
and Piglit's spec/arb_shader_image_load_store/host-mem-barrier on
the iris driver.

Roland said this looks reasonable to him.
Reviewed-by: Eric Anholt <eric@anholt.net>
Kenneth Graunke 2019-03-05 20:43:11 -08:00
parent 3e534489ec
commit 220c1dce1e
8 changed files with 44 additions and 16 deletions

@@ -99,6 +99,9 @@ fd_texture_barrier(struct pipe_context *pctx, unsigned flags)
 static void
 fd_memory_barrier(struct pipe_context *pctx, unsigned flags)
 {
+   if (!(flags & ~PIPE_BARRIER_UPDATE))
+      return;
+
    fd_context_flush(pctx, NULL, 0);
    /* TODO do we need to check for persistently mapped buffers and fd_bo_cpu_prep()?? */
 }

@@ -94,6 +94,10 @@ void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom
 static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags)
 {
    struct r600_context *rctx = (struct r600_context *)ctx;
+
+   if (!(flags & ~PIPE_BARRIER_UPDATE))
+      return;
+
    if (flags & PIPE_BARRIER_CONSTANT_BUFFER)
       rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;

@@ -4710,6 +4710,9 @@ void si_memory_barrier(struct pipe_context *ctx, unsigned flags)
 {
    struct si_context *sctx = (struct si_context *)ctx;
 
+   if (!(flags & ~PIPE_BARRIER_UPDATE))
+      return;
+
    /* Subsequent commands must wait for all shader invocations to
     * complete. */
    sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |

@@ -192,5 +192,8 @@ void softpipe_texture_barrier(struct pipe_context *pipe, unsigned flags)
 void softpipe_memory_barrier(struct pipe_context *pipe, unsigned flags)
 {
+   if (!(flags & ~PIPE_BARRIER_UPDATE))
+      return;
+
    softpipe_texture_barrier(pipe, 0);
 }

@@ -974,6 +974,9 @@ tegra_memory_barrier(struct pipe_context *pcontext, unsigned int flags)
 {
    struct tegra_context *context = to_tegra_context(pcontext);
 
+   if (!(flags & ~PIPE_BARRIER_UPDATE))
+      return;
+
    context->gpu->memory_barrier(context->gpu, flags);
 }

@@ -71,6 +71,9 @@ v3d_memory_barrier(struct pipe_context *pctx, unsigned int flags)
 {
    struct v3d_context *v3d = v3d_context(pctx);
 
+   if (!(flags & ~PIPE_BARRIER_UPDATE))
+      return;
+
    /* We only need to flush jobs writing to SSBOs/images. */
    perf_debug("Flushing all jobs for glMemoryBarrier(), could do better");
    v3d_flush(pctx);

@@ -425,7 +425,12 @@ enum pipe_flush_flags
 #define PIPE_BARRIER_FRAMEBUFFER         (1 << 9)
 #define PIPE_BARRIER_STREAMOUT_BUFFER    (1 << 10)
 #define PIPE_BARRIER_GLOBAL_BUFFER       (1 << 11)
-#define PIPE_BARRIER_ALL                 ((1 << 12) - 1)
+#define PIPE_BARRIER_UPDATE_BUFFER       (1 << 12)
+#define PIPE_BARRIER_UPDATE_TEXTURE      (1 << 13)
+#define PIPE_BARRIER_ALL                 ((1 << 14) - 1)
+
+#define PIPE_BARRIER_UPDATE \
+   (PIPE_BARRIER_UPDATE_BUFFER | PIPE_BARRIER_UPDATE_TEXTURE)
 
 /**
  * Flags for pipe_context::texture_barrier

@@ -95,21 +95,25 @@ st_MemoryBarrier(struct gl_context *ctx, GLbitfield barriers)
        */
       flags |= PIPE_BARRIER_TEXTURE;
    }
-   /* GL_TEXTURE_UPDATE_BARRIER_BIT:
-    * Texture updates translate to:
-    *  (1) texture transfers to/from the CPU,
-    *  (2) texture as blit destination, or
-    *  (3) texture as framebuffer.
-    * In all cases, we assume the driver does the required flushing
-    * automatically.
-    */
-   /* GL_BUFFER_UPDATE_BARRIER_BIT:
-    * Buffer updates translate to
-    *  (1) buffer transfers to/from the CPU,
-    *  (2) resource copies and clears.
-    * In all cases, we assume the driver does the required flushing
-    * automatically.
-    */
+   if (barriers & GL_TEXTURE_UPDATE_BARRIER_BIT) {
+      /* GL_TEXTURE_UPDATE_BARRIER_BIT:
+       * Texture updates translate to:
+       *  (1) texture transfers to/from the CPU,
+       *  (2) texture as blit destination, or
+       *  (3) texture as framebuffer.
+       * Some drivers may handle these automatically, and can ignore the bit.
+       */
+      flags |= PIPE_BARRIER_UPDATE_TEXTURE;
+   }
+   if (barriers & GL_BUFFER_UPDATE_BARRIER_BIT) {
+      /* GL_BUFFER_UPDATE_BARRIER_BIT:
+       * Buffer updates translate to
+       *  (1) buffer transfers to/from the CPU,
+       *  (2) resource copies and clears.
+       * Some drivers may handle these automatically, and can ignore the bit.
+       */
+      flags |= PIPE_BARRIER_UPDATE_BUFFER;
+   }
    if (barriers & GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT)
      flags |= PIPE_BARRIER_MAPPED_BUFFER;
    if (barriers & GL_QUERY_BUFFER_BARRIER_BIT)