nvc0: reduce overhead from always marking images dirty

We would revalidate all images whenever anything was touched at all,
which is unfortunate since the state tracker does not use CSOs to
reduce the workload. Instead, implement a protocol that ensures
something has actually changed before revalidating all the images.
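
To make the intent concrete, here is a minimal, self-contained sketch of
that protocol (illustrative only: the struct names, fields, and the
bind_images_range/set_images helpers below are simplified stand-ins, not
the real gallium/nvc0 interfaces). The bind helper compares each incoming
view against the cached copy, accumulates a mask of slots that actually
changed, and returns false when the mask is empty so the caller can skip
setting its dirty bits.

#include <stdbool.h>

struct image_view {
   void    *resource;   /* stand-in for struct pipe_resource * */
   unsigned format;
   unsigned access;
};

struct context {
   struct image_view images[32];
   unsigned images_dirty;   /* one bit per slot that actually changed */
};

/* Returns true only if at least one slot in [start, start + nr) changed. */
static bool
bind_images_range(struct context *ctx, unsigned start, unsigned nr,
                  const struct image_view *views)
{
   unsigned mask = 0;

   for (unsigned i = start; i < start + nr; ++i) {
      struct image_view *cached = &ctx->images[i];
      const struct image_view *incoming = &views[i - start];

      /* Identical binding already in place: nothing to revalidate. */
      if (cached->resource == incoming->resource &&
          cached->format == incoming->format &&
          cached->access == incoming->access)
         continue;

      *cached = *incoming;
      mask |= 1u << i;
   }

   if (!mask)
      return false;   /* nothing changed: caller skips the dirty marking */

   ctx->images_dirty |= mask;
   return true;
}

/* Caller only flags state for revalidation when something really changed. */
static void
set_images(struct context *ctx, unsigned start, unsigned nr,
           const struct image_view *views, unsigned *dirty_flags)
{
   if (!bind_images_range(ctx, start, nr, views))
      return;
   *dirty_flags |= 1;   /* e.g. a NEW_SURFACES-style bit */
}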

Signed-off-by: Ilia Mirkin <imirkin@alum.mit.edu>
Cc: "12.0" <mesa-stable@lists.freedesktop.org>
commit fd6bbc2ee2
parent 0f673db6f0
Author: Ilia Mirkin
Date:   2016-06-04 14:13:38 -04:00

@@ -1231,36 +1231,60 @@ nvc0_set_compute_resources(struct pipe_context *pipe,
    nvc0_context(pipe)->dirty_cp |= NVC0_NEW_CP_SURFACES;
 }
 
-static void
+static bool
 nvc0_bind_images_range(struct nvc0_context *nvc0, const unsigned s,
                        unsigned start, unsigned nr,
                        struct pipe_image_view *pimages)
 {
    const unsigned end = start + nr;
-   const unsigned mask = ((1 << nr) - 1) << start;
+   unsigned mask = 0;
    unsigned i;
 
    assert(s < 6);
 
    if (pimages) {
       for (i = start; i < end; ++i) {
+         struct pipe_image_view *img = &nvc0->images[s][i];
          const unsigned p = i - start;
+
+         if (img->resource == pimages[p].resource &&
+             img->format == pimages[p].format &&
+             img->access == pimages[p].access) {
+            if (img->resource == NULL)
+               continue;
+            if (img->resource->target == PIPE_BUFFER &&
+                img->u.buf.first_element == pimages[p].u.buf.first_element &&
+                img->u.buf.last_element == pimages[p].u.buf.last_element)
+               continue;
+            if (img->resource->target != PIPE_BUFFER &&
+                img->u.tex.first_layer == pimages[p].u.tex.first_layer &&
+                img->u.tex.last_layer == pimages[p].u.tex.last_layer &&
+                img->u.tex.level == pimages[p].u.tex.level)
+               continue;
+         }
+
+         mask |= (1 << i);
          if (pimages[p].resource)
             nvc0->images_valid[s] |= (1 << i);
          else
            nvc0->images_valid[s] &= ~(1 << i);
 
-         nvc0->images[s][i].format = pimages[p].format;
-         nvc0->images[s][i].access = pimages[p].access;
-         if (pimages[p].resource->target == PIPE_BUFFER)
-            nvc0->images[s][i].u.buf = pimages[p].u.buf;
+         img->format = pimages[p].format;
+         img->access = pimages[p].access;
+         if (pimages[p].resource && pimages[p].resource->target == PIPE_BUFFER)
+            img->u.buf = pimages[p].u.buf;
          else
-            nvc0->images[s][i].u.tex = pimages[p].u.tex;
+            img->u.tex = pimages[p].u.tex;
 
          pipe_resource_reference(
-            &nvc0->images[s][i].resource, pimages[p].resource);
+            &img->resource, pimages[p].resource);
       }
+      if (!mask)
+         return false;
    } else {
+      mask = ((1 << nr) - 1) << start;
+      if (!(nvc0->images_valid[s] & mask))
+         return false;
       for (i = start; i < end; ++i)
          pipe_resource_reference(&nvc0->images[s][i].resource, NULL);
       nvc0->images_valid[s] &= ~mask;
@@ -1271,6 +1295,8 @@ nvc0_bind_images_range(struct nvc0_context *nvc0, const unsigned s,
       nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_SUF);
    else
       nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_SUF);
+
+   return true;
 }
 
 static void
@@ -1279,7 +1305,8 @@ nvc0_set_shader_images(struct pipe_context *pipe, unsigned shader,
                        struct pipe_image_view *images)
 {
    const unsigned s = nvc0_shader_stage(shader);
-   nvc0_bind_images_range(nvc0_context(pipe), s, start, nr, images);
+   if (!nvc0_bind_images_range(nvc0_context(pipe), s, start, nr, images))
+      return;
 
    if (s == 5)
       nvc0_context(pipe)->dirty_cp |= NVC0_NEW_CP_SURFACES;