nvc0: bind images for 3d/cp shaders on GM107+

On Maxwell, image binding is slightly different (and much better)
compared to Fermi and Kepler because a texture view needs to be uploaded
for each image, and this is going to simplify things a lot.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Reviewed-by: Ilia Mirkin <imirkin@alum.mit.edu>
This commit is contained in:
Samuel Pitoiset 2016-07-04 19:15:02 +02:00
parent 1da704a94c
commit 2ae4b5d622
5 changed files with 207 additions and 18 deletions

View file

@ -161,8 +161,11 @@ nvc0_context_unreference_resources(struct nvc0_context *nvc0)
for (i = 0; i < NVC0_MAX_BUFFERS; ++i)
pipe_resource_reference(&nvc0->buffers[s][i].buffer, NULL);
for (i = 0; i < NVC0_MAX_IMAGES; ++i)
for (i = 0; i < NVC0_MAX_IMAGES; ++i) {
pipe_resource_reference(&nvc0->images[s][i].resource, NULL);
if (nvc0->screen->base.class_3d >= GM107_3D_CLASS)
pipe_sampler_view_reference(&nvc0->images_tic[s][i], NULL);
}
}
for (s = 0; s < 2; ++s) {

View file

@ -246,6 +246,7 @@ struct nvc0_context {
uint32_t buffers_valid[6];
struct pipe_image_view images[6][NVC0_MAX_IMAGES];
struct pipe_sampler_view *images_tic[6][NVC0_MAX_IMAGES]; /* GM107+ */
uint16_t images_dirty[6];
uint16_t images_valid[6];
@ -349,6 +350,9 @@ struct pipe_sampler_view *
nvc0_create_sampler_view(struct pipe_context *,
struct pipe_resource *,
const struct pipe_sampler_view *);
struct pipe_sampler_view *
gm107_create_texture_view_from_image(struct pipe_context *,
const struct pipe_image_view *);
/* nvc0_transfer.c */
void

View file

@ -1296,6 +1296,19 @@ nvc0_bind_images_range(struct nvc0_context *nvc0, const unsigned s,
pipe_resource_reference(
&img->resource, pimages[p].resource);
if (nvc0->screen->base.class_3d >= GM107_3D_CLASS) {
if (nvc0->images_tic[s][i]) {
struct nv50_tic_entry *old =
nv50_tic_entry(nvc0->images_tic[s][i]);
nvc0_screen_tic_unlock(nvc0->screen, old);
pipe_sampler_view_reference(&nvc0->images_tic[s][i], NULL);
}
nvc0->images_tic[s][i] =
gm107_create_texture_view_from_image(&nvc0->base.pipe,
&pimages[p]);
}
}
if (!mask)
return false;
@ -1303,8 +1316,16 @@ nvc0_bind_images_range(struct nvc0_context *nvc0, const unsigned s,
mask = ((1 << nr) - 1) << start;
if (!(nvc0->images_valid[s] & mask))
return false;
for (i = start; i < end; ++i)
for (i = start; i < end; ++i) {
pipe_resource_reference(&nvc0->images[s][i].resource, NULL);
if (nvc0->screen->base.class_3d >= GM107_3D_CLASS) {
struct nv50_tic_entry *old = nv50_tic_entry(nvc0->images_tic[s][i]);
if (old) {
nvc0_screen_tic_unlock(nvc0->screen, old);
pipe_sampler_view_reference(&nvc0->images_tic[s][i], NULL);
}
}
}
nvc0->images_valid[s] &= ~mask;
}
nvc0->images_dirty[s] |= mask;

View file

@ -236,6 +236,42 @@ gm107_create_texture_view(struct pipe_context *pipe,
return &view->pipe;
}
/* Create a sampler view (texture view) that aliases a bound shader image.
 *
 * On GM107+ image accesses go through a texture header (TIC) entry, so every
 * bound pipe_image_view needs a matching texture view. Returns a new sampler
 * view the caller owns a reference to, or NULL if the image has no backing
 * resource.
 */
struct pipe_sampler_view *
gm107_create_texture_view_from_image(struct pipe_context *pipe,
const struct pipe_image_view *view)
{
struct nv04_resource *res = nv04_resource(view->resource);
struct pipe_sampler_view templ = {};
enum pipe_texture_target target;
uint32_t flags = 0;
if (!res)
return NULL;
target = res->base.target;
/* Shaders address cube images as plain 2D array layers. */
if (target == PIPE_TEXTURE_CUBE || target == PIPE_TEXTURE_CUBE_ARRAY)
target = PIPE_TEXTURE_2D_ARRAY;
templ.format = view->format;
/* Identity swizzle: image loads/stores see the raw components. */
templ.swizzle_r = PIPE_SWIZZLE_X;
templ.swizzle_g = PIPE_SWIZZLE_Y;
templ.swizzle_b = PIPE_SWIZZLE_Z;
templ.swizzle_a = PIPE_SWIZZLE_W;
/* Buffer images carry an element range; textures a single mip level plus
 * a layer range. */
if (target == PIPE_BUFFER) {
templ.u.buf.first_element = view->u.buf.first_element;
templ.u.buf.last_element = view->u.buf.last_element;
} else {
templ.u.tex.first_layer = view->u.tex.first_layer;
templ.u.tex.last_layer = view->u.tex.last_layer;
templ.u.tex.first_level = templ.u.tex.last_level = view->u.tex.level;
}
/* SCALED_COORDS: images are addressed with integer texel coordinates,
 * not normalized ones. */
flags = NV50_TEXVIEW_SCALED_COORDS;
return nvc0_create_texture_view(pipe, &res->base, &templ, flags, target);
}
static struct pipe_sampler_view *
gf100_create_texture_view(struct pipe_context *pipe,
struct pipe_resource *texture,
@ -1099,6 +1135,60 @@ nvc0_update_surface_bindings(struct nvc0_context *nvc0)
nvc0->images_dirty[5] |= nvc0->images_valid[5];
}
/* 3D-engine (GM107+) image validation: make sure the texture view backing
 * the image bound at (stage, slot) has a live TIC entry, then publish its
 * TIC id in the stage's auxiliary constant buffer so the shader can use it.
 */
static void
gm107_validate_surfaces(struct nvc0_context *nvc0,
struct pipe_image_view *view, int stage, int slot)
{
struct nv04_resource *res = nv04_resource(view->resource);
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
struct nvc0_screen *screen = nvc0->screen;
struct nouveau_bo *txc = nvc0->screen->txc;
struct nv50_tic_entry *tic;
tic = nv50_tic_entry(nvc0->images_tic[stage][slot]);
/* Operate on the view's backing texture, not view->resource directly. */
res = nv04_resource(tic->pipe.texture);
nvc0_update_tic(nvc0, tic, res);
/* id < 0 means the entry is not resident in the TIC area yet. */
if (tic->id < 0) {
tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);
/* upload the texture view (32 bytes at slot id in the TIC buffer)
 * via the P2MF engine, then flush the texture header cache */
PUSH_SPACE(push, 16);
BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_DST_ADDRESS_HIGH), 2);
PUSH_DATAh(push, txc->offset + (tic->id * 32));
PUSH_DATA (push, txc->offset + (tic->id * 32));
BEGIN_NVC0(push, NVE4_P2MF(UPLOAD_LINE_LENGTH_IN), 2);
PUSH_DATA (push, 32);
PUSH_DATA (push, 1);
BEGIN_1IC0(push, NVE4_P2MF(UPLOAD_EXEC), 9);
PUSH_DATA (push, 0x1001);
PUSH_DATAp(push, &tic->tic[0], 8);
BEGIN_NVC0(push, NVC0_3D(TIC_FLUSH), 1);
PUSH_DATA (push, 0);
} else
/* Entry already resident: only invalidate the texture cache for this
 * TIC id if the GPU may have written the resource since last use. */
if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
PUSH_DATA (push, (tic->id << 4) | 1);
}
/* Mark the TIC slot in use so it is not evicted while bound. */
nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);
res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
BCTX_REFN(nvc0->bufctx_3d, 3D_SUF, res, RD);
/* upload the texture handle into the stage's aux constbuf; image TIC
 * handles live at TEX_INFO slots offset by 32 */
BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
PUSH_DATA (push, NVC0_CB_AUX_SIZE);
PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(stage));
PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(stage));
BEGIN_NVC0(push, NVC0_3D(CB_POS), 2);
PUSH_DATA (push, NVC0_CB_AUX_TEX_INFO(slot + 32));
PUSH_DATA (push, tic->id);
}
static inline void
nve4_update_surface_bindings(struct nvc0_context *nvc0)
{
@ -1110,15 +1200,16 @@ nve4_update_surface_bindings(struct nvc0_context *nvc0)
if (!nvc0->images_dirty[s])
continue;
BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
PUSH_DATA (push, NVC0_CB_AUX_SIZE);
PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
BEGIN_1IC0(push, NVC0_3D(CB_POS), 1 + 16 * NVC0_MAX_IMAGES);
PUSH_DATA (push, NVC0_CB_AUX_SU_INFO(0));
for (i = 0; i < NVC0_MAX_IMAGES; ++i) {
struct pipe_image_view *view = &nvc0->images[s][i];
BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
PUSH_DATA (push, NVC0_CB_AUX_SIZE);
PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
BEGIN_1IC0(push, NVC0_3D(CB_POS), 1 + 16);
PUSH_DATA (push, NVC0_CB_AUX_SU_INFO(i));
if (view->resource) {
struct nv04_resource *res = nv04_resource(view->resource);
@ -1129,6 +1220,9 @@ nve4_update_surface_bindings(struct nvc0_context *nvc0)
nve4_set_surface_info(push, view, nvc0);
BCTX_REFN(nvc0->bufctx_3d, 3D_SUF, res, RDWR);
if (nvc0->screen->base.class_3d >= GM107_3D_CLASS)
gm107_validate_surfaces(nvc0, view, s, i);
} else {
for (j = 0; j < 16; j++)
PUSH_DATA(push, 0);

View file

@ -187,6 +187,69 @@ nve4_screen_compute_setup(struct nvc0_screen *screen,
return 0;
}
/* Compute-engine counterpart of gm107_validate_surfaces: ensure the texture
 * view backing the image bound at (compute stage, slot) has a resident TIC
 * entry, then write the TIC id into the compute aux constant buffer using
 * the compute-class UPLOAD methods (the compute engine cannot use the 3D
 * CB_* methods).
 */
static void
gm107_compute_validate_surfaces(struct nvc0_context *nvc0,
struct pipe_image_view *view, int slot)
{
struct nv04_resource *res = nv04_resource(view->resource);
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
struct nvc0_screen *screen = nvc0->screen;
struct nouveau_bo *txc = nvc0->screen->txc;
struct nv50_tic_entry *tic;
uint64_t address;
/* s == 5 is the compute shader stage index. */
const int s = 5;
tic = nv50_tic_entry(nvc0->images_tic[s][slot]);
/* Operate on the view's backing texture, not view->resource directly. */
res = nv04_resource(tic->pipe.texture);
nvc0_update_tic(nvc0, tic, res);
/* id < 0 means the entry is not resident in the TIC area yet. */
if (tic->id < 0) {
tic->id = nvc0_screen_tic_alloc(nvc0->screen, tic);
/* upload the texture view (32 bytes at slot id in the TIC buffer),
 * then flush the texture header cache for this entry */
PUSH_SPACE(push, 16);
BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
PUSH_DATAh(push, txc->offset + (tic->id * 32));
PUSH_DATA (push, txc->offset + (tic->id * 32));
BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
PUSH_DATA (push, 32);
PUSH_DATA (push, 1);
BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 9);
PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
PUSH_DATAp(push, &tic->tic[0], 8);
BEGIN_NIC0(push, NVE4_CP(TIC_FLUSH), 1);
PUSH_DATA (push, (tic->id << 4) | 1);
} else
/* Entry already resident: only invalidate the texture cache for this
 * TIC id if the GPU may have written the resource since last use. */
if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
BEGIN_NIC0(push, NVE4_CP(TEX_CACHE_CTL), 1);
PUSH_DATA (push, (tic->id << 4) | 1);
}
/* Mark the TIC slot in use so it is not evicted while bound. */
nvc0->screen->tic.lock[tic->id / 32] |= 1 << (tic->id % 32);
res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
res->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RD);
address = screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s);
/* upload the texture handle (4 bytes) into the compute aux constbuf;
 * image TIC handles live at TEX_INFO slots offset by 32 */
BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
PUSH_DATAh(push, address + NVC0_CB_AUX_TEX_INFO(slot + 32));
PUSH_DATA (push, address + NVC0_CB_AUX_TEX_INFO(slot + 32));
BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
PUSH_DATA (push, 4);
PUSH_DATA (push, 0x1);
BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 2);
PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
PUSH_DATA (push, tic->id);
/* Make the constbuf write visible before the next launch. */
BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);
}
static void
nve4_compute_validate_surfaces(struct nvc0_context *nvc0)
{
@ -200,17 +263,18 @@ nve4_compute_validate_surfaces(struct nvc0_context *nvc0)
address = nvc0->screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s);
BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
PUSH_DATAh(push, address + NVC0_CB_AUX_SU_INFO(0));
PUSH_DATA (push, address + NVC0_CB_AUX_SU_INFO(0));
BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
PUSH_DATA (push, 16 * NVC0_MAX_IMAGES * 4);
PUSH_DATA (push, 0x1);
BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 16 * NVC0_MAX_IMAGES);
PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
for (i = 0; i < NVC0_MAX_IMAGES; ++i) {
struct pipe_image_view *view = &nvc0->images[s][i];
BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
PUSH_DATAh(push, address + NVC0_CB_AUX_SU_INFO(i));
PUSH_DATA (push, address + NVC0_CB_AUX_SU_INFO(i));
BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
PUSH_DATA (push, 16 * 4);
PUSH_DATA (push, 0x1);
BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 16);
PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
if (view->resource) {
struct nv04_resource *res = nv04_resource(view->resource);
@ -221,6 +285,9 @@ nve4_compute_validate_surfaces(struct nvc0_context *nvc0)
nve4_set_surface_info(push, view, nvc0);
BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RDWR);
if (nvc0->screen->base.class_3d >= GM107_3D_CLASS)
gm107_compute_validate_surfaces(nvc0, view, i);
} else {
for (j = 0; j < 16; j++)
PUSH_DATA(push, 0);