nv50/ir: normalize cube coordinates after derivatives have been computed

In "manual" derivative mode (always used on nv50, used only in some
cases on nvc0, but always for cube textures), the idea is that we use
the quadop instruction to set up the other lanes of the quad with
values such that the derivatives work out, and then run the texture
instruction as if nothing were strange. It pulls values from the other
lanes and does its magic.
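
As a rough sketch of that idea (the helper name and the exact lane
mapping below are illustrative assumptions, not the actual lowering
code, which builds this with OP_QUADOP through the ir builder): each
lane's neighbours in the quad are loaded with the coordinate plus the
explicit derivatives, so the hardware's cross-lane differences
reproduce dPdx/dPdy.

    /* Illustrative sketch only - not the real nv50 ir code. */
    static void setup_quad_lanes(const float P[3], const float dPdx[3],
                                 const float dPdy[3], float lane[4][3],
                                 int dim)
    {
       for (int c = 0; c < dim; ++c) {
          lane[0][c] = P[c];                     /* the lane being sampled */
          lane[1][c] = P[c] + dPdx[c];           /* neighbour along x      */
          lane[2][c] = P[c] + dPdy[c];           /* neighbour along y      */
          lane[3][c] = P[c] + dPdx[c] + dPdy[c]; /* diagonal (assumption)  */
       }
    }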

However cube coordinates have to be normalized - the coord with the
largest magnitude has to become +/-1, which determines the major axis
and thus which face is being sampled. We were normalizing the
coordinates first, and then adding the derivatives. This is wrong for
two reasons:

- the coordinates were scaled by the normalization factor but the
  derivatives were not
- the result of the addition was no longer normalized

To resolve this, we flip the logic around to normalize *after* the
per-lane coordinates are set up.
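
Concretely, the per-lane normalization the lowering passes now emit
(as an OP_ABS/OP_MAX/OP_RCP/OP_MUL sequence) computes the following,
sketched here in plain C; the helper name is made up, and crd[] is the
coordinate after the per-lane derivative offsets have already been
added:

    #include <math.h>

    /* What the emitted OP_ABS/OP_MAX/OP_RCP/OP_MUL sequence computes:
     * scale so that the largest-magnitude coordinate becomes +/-1. */
    static void normalize_cube_coords(const float crd[3], float dst[3])
    {
       float scale = 1.0f / fmaxf(fabsf(crd[0]),
                                  fmaxf(fabsf(crd[1]), fabsf(crd[2])));
       for (int c = 0; c < 3; ++c)
          dst[c] = crd[c] * scale;  /* dst[] is what the TEX then reads */
    }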

This fixes a bunch of textureGrad cube dEQP tests.

NOTE: nv50 cube arrays with explicit derivatives are still broken, to be
resolved at a later date.

Signed-off-by: Ilia Mirkin <imirkin@alum.mit.edu>
Cc: "11.1 11.2" <mesa-stable@lists.freedesktop.org>
Ilia Mirkin 2016-03-19 11:58:25 -04:00
parent ea2bff1d11
commit 6eeb284e4f
4 changed files with 84 additions and 15 deletions

@@ -1989,7 +1989,6 @@ Converter::loadProjTexCoords(Value *dst[4], Value *src[4], unsigned int mask)
 void
 Converter::handleTEX(Value *dst[4], int R, int S, int L, int C, int Dx, int Dy)
 {
-   Value *val;
    Value *arg[4], *src[8];
    Value *lod = NULL, *shd = NULL;
    unsigned int s, c, d;
@@ -2032,17 +2031,6 @@ Converter::handleTEX(Value *dst[4], int R, int S, int L, int C, int Dx, int Dy)
       shd = src[n - 1];
    }
 
-   if (tgt.isCube()) {
-      for (c = 0; c < 3; ++c)
-         src[c] = mkOp1v(OP_ABS, TYPE_F32, getSSA(), arg[c]);
-      val = getScratch();
-      mkOp2(OP_MAX, TYPE_F32, val, src[0], src[1]);
-      mkOp2(OP_MAX, TYPE_F32, val, src[2], val);
-      mkOp1(OP_RCP, TYPE_F32, val, val);
-      for (c = 0; c < 3; ++c)
-         src[c] = mkOp2v(OP_MUL, TYPE_F32, getSSA(), arg[c], val);
-   }
-
    for (c = 0, d = 0; c < 4; ++c) {
       if (dst[c]) {
          texi->setDef(d++, dst[c]);

@@ -67,6 +67,7 @@ GM107LoweringPass::handleManualTXD(TexInstruction *i)
    tmp = bld.getScratch();
    for (l = 0; l < 4; ++l) {
+      Value *src[3], *val;
       // mov coordinates from lane l to all lanes
       bld.mkOp(OP_QUADON, TYPE_NONE, NULL);
       for (c = 0; c < dim; ++c) {
@@ -92,10 +93,25 @@ GM107LoweringPass::handleManualTXD(TexInstruction *i)
          add->lanes = 1; /* abused for .ndv */
       }
 
+      // normalize cube coordinates if necessary
+      if (i->tex.target.isCube()) {
+         for (c = 0; c < 3; ++c)
+            src[c] = bld.mkOp1v(OP_ABS, TYPE_F32, bld.getSSA(), crd[c]);
+         val = bld.getScratch();
+         bld.mkOp2(OP_MAX, TYPE_F32, val, src[0], src[1]);
+         bld.mkOp2(OP_MAX, TYPE_F32, val, src[2], val);
+         bld.mkOp1(OP_RCP, TYPE_F32, val, val);
+         for (c = 0; c < 3; ++c)
+            src[c] = bld.mkOp2v(OP_MUL, TYPE_F32, bld.getSSA(), crd[c], val);
+      } else {
+         for (c = 0; c < dim; ++c)
+            src[c] = crd[c];
+      }
+
       // texture
       bld.insert(tex = cloneForward(func, i));
       for (c = 0; c < dim; ++c)
-         tex->setSrc(c + array, crd[c]);
+         tex->setSrc(c + array, src[c]);
       bld.mkOp(OP_QUADPOP, TYPE_NONE, NULL);
 
       // save results

@@ -724,6 +724,23 @@ NV50LoweringPreSSA::handleTEX(TexInstruction *i)
    const int dref = arg;
    const int lod = i->tex.target.isShadow() ? (arg + 1) : arg;
 
+   /* Only normalize in the non-explicit derivatives case.
+    */
+   if (i->tex.target.isCube() && i->op != OP_TXD) {
+      Value *src[3], *val;
+      int c;
+      for (c = 0; c < 3; ++c)
+         src[c] = bld.mkOp1v(OP_ABS, TYPE_F32, bld.getSSA(), i->getSrc(c));
+      val = bld.getScratch();
+      bld.mkOp2(OP_MAX, TYPE_F32, val, src[0], src[1]);
+      bld.mkOp2(OP_MAX, TYPE_F32, val, src[2], val);
+      bld.mkOp1(OP_RCP, TYPE_F32, val, val);
+      for (c = 0; c < 3; ++c) {
+         i->setSrc(c, bld.mkOp2v(OP_MUL, TYPE_F32, bld.getSSA(),
+                                 i->getSrc(c), val));
+      }
+   }
+
    // handle MS, which means looking up the MS params for this texture, and
    // adjusting the input coordinates to point at the right sample.
    if (i->tex.target.isMS()) {
@@ -941,6 +958,7 @@ NV50LoweringPreSSA::handleTXD(TexInstruction *i)
    bld.mkOp(OP_QUADON, TYPE_NONE, NULL);
    for (l = 0; l < 4; ++l) {
+      Value *src[3], *val;
       // mov coordinates from lane l to all lanes
       for (c = 0; c < dim; ++c)
          bld.mkQuadop(0x00, crd[c], l, i->getSrc(c), zero);
@@ -950,10 +968,24 @@ NV50LoweringPreSSA::handleTXD(TexInstruction *i)
       // add dPdy from lane l to lanes dy
       for (c = 0; c < dim; ++c)
          bld.mkQuadop(qOps[l][1], crd[c], l, i->dPdy[c].get(), crd[c]);
+      // normalize cube coordinates if necessary
+      if (i->tex.target.isCube()) {
+         for (c = 0; c < 3; ++c)
+            src[c] = bld.mkOp1v(OP_ABS, TYPE_F32, bld.getSSA(), crd[c]);
+         val = bld.getScratch();
+         bld.mkOp2(OP_MAX, TYPE_F32, val, src[0], src[1]);
+         bld.mkOp2(OP_MAX, TYPE_F32, val, src[2], val);
+         bld.mkOp1(OP_RCP, TYPE_F32, val, val);
+         for (c = 0; c < 3; ++c)
+            src[c] = bld.mkOp2v(OP_MUL, TYPE_F32, bld.getSSA(), crd[c], val);
+      } else {
+         for (c = 0; c < dim; ++c)
+            src[c] = crd[c];
+      }
       // texture
       bld.insert(tex = cloneForward(func, i));
       for (c = 0; c < dim; ++c)
-         tex->setSrc(c, crd[c]);
+         tex->setSrc(c, src[c]);
       // save results
       for (c = 0; i->defExists(c); ++c) {
          Instruction *mov;

@@ -615,6 +615,24 @@ NVC0LoweringPass::handleTEX(TexInstruction *i)
    const int lyr = arg - (i->tex.target.isMS() ? 2 : 1);
    const int chipset = prog->getTarget()->getChipset();
 
+   /* Only normalize in the non-explicit derivatives case. For explicit
+    * derivatives, this is handled in handleManualTXD.
+    */
+   if (i->tex.target.isCube() && i->dPdx[0].get() == NULL) {
+      Value *src[3], *val;
+      int c;
+      for (c = 0; c < 3; ++c)
+         src[c] = bld.mkOp1v(OP_ABS, TYPE_F32, bld.getSSA(), i->getSrc(c));
+      val = bld.getScratch();
+      bld.mkOp2(OP_MAX, TYPE_F32, val, src[0], src[1]);
+      bld.mkOp2(OP_MAX, TYPE_F32, val, src[2], val);
+      bld.mkOp1(OP_RCP, TYPE_F32, val, val);
+      for (c = 0; c < 3; ++c) {
+         i->setSrc(c, bld.mkOp2v(OP_MUL, TYPE_F32, bld.getSSA(),
+                                 i->getSrc(c), val));
+      }
+   }
+
    // Arguments to the TEX instruction are a little insane. Even though the
    // encoding is identical between SM20 and SM30, the arguments mean
    // different things between Fermi and Kepler+. A lot of arguments are
@@ -861,6 +879,7 @@ NVC0LoweringPass::handleManualTXD(TexInstruction *i)
    bld.mkOp(OP_QUADON, TYPE_NONE, NULL);
    for (l = 0; l < 4; ++l) {
+      Value *src[3], *val;
       // mov coordinates from lane l to all lanes
       for (c = 0; c < dim; ++c)
          bld.mkQuadop(0x00, crd[c], l, i->getSrc(c + array), zero);
@@ -870,10 +889,24 @@ NVC0LoweringPass::handleManualTXD(TexInstruction *i)
       // add dPdy from lane l to lanes dy
       for (c = 0; c < dim; ++c)
         bld.mkQuadop(qOps[l][1], crd[c], l, i->dPdy[c].get(), crd[c]);
+      // normalize cube coordinates
+      if (i->tex.target.isCube()) {
+         for (c = 0; c < 3; ++c)
+            src[c] = bld.mkOp1v(OP_ABS, TYPE_F32, bld.getSSA(), crd[c]);
+         val = bld.getScratch();
+         bld.mkOp2(OP_MAX, TYPE_F32, val, src[0], src[1]);
+         bld.mkOp2(OP_MAX, TYPE_F32, val, src[2], val);
+         bld.mkOp1(OP_RCP, TYPE_F32, val, val);
+         for (c = 0; c < 3; ++c)
+            src[c] = bld.mkOp2v(OP_MUL, TYPE_F32, bld.getSSA(), crd[c], val);
+      } else {
+         for (c = 0; c < dim; ++c)
+            src[c] = crd[c];
+      }
       // texture
       bld.insert(tex = cloneForward(func, i));
       for (c = 0; c < dim; ++c)
-         tex->setSrc(c + array, crd[c]);
+         tex->setSrc(c + array, src[c]);
       // save results
       for (c = 0; i->defExists(c); ++c) {
          Instruction *mov;