nir/lower_int64: fix shift lowering

Starting with !19748, lowered 64-bit shifts were producing wrong results for
shift counts with insignificant (high) bits set.

nir shifts are defined to only look at the least significant bits. The
lowering has to take this into account.

So there are two things going on:
1. the `ieq` and `uge` further down depend on `y` being masked.
2. the calculation of `reverse_count` actually depends on a masked `y` as
   well, due to the `(iabs (iadd y -32))` giving a different result for
   shift counts > 31.

Fixes: 41f3e9e5f5 ("nir: Implement lowering of 64-bit shift operations")
Signed-off-by: Karol Herbst <kherbst@redhat.com>
Reviewed-by: Rhys Perry <pendingchaos02@gmail.com>
Reviewed-by: Ian Romanick <ian.d.romanick@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/19995>
(cherry picked from commit 5398dd04bf)
This commit is contained in:
Karol Herbst 2022-11-25 01:56:07 +01:00 committed by Dylan Baker
parent f73fc14feb
commit ab71151624
2 changed files with 10 additions and 1 deletions

View file

@ -184,7 +184,7 @@
"description": "nir/lower_int64: fix shift lowering",
"nominated": true,
"nomination_type": 1,
"resolution": 0,
"resolution": 1,
"main_sha": null,
"because_sha": "41f3e9e5f5de5309821c266b76ccdd1b4d016ce8"
},

View file

@ -170,6 +170,8 @@ lower_ishl64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
*
* uint64_t lshift(uint64_t x, int c)
* {
* c %= 64;
*
* if (c == 0) return x;
*
* uint32_t lo = LO(x), hi = HI(x);
@ -187,6 +189,7 @@ lower_ishl64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
*/
nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
y = nir_iand_imm(b, y, 0x3f);
nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
nir_ssa_def *lo_shifted = nir_ishl(b, x_lo, y);
@ -212,6 +215,8 @@ lower_ishr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
*
* uint64_t arshift(uint64_t x, int c)
* {
* c %= 64;
*
* if (c == 0) return x;
*
* uint32_t lo = LO(x);
@ -231,6 +236,7 @@ lower_ishr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
*/
nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
y = nir_iand_imm(b, y, 0x3f);
nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);
@ -256,6 +262,8 @@ lower_ushr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
*
* uint64_t rshift(uint64_t x, int c)
* {
* c %= 64;
*
* if (c == 0) return x;
*
* uint32_t lo = LO(x), hi = HI(x);
@ -274,6 +282,7 @@ lower_ushr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
y = nir_iand_imm(b, y, 0x3f);
nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);