From ab71151624c8d8bb68c06dc2862cf40411f85099 Mon Sep 17 00:00:00 2001
From: Karol Herbst
Date: Fri, 25 Nov 2022 01:56:07 +0100
Subject: [PATCH] nir/lower_int64: fix shift lowering

Starting with !19748, lowered 64-bit shifts were showing wrong results
for shift counts with insignificant bits set. NIR shifts are defined to
only look at the least significant bits. The lowering has to take this
into account.

So there are two things going on:
1. The `ieq` and `uge` further down depend on `y` being masked.
2. The calculation of `reverse_count` actually depends on a masked `y`
   as well, due to the `(iabs (iadd y -32))` giving a different result
   for shifts > 31.

Fixes: 41f3e9e5f5d ("nir: Implement lowering of 64-bit shift operations")
Signed-off-by: Karol Herbst
Reviewed-by: Rhys Perry
Reviewed-by: Ian Romanick
Part-of:
(cherry picked from commit 5398dd04bf62db100639d96c84a8c41041f4ad01)
---
 .pick_status.json                  | 2 +-
 src/compiler/nir/nir_lower_int64.c | 9 +++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/.pick_status.json b/.pick_status.json
index 358dfc9c23d..9a60d5515d4 100644
--- a/.pick_status.json
+++ b/.pick_status.json
@@ -184,7 +184,7 @@
         "description": "nir/lower_int64: fix shift lowering",
         "nominated": true,
         "nomination_type": 1,
-        "resolution": 0,
+        "resolution": 1,
         "main_sha": null,
         "because_sha": "41f3e9e5f5de5309821c266b76ccdd1b4d016ce8"
     },
diff --git a/src/compiler/nir/nir_lower_int64.c b/src/compiler/nir/nir_lower_int64.c
index 26a8b30279a..d11049482f4 100644
--- a/src/compiler/nir/nir_lower_int64.c
+++ b/src/compiler/nir/nir_lower_int64.c
@@ -170,6 +170,8 @@ lower_ishl64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
     *
     * uint64_t lshift(uint64_t x, int c)
     * {
+    *    c %= 64;
+    *
     *    if (c == 0) return x;
     *
     *    uint32_t lo = LO(x), hi = HI(x);
@@ -187,6 +189,7 @@ lower_ishl64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
     */
    nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
    nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   y = nir_iand_imm(b, y, 0x3f);
    nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
    nir_ssa_def *lo_shifted = nir_ishl(b, x_lo, y);
@@ -212,6 +215,8 @@ lower_ishr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
     *
     * uint64_t arshift(uint64_t x, int c)
     * {
+    *    c %= 64;
+    *
     *    if (c == 0) return x;
     *
     *    uint32_t lo = LO(x);
     *    int32_t hi = HI(x);
@@ -231,6 +236,7 @@ lower_ishr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
     */
    nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
    nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   y = nir_iand_imm(b, y, 0x3f);
    nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
    nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);
@@ -256,6 +262,8 @@ lower_ushr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
     *
     * uint64_t rshift(uint64_t x, int c)
     * {
+    *    c %= 64;
+    *
     *    if (c == 0) return x;
     *
     *    uint32_t lo = LO(x), hi = HI(x);
@@ -274,6 +282,7 @@ lower_ushr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
    nir_ssa_def *x_lo = nir_unpack_64_2x32_split_x(b, x);
    nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
+   y = nir_iand_imm(b, y, 0x3f);
    nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
    nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);
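
Note (not part of the patch): below is a minimal host-side C sketch of the lowered
left shift, following the lshift() pseudocode from the patched comments. The LO/HI/PACK
helpers and the main() demo are hypothetical, added only for illustration; the `c &= 63`
line mirrors the `nir_iand_imm(b, y, 0x3f)` fix, which realizes the pseudocode's
`c %= 64` as a mask of the low six bits.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helpers, not Mesa code: split and repack 64-bit values
     * as two 32-bit halves, as the lowering does via unpack_64_2x32. */
    #define LO(x) ((uint32_t)(x))
    #define HI(x) ((uint32_t)((x) >> 32))
    #define PACK(lo, hi) (((uint64_t)(hi) << 32) | (uint64_t)(lo))

    static uint64_t lshift(uint64_t x, int c)
    {
       /* The fix: mask the shift count to its least significant bits,
        * matching NIR's definition of shifts. Without this, counts >= 64
        * feed a wrong value into the (c - 32) / (32 - c) math below. */
       c &= 63;

       if (c == 0) return x;

       uint32_t lo = LO(x), hi = HI(x);

       if (c < 32) {
          /* Low bits of lo spill into the high word. */
          uint32_t lo_shifted = lo << c;
          uint32_t lo_shifted_hi = lo >> (32 - c);
          uint32_t hi_shifted = hi << c;
          return PACK(lo_shifted, hi_shifted | lo_shifted_hi);
       } else {
          /* Shifts of 32..63 move lo entirely into the high word. */
          return PACK(0, lo << (c - 32));
       }
    }

    int main(void)
    {
       /* With the mask, a count of 65 behaves like a count of 1. */
       printf("%llx\n", (unsigned long long)lshift(0x1, 65));          /* 2 */
       printf("%llx\n", (unsigned long long)lshift(0xdeadbeef, 32));   /* deadbeef00000000 */
       return 0;
    }

Without the `c &= 63` line, a count like 65 would take the c >= 32 path
(65 - 32 = 33) instead of behaving like a shift by 1, which is the class of
wrong result the commit message describes for counts with insignificant bits set.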