mirror of https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-03-16 08:20:36 +01:00
nir: use imm-helpers
We have to use 1ull instead of 1u because MSVC is stupid...

Reviewed-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23855>
This commit is contained in:
parent 0d8c458e8f
commit afa79cd9b8
12 changed files with 77 additions and 72 deletions
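The series applies one mechanical pattern throughout: instead of materializing a constant with nir_imm_int()/nir_imm_intN_t() and passing it to a two-source builder op, the *_imm builder helpers take the constant directly as a uint64_t and emit the immediate at the operand's bit size internally. A minimal sketch of the before/after shape, assuming the helper signatures from NIR's builder headers; the function names mask_sign_bit_old/new are made up for illustration, not code from this commit:

    #include "nir_builder.h"   /* assumed include; illustrative snippets only */

    /* Before: materialize the constant as an SSA value, then do the ALU op. */
    static nir_ssa_def *
    mask_sign_bit_old(nir_builder *b, nir_ssa_def *s)
    {
       uint64_t mask = 1ull << (s->bit_size - 1);
       return nir_iand(b, s, nir_imm_intN_t(b, mask, s->bit_size));
    }

    /* After: the *_imm helper takes the constant as a plain uint64_t and
     * builds the nir_imm_intN_t() at s->bit_size itself. */
    static nir_ssa_def *
    mask_sign_bit_new(nir_builder *b, nir_ssa_def *s)
    {
       uint64_t mask = 1ull << (s->bit_size - 1);
       return nir_iand_imm(b, s, mask);
    }

The 1ull note in the commit message follows from the helpers taking uint64_t: a shift written as 1u << i is evaluated in 32 bits and only widened afterwards, which MSVC flags (presumably the "32-bit shift implicitly converted to 64 bits" warning), so the constants fed to nir_ior_imm() below are written as 1ull << i.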
@@ -232,7 +232,7 @@ nir_select(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *s)
 {
    if (s->num_components != 1) {
       uint64_t mask = 1ull << (s->bit_size - 1);
-      s = nir_iand(b, s, nir_imm_intN_t(b, mask, s->bit_size));
+      s = nir_iand_imm(b, s, mask);
    }
    return nir_bcsel(b, nir_ieq_imm(b, s, 0), x, y);
 }
@@ -247,8 +247,9 @@ static inline nir_ssa_def *
 nir_clz_u(nir_builder *b, nir_ssa_def *a)
 {
    nir_ssa_def *val;
-   val = nir_isub(b, nir_imm_intN_t(b, a->bit_size - 1, 32),
-                  nir_ufind_msb(b, nir_u2uN(b, a, MAX2(a->bit_size, 32))));
+   val = nir_isub_imm(b, a->bit_size - 1,
+                      nir_ufind_msb(b, nir_u2uN(b, a,
+                                                MAX2(a->bit_size, 32))));
    return nir_u2uN(b, val, a->bit_size);
 }
 
@@ -29,9 +29,9 @@ static inline nir_ssa_def *
 nir_shift_imm(nir_builder *b, nir_ssa_def *value, int left_shift)
 {
    if (left_shift > 0)
-      return nir_ishl(b, value, nir_imm_int(b, left_shift));
+      return nir_ishl_imm(b, value, left_shift);
    else if (left_shift < 0)
-      return nir_ushr(b, value, nir_imm_int(b, -left_shift));
+      return nir_ushr_imm(b, value, -left_shift);
    else
       return value;
 }
@@ -49,7 +49,7 @@ static inline nir_ssa_def *
 nir_mask_shift(struct nir_builder *b, nir_ssa_def *src,
                uint32_t mask, int left_shift)
 {
-   return nir_shift_imm(b, nir_iand(b, src, nir_imm_int(b, mask)), left_shift);
+   return nir_shift_imm(b, nir_iand_imm(b, src, mask), left_shift);
 }
 
 static inline nir_ssa_def *
@@ -78,8 +78,11 @@ nir_format_sign_extend_ivec(nir_builder *b, nir_ssa_def *src,
    assert(src->num_components <= 4);
    nir_ssa_def *comps[4];
    for (unsigned i = 0; i < src->num_components; i++) {
-      nir_ssa_def *shift = nir_imm_int(b, src->bit_size - bits[i]);
-      comps[i] = nir_ishr(b, nir_ishl(b, nir_channel(b, src, i), shift), shift);
+      unsigned shift = src->bit_size - bits[i];
+      comps[i] = nir_ishr_imm(b, nir_ishl_imm(b,
+                                              nir_channel(b, src, i),
+                                              shift),
+                              shift);
    }
    return nir_vec(b, comps, src->num_components);
 }
@@ -106,12 +109,12 @@ nir_format_unpack_int(nir_builder *b, nir_ssa_def *packed,
       assert(bits[i] < bit_size);
       assert(offset + bits[i] <= bit_size);
       nir_ssa_def *chan = nir_channel(b, packed, next_chan);
-      nir_ssa_def *lshift = nir_imm_int(b, bit_size - (offset + bits[i]));
-      nir_ssa_def *rshift = nir_imm_int(b, bit_size - bits[i]);
+      unsigned lshift = bit_size - (offset + bits[i]);
+      unsigned rshift = bit_size - bits[i];
       if (sign_extend)
-         comps[i] = nir_ishr(b, nir_ishl(b, chan, lshift), rshift);
+         comps[i] = nir_ishr_imm(b, nir_ishl_imm(b, chan, lshift), rshift);
       else
-         comps[i] = nir_ushr(b, nir_ishl(b, chan, lshift), rshift);
+         comps[i] = nir_ushr_imm(b, nir_ishl_imm(b, chan, lshift), rshift);
       offset += bits[i];
       if (offset >= bit_size) {
          next_chan++;
@@ -194,8 +197,8 @@ nir_format_bitcast_uvec_unmasked(nir_builder *b, nir_ssa_def *src,
       unsigned shift = 0;
       unsigned dst_idx = 0;
       for (unsigned i = 0; i < src->num_components; i++) {
-         nir_ssa_def *shifted = nir_ishl(b, nir_channel(b, src, i),
-                                         nir_imm_int(b, shift));
+         nir_ssa_def *shifted = nir_ishl_imm(b, nir_channel(b, src, i),
+                                             shift);
          if (shift == 0) {
            dst_chan[dst_idx] = shifted;
          } else {
@@ -209,14 +212,16 @@ nir_format_bitcast_uvec_unmasked(nir_builder *b, nir_ssa_def *src,
          }
       }
    } else {
-      nir_ssa_def *mask = nir_imm_int(b, ~0u >> (32 - dst_bits));
+      unsigned mask = ~0u >> (32 - dst_bits);
 
       unsigned src_idx = 0;
       unsigned shift = 0;
       for (unsigned i = 0; i < dst_components; i++) {
-         dst_chan[i] = nir_iand(b, nir_ushr_imm(b, nir_channel(b, src, src_idx),
-                                                shift),
-                                mask);
+         dst_chan[i] = nir_iand_imm(b,
+                                    nir_ushr_imm(b,
+                                                 nir_channel(b, src, src_idx),
+                                                 shift),
+                                    mask);
          shift += dst_bits;
          if (shift >= src_bits) {
            src_idx++;
@@ -421,27 +426,27 @@ nir_format_pack_r9g9b9e5(nir_builder *b, nir_ssa_def *color)
                                       nir_channel(b, clamped, 2)));
 
    /* maxrgb.u += maxrgb.u & (1 << (23-9)); */
-   maxu = nir_iadd(b, maxu, nir_iand(b, maxu, nir_imm_int(b, 1 << 14)));
+   maxu = nir_iadd(b, maxu, nir_iand_imm(b, maxu, 1 << 14));
 
    /* exp_shared = MAX2((maxrgb.u >> 23), -RGB9E5_EXP_BIAS - 1 + 127) +
     *              1 + RGB9E5_EXP_BIAS - 127;
     */
    nir_ssa_def *exp_shared =
-      nir_iadd(b, nir_umax(b, nir_ushr_imm(b, maxu, 23),
-                           nir_imm_int(b, -RGB9E5_EXP_BIAS - 1 + 127)),
-               nir_imm_int(b, 1 + RGB9E5_EXP_BIAS - 127));
+      nir_iadd_imm(b, nir_umax(b, nir_ushr_imm(b, maxu, 23),
+                               nir_imm_int(b, -RGB9E5_EXP_BIAS - 1 + 127)),
+                   1 + RGB9E5_EXP_BIAS - 127);
 
    /* revdenom_biasedexp = 127 - (exp_shared - RGB9E5_EXP_BIAS -
     *                            RGB9E5_MANTISSA_BITS) + 1;
     */
    nir_ssa_def *revdenom_biasedexp =
-      nir_isub(b, nir_imm_int(b, 127 + RGB9E5_EXP_BIAS +
-                              RGB9E5_MANTISSA_BITS + 1),
-               exp_shared);
+      nir_isub_imm(b, 127 + RGB9E5_EXP_BIAS +
+                   RGB9E5_MANTISSA_BITS + 1,
+                   exp_shared);
 
    /* revdenom.u = revdenom_biasedexp << 23; */
    nir_ssa_def *revdenom =
-      nir_ishl(b, revdenom_biasedexp, nir_imm_int(b, 23));
+      nir_ishl_imm(b, revdenom_biasedexp, 23);
 
    /* rm = (int) (rc.f * revdenom.f);
    * gm = (int) (gc.f * revdenom.f);
@@ -147,7 +147,7 @@ lower_alu_instr(nir_builder *b, nir_instr *instr_, UNUSED void *cb_data)
       nir_ssa_def *src0_32 = nir_type_convert(b, src0, base_type, base_type | 32, nir_rounding_mode_undef);
       nir_ssa_def *src1_32 = nir_type_convert(b, src1, base_type, base_type | 32, nir_rounding_mode_undef);
       nir_ssa_def *dest_32 = nir_imul(b, src0_32, src1_32);
-      nir_ssa_def *dest_shifted = nir_ishr(b, dest_32, nir_imm_int(b, src0->bit_size));
+      nir_ssa_def *dest_shifted = nir_ishr_imm(b, dest_32, src0->bit_size);
       lowered = nir_type_convert(b, dest_shifted, base_type, base_type | src0->bit_size, nir_rounding_mode_undef);
    } else {
       nir_ssa_def *cshift = nir_imm_int(b, src0->bit_size / 2);
@@ -79,7 +79,7 @@ get_signed_inf(nir_builder *b, nir_ssa_def *zero)
     * the low 32 bits are always 0 so we can construct the correct high 32
     * bits and then pack it together with zero low 32 bits.
     */
-   nir_ssa_def *inf_hi = nir_ior(b, nir_imm_int(b, 0x7ff00000), zero_hi);
+   nir_ssa_def *inf_hi = nir_ior_imm(b, zero_hi, 0x7ff00000);
    return nir_pack_64_2x32_split(b, nir_imm_int(b, 0), inf_hi);
 }
 
@@ -125,8 +125,8 @@ lower_rcp(nir_builder *b, nir_ssa_def *src)
     * small below.
     */
    nir_ssa_def *new_exp = nir_isub(b, get_exponent(b, ra),
-                                   nir_isub(b, get_exponent(b, src),
-                                            nir_imm_int(b, 1023)));
+                                   nir_iadd_imm(b, get_exponent(b, src),
+                                                -1023));
 
    ra = set_exponent(b, ra, new_exp);
 
@@ -174,14 +174,13 @@ lower_sqrt_rsq(nir_builder *b, nir_ssa_def *src, bool sqrt)
     * shifting right by 1.
     */
 
-   nir_ssa_def *unbiased_exp = nir_isub(b, get_exponent(b, src),
-                                        nir_imm_int(b, 1023));
+   nir_ssa_def *unbiased_exp = nir_iadd_imm(b, get_exponent(b, src),
+                                            -1023);
    nir_ssa_def *even = nir_iand_imm(b, unbiased_exp, 1);
    nir_ssa_def *half = nir_ishr_imm(b, unbiased_exp, 1);
 
    nir_ssa_def *src_norm = set_exponent(b, src,
-                                        nir_iadd(b, nir_imm_int(b, 1023),
-                                                 even));
+                                        nir_iadd_imm(b, even, 1023));
 
    nir_ssa_def *ra = nir_f2f64(b, nir_frsq(b, nir_f2f32(b, src_norm)));
    nir_ssa_def *new_exp = nir_isub(b, get_exponent(b, ra), half);
@@ -281,7 +280,7 @@ lower_sqrt_rsq(nir_builder *b, nir_ssa_def *src, bool sqrt)
       nir_ssa_def *r_1 = nir_ffma(b, nir_fneg(b, g_1), g_1, src);
       res = nir_ffma(b, h_1, r_1, g_1);
    } else {
-      nir_ssa_def *y_1 = nir_fmul(b, nir_imm_double(b, 2.0), h_1);
+      nir_ssa_def *y_1 = nir_fmul_imm(b, h_1, 2.0);
       nir_ssa_def *r_1 = nir_ffma(b, nir_fneg(b, y_1), nir_fmul(b, h_1, src),
                                   one_half);
       res = nir_ffma(b, y_1, r_1, y_1);
@@ -315,10 +314,10 @@ lower_sqrt_rsq(nir_builder *b, nir_ssa_def *src, bool sqrt)
 static nir_ssa_def *
 lower_trunc(nir_builder *b, nir_ssa_def *src)
 {
-   nir_ssa_def *unbiased_exp = nir_isub(b, get_exponent(b, src),
-                                        nir_imm_int(b, 1023));
+   nir_ssa_def *unbiased_exp = nir_iadd_imm(b, get_exponent(b, src),
+                                            -1023);
 
-   nir_ssa_def *frac_bits = nir_isub(b, nir_imm_int(b, 52), unbiased_exp);
+   nir_ssa_def *frac_bits = nir_isub_imm(b, 52, unbiased_exp);
 
    /*
    * Decide the operation to apply depending on the unbiased exponent:
@@ -348,7 +347,7 @@ lower_trunc(nir_builder *b, nir_ssa_def *src)
                            nir_imm_int(b, ~0),
                            nir_ishl(b,
                                     nir_imm_int(b, ~0),
-                                    nir_isub(b, frac_bits, nir_imm_int(b, 32))));
+                                    nir_iadd_imm(b, frac_bits, -32)));
 
    nir_ssa_def *src_lo = nir_unpack_64_2x32_split_x(b, src);
    nir_ssa_def *src_hi = nir_unpack_64_2x32_split_y(b, src);
@@ -393,7 +392,7 @@ lower_ceil(nir_builder *b, nir_ssa_def *src)
    return nir_bcsel(b,
                     nir_ior(b, negative, nir_feq(b, src, tr)),
                     tr,
-                    nir_fadd(b, tr, nir_imm_double(b, 1.0)));
+                    nir_fadd_imm(b, tr, 1.0));
 }
 
 static nir_ssa_def *
@@ -407,8 +406,8 @@ lower_round_even(nir_builder *b, nir_ssa_def *src)
 {
    /* Add and subtract 2**52 to round off any fractional bits. */
    nir_ssa_def *two52 = nir_imm_double(b, (double)(1ull << 52));
-   nir_ssa_def *sign = nir_iand(b, nir_unpack_64_2x32_split_y(b, src),
-                                nir_imm_int(b, 1ull << 31));
+   nir_ssa_def *sign = nir_iand_imm(b, nir_unpack_64_2x32_split_y(b, src),
+                                    1ull << 31);
 
    b->exact = true;
    nir_ssa_def *res = nir_fsub(b, nir_fadd(b, nir_fabs(b, src), two52), two52);
@@ -183,7 +183,7 @@ lower_ishl64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
    nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
    y = nir_iand_imm(b, y, 0x3f);
 
-   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
+   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd_imm(b, y, -32));
    nir_ssa_def *lo_shifted = nir_ishl(b, x_lo, y);
    nir_ssa_def *hi_shifted = nir_ishl(b, x_hi, y);
    nir_ssa_def *lo_shifted_hi = nir_ushr(b, x_lo, reverse_count);
@@ -230,7 +230,7 @@ lower_ishr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
    nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
    y = nir_iand_imm(b, y, 0x3f);
 
-   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
+   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd_imm(b, y, -32));
    nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);
    nir_ssa_def *hi_shifted = nir_ishr(b, x_hi, y);
    nir_ssa_def *hi_shifted_lo = nir_ishl(b, x_hi, reverse_count);
@@ -276,7 +276,7 @@ lower_ushr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y)
    nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x);
    y = nir_iand_imm(b, y, 0x3f);
 
-   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd(b, y, nir_imm_int(b, -32)));
+   nir_ssa_def *reverse_count = nir_iabs(b, nir_iadd_imm(b, y, -32));
    nir_ssa_def *lo_shifted = nir_ushr(b, x_lo, y);
    nir_ssa_def *hi_shifted = nir_ushr(b, x_hi, y);
    nir_ssa_def *hi_shifted_lo = nir_ishl(b, x_hi, reverse_count);
@@ -537,9 +537,9 @@ lower_udiv64_mod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d,
        *    quot.y |= 1U << i;
        * }
        */
-      nir_ssa_def *d_shift = nir_ishl(b, d_lo, nir_imm_int(b, i));
+      nir_ssa_def *d_shift = nir_ishl_imm(b, d_lo, i);
       nir_ssa_def *new_n_hi = nir_isub(b, n_hi, d_shift);
-      nir_ssa_def *new_q_hi = nir_ior(b, q_hi, nir_imm_int(b, 1u << i));
+      nir_ssa_def *new_q_hi = nir_ior_imm(b, q_hi, 1ull << i);
       nir_ssa_def *cond = nir_iand(b, need_high_div,
                                    nir_uge(b, n_hi, d_shift));
       if (i != 0) {
@@ -567,9 +567,9 @@ lower_udiv64_mod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d,
        *    quot.x |= 1U << i;
        * }
        */
-      nir_ssa_def *d_shift = nir_ishl(b, d, nir_imm_int(b, i));
+      nir_ssa_def *d_shift = nir_ishl_imm(b, d, i);
       nir_ssa_def *new_n = nir_isub(b, n, d_shift);
-      nir_ssa_def *new_q_lo = nir_ior(b, q_lo, nir_imm_int(b, 1u << i));
+      nir_ssa_def *new_q_lo = nir_ior_imm(b, q_lo, 1ull << i);
       nir_ssa_def *cond = nir_uge(b, n, d_shift);
       if (i != 0) {
          /* log2_denom is always <= 31, so we don't need to bother with it
@@ -683,7 +683,7 @@ lower_ufind_msb64(nir_builder *b, nir_ssa_def *x)
 
    if (b->shader->options->lower_uadd_sat) {
       nir_ssa_def *valid_hi_bits = nir_ine_imm(b, x_hi, 0);
-      nir_ssa_def *hi_res = nir_iadd(b, nir_imm_intN_t(b, 32, 32), hi_count);
+      nir_ssa_def *hi_res = nir_iadd_imm(b, hi_count, 32);
       return nir_bcsel(b, valid_hi_bits, hi_res, lo_count);
    } else {
       /* If hi_count was -1, it will still be -1 after this uadd_sat. As a
@@ -712,7 +712,7 @@ lower_find_lsb64(nir_builder *b, nir_ssa_def *x)
    /* Use umin so that -1 (no bits found) becomes larger (0xFFFFFFFF)
    * than any actual bit position, so we return a found bit instead.
    */
-   return nir_umin(b, lo_lsb, nir_iadd(b, hi_lsb, nir_imm_int(b, 32)));
+   return nir_umin(b, lo_lsb, nir_iadd_imm(b, hi_lsb, 32));
 }
 
 static nir_ssa_def *
@@ -746,7 +746,7 @@ lower_2f(nir_builder *b, nir_ssa_def *x, unsigned dest_bit_size,
    }
 
    nir_ssa_def *discard =
-      nir_imax(b, nir_isub(b, exp, nir_imm_int(b, significand_bits)),
+      nir_imax(b, nir_iadd_imm(b, exp, -significand_bits),
               nir_imm_int(b, 0));
    nir_ssa_def *significand = COND_LOWER_OP(b, ushr, x, discard);
    if (significand_bits < 32)
@@ -783,7 +783,7 @@ lower_2f(nir_builder *b, nir_ssa_def *x, unsigned dest_bit_size,
        * unrounded input manually.
        */
       nir_ssa_def *shift =
-         nir_imax(b, nir_isub(b, nir_imm_int(b, significand_bits), exp),
+         nir_imax(b, nir_isub_imm(b, significand_bits, exp),
                  nir_imm_int(b, 0));
       significand = COND_LOWER_OP(b, ishl, significand, shift);
 
@@ -806,7 +806,7 @@ lower_2f(nir_builder *b, nir_ssa_def *x, unsigned dest_bit_size,
    */
    nir_ssa_def *biased_exp = nir_bcsel(b, nir_ilt_imm(b, exp, 0),
                                        nir_imm_int(b, 0),
-                                       nir_iadd(b, exp, nir_imm_int(b, 1023)));
+                                       nir_iadd_imm(b, exp, 1023));
 
    /* Pack the significand and exponent manually. */
    nir_ssa_def *lo = nir_unpack_64_2x32_split_x(b, significand);
@@ -1243,10 +1243,10 @@ lower_scan_iadd64(nir_builder *b, const nir_intrinsic_instr *intrin)
    nir_ssa_def *x_low =
       nir_u2u32(b, nir_iand_imm(b, x, 0xffffff));
    nir_ssa_def *x_mid =
-      nir_u2u32(b, nir_iand_imm(b, nir_ushr(b, x, nir_imm_int(b, 24)),
+      nir_u2u32(b, nir_iand_imm(b, nir_ushr_imm(b, x, 24),
                                 0xffffff));
    nir_ssa_def *x_hi =
-      nir_u2u32(b, nir_ushr(b, x, nir_imm_int(b, 48)));
+      nir_u2u32(b, nir_ushr_imm(b, x, 48));
 
    nir_ssa_def *scan_low =
       build_scan_intrinsic(b, intrin->intrinsic, nir_op_iadd,
@@ -961,7 +961,7 @@ build_runtime_addr_mode_check(nir_builder *b, nir_ssa_def *addr,
    case nir_address_format_62bit_generic: {
       assert(addr->num_components == 1);
       assert(addr->bit_size == 64);
-      nir_ssa_def *mode_enum = nir_ushr(b, addr, nir_imm_int(b, 62));
+      nir_ssa_def *mode_enum = nir_ushr_imm(b, addr, 62);
       switch (mode) {
       case nir_var_function_temp:
       case nir_var_shader_temp:
@@ -76,8 +76,9 @@ lower_tex_src_to_offset(nir_builder *b,
       }
 
       index = nir_iadd(b, index,
-                       nir_imul(b, nir_imm_int(b, array_elements),
-                                nir_ssa_for_src(b, deref->arr.index, 1)));
+                       nir_imul_imm(b,
+                                    nir_ssa_for_src(b, deref->arr.index, 1),
+                                    array_elements));
    }
 
    array_elements *= glsl_get_length(parent->type);
@@ -260,7 +260,7 @@ lower_to_shuffle(nir_builder *b, nir_intrinsic_instr *intrin,
       break;
    case nir_intrinsic_quad_broadcast:
      assert(intrin->src[1].is_ssa);
-      index = nir_ior(b, nir_iand(b, index, nir_imm_int(b, ~0x3)),
+      index = nir_ior(b, nir_iand_imm(b, index, ~0x3),
                      intrin->src[1].ssa);
      break;
    case nir_intrinsic_quad_swap_horizontal:
@@ -600,8 +600,7 @@ lower_dynamic_quad_broadcast(nir_builder *b, nir_intrinsic_instr *intrin,
       }
 
       if (i)
-         dst = nir_bcsel(b, nir_ieq(b, intrin->src[1].ssa,
-                                    nir_src_for_ssa(nir_imm_int(b, i)).ssa),
+         dst = nir_bcsel(b, nir_ieq_imm(b, intrin->src[1].ssa, i),
                          qbcst_dst, dst);
       else
          dst = qbcst_dst;
@@ -59,7 +59,7 @@ nir_lower_uniforms_to_ubo_instr(nir_builder *b, nir_instr *instr, void *data)
    if (intr->intrinsic == nir_intrinsic_load_ubo &&
        !b->shader->info.first_ubo_is_default_ubo) {
       nir_ssa_def *old_idx = nir_ssa_for_src(b, intr->src[0], 1);
-      nir_ssa_def *new_idx = nir_iadd(b, old_idx, nir_imm_int(b, 1));
+      nir_ssa_def *new_idx = nir_iadd_imm(b, old_idx, 1);
       nir_instr_rewrite_src(&intr->instr, &intr->src[0],
                             nir_src_for_ssa(new_idx));
       return true;
@@ -55,10 +55,9 @@ build_umod(nir_builder *b, nir_ssa_def *n, uint64_t d)
    if (d == 0) {
       return nir_imm_intN_t(b, 0, n->bit_size);
    } else if (util_is_power_of_two_or_zero64(d)) {
-      return nir_iand(b, n, nir_imm_intN_t(b, d - 1, n->bit_size));
+      return nir_iand_imm(b, n, d - 1);
    } else {
-      return nir_isub(b, n, nir_imul(b, build_udiv(b, n, d),
-                                     nir_imm_intN_t(b, d, n->bit_size)));
+      return nir_isub(b, n, nir_imul_imm(b, build_udiv(b, n, d), d));
    }
 }
 
@@ -115,8 +114,7 @@ build_irem(nir_builder *b, nir_ssa_def *n, int64_t d)
                              nir_iadd_imm(b, n, d - 1), n);
          return nir_isub(b, n, nir_iand_imm(b, tmp, -d));
       } else {
-         return nir_isub(b, n, nir_imul(b, build_idiv(b, n, d),
-                                        nir_imm_intN_t(b, d, n->bit_size)));
+         return nir_isub(b, n, nir_imul_imm(b, build_idiv(b, n, d), d));
       }
    }
 }
@@ -133,7 +131,7 @@ build_imod(nir_builder *b, nir_ssa_def *n, int64_t d)
      nir_ssa_def *is_zero = nir_ieq_imm(b, n, 0);
      return nir_bcsel(b, nir_ior(b, is_neg_not_int_min, is_zero), n, nir_iadd(b, int_min_def, n));
   } else if (d > 0 && util_is_power_of_two_or_zero64(d)) {
-      return nir_iand(b, n, nir_imm_intN_t(b, d - 1, n->bit_size));
+      return nir_iand_imm(b, n, d - 1);
   } else if (d < 0 && util_is_power_of_two_or_zero64(-d)) {
      nir_ssa_def *d_def = nir_imm_intN_t(b, d, n->bit_size);
      nir_ssa_def *res = nir_ior(b, n, d_def);
@@ -24,6 +24,8 @@
 #include "nir.h"
 #include "nir_builder.h"
 
+#include "util/u_math.h"
+
 static bool
 nir_scale_fdiv_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
 {
@@ -39,8 +41,8 @@ nir_scale_fdiv_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
    nir_ssa_def *orig_a = nir_ssa_for_alu_src(b, alu, 0);
    nir_ssa_def *orig_b = nir_ssa_for_alu_src(b, alu, 1);
    nir_ssa_def *fabs = nir_fabs(b, orig_b);
-   nir_ssa_def *big = nir_flt(b, nir_imm_int(b, 0x7e800000), fabs);
-   nir_ssa_def *small = nir_flt(b, fabs, nir_imm_int(b, 0x00800000));
+   nir_ssa_def *big = nir_fgt_imm(b, fabs, uif(0x7e800000));
+   nir_ssa_def *small = nir_flt_imm(b, fabs, uif(0x00800000));
 
    nir_ssa_def *scaled_down_a = nir_fmul_imm(b, orig_a, 0.25);
    nir_ssa_def *scaled_down_b = nir_fmul_imm(b, orig_b, 0.25);
@@ -2264,7 +2264,7 @@ TEST_F(nir_split_vars_test, twolevel_dont_split_lvl_1)
      nir_deref_instr *level0 = nir_build_deref_array_imm(b, temp_deref, i);
      for (int j = 0; j < 6; j++) {
         /* just add the inner index to get some different derefs */
-        nir_deref_instr *level1 = nir_build_deref_array(b, level0, nir_iadd(b, &ind_deref->dest.ssa, nir_imm_int(b, j)));
+        nir_deref_instr *level1 = nir_build_deref_array(b, level0, nir_iadd_imm(b, &ind_deref->dest.ssa, j));
         nir_store_deref(b, level1, nir_load_var(b, in[i]), 1);
      }
   }