intel/compiler: round f2f16 correctly for RTNE case

v2: bcsel -> b2i32 (Ian)

Fixes upcoming Vulkan CTS tests:
dEQP-VK.spirv_assembly.instruction.compute.float_controls.fp16.input_args.rounding_rte_conv_from_fp64_up
dEQP-VK.spirv_assembly.instruction.compute.float_controls.fp16.input_args.rounding_rte_conv_from_fp64_up_nostorage
dEQP-VK.spirv_assembly.instruction.graphics.float_controls.fp16.input_args.rounding_rte_conv_from_fp64_up_vert
dEQP-VK.spirv_assembly.instruction.graphics.float_controls.fp16.input_args.rounding_rte_conv_from_fp64_up_nostorage_vert
dEQP-VK.spirv_assembly.instruction.graphics.float_controls.fp16.input_args.rounding_rte_conv_from_fp64_up_frag
dEQP-VK.spirv_assembly.instruction.graphics.float_controls.fp16.input_args.rounding_rte_conv_from_fp64_up_nostorage_frag

Reviewed-by: Ian Romanick <ian.d.romanick@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/25281>
This commit is contained in:
Iván Briano 2023-09-14 18:09:07 -07:00 committed by Marge Bot
parent c8a8b09c15
commit 54498937c5
2 changed files with 62 additions and 4 deletions

View file

@@ -1658,7 +1658,11 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
} while (progress);
OPT(brw_nir_lower_conversions);
if (OPT(brw_nir_lower_conversions)) {
if (OPT(nir_lower_int64)) {
brw_nir_optimize(nir, compiler);
}
}
if (is_scalar)
OPT(nir_lower_alu_to_scalar, NULL, NULL);

View file

@@ -25,7 +25,8 @@
#include "compiler/nir/nir_builder.h"
static nir_rounding_mode
get_opcode_rounding_mode(nir_op op)
get_opcode_rounding_mode(nir_op op, nir_alu_type dst_type,
unsigned execution_mode)
{
switch (op) {
case nir_op_f2f16_rtz:
@@ -33,7 +34,8 @@ get_opcode_rounding_mode(nir_op op)
case nir_op_f2f16_rtne:
return nir_rounding_mode_rtne;
default:
return nir_rounding_mode_undef;
return nir_get_rounding_mode_from_float_controls(execution_mode,
dst_type);
}
}
@@ -45,6 +47,57 @@ split_conversion(nir_builder *b, nir_alu_instr *alu, nir_alu_type src_type,
b->cursor = nir_before_instr(&alu->instr);
nir_def *src = nir_ssa_for_alu_src(b, alu, 0);
nir_def *tmp = nir_type_convert(b, src, src_type, tmp_type, nir_rounding_mode_undef);
if (dst_type == nir_type_float16 && rnd == nir_rounding_mode_rtne) {
/* We round down from double to half float by going through float in
* between, but this can give us inaccurate results in some cases. One
* such case is 0x40ee6a0000000001, which should round to 0x7b9b, but
* going through float first turns into 0x7b9a instead. This is because
* the first non-fitting bit is set, so we get a tie, but with the least
* significant bit of the original number set, the tie should break
* rounding up. The cast to float, however, turns into 0x47735000, which
* when going to half still ties, but now we lost the tie-up bit, and
* instead we round to the nearest even, which in this case is down.
*
* To fix this, we check if the original would have tied, and if the tie
* would have rounded up, and if both are true, set the least
* significant bit of the intermediate float to 1, so that a tie on the
* next cast rounds up as well. If the rounding already got rid of the
* tie, that set bit will just be truncated anyway and the end result
* doesn't change.
*
* Another failing case is 0x40effdffffffffff. This one doesn't have the
* tie from double to half, so it just rounds down to 0x7bff (65504.0),
* but going through float first, it turns into 0x477ff000, which does
* have the tie bit for half set, and when that one gets rounded it
* turns into 0x7c00 (Infinity).
* The fix for that one is to make sure the intermediate float does not
* have the tie bit set if the original didn't have it.
*
* For the RTZ case, we don't need to do anything, as the intermediate
* float should be ok already.
*/
int significand_bits16 = 10;
int significand_bits32 = 23;
int significand_bits64 = 52;
int f64_to_16_tie_bit = significand_bits64 - significand_bits16 - 1;
int f32_to_16_tie_bit = significand_bits32 - significand_bits16 - 1;
uint64_t f64_rounds_up_mask = ((1ULL << f64_to_16_tie_bit) - 1);
nir_def *would_tie = nir_iand_imm(b, src, 1ULL << f64_to_16_tie_bit);
nir_def *would_rnd_up = nir_iand_imm(b, src, f64_rounds_up_mask);
nir_def *tie_up = nir_b2i32(b, nir_ine_imm(b, would_rnd_up, 0));
nir_def *break_tie = nir_bcsel(b,
nir_ine_imm(b, would_tie, 0),
nir_imm_int(b, ~0),
nir_imm_int(b, ~(1U << f32_to_16_tie_bit)));
tmp = nir_ior(b, tmp, tie_up);
tmp = nir_iand(b, tmp, break_tie);
}
nir_def *res = nir_type_convert(b, tmp, tmp_type, dst_type, rnd);
nir_def_rewrite_uses(&alu->def, res);
nir_instr_remove(&alu->instr);
@@ -78,7 +131,8 @@ lower_alu_instr(nir_builder *b, nir_alu_instr *alu)
(src_bit_size == 64 && dst_full_type == nir_type_float16)) {
split_conversion(b, alu, src_type, nir_type_float | 32,
dst_type | dst_bit_size,
get_opcode_rounding_mode(alu->op));
get_opcode_rounding_mode(alu->op, dst_full_type,
b->shader->info.float_controls_execution_mode));
return true;
}