diff --git a/src/panfrost/bifrost/bi_pack.c b/src/panfrost/bifrost/bi_pack.c index aa0db55ab5f..76b4de791e9 100644 --- a/src/panfrost/bifrost/bi_pack.c +++ b/src/panfrost/bifrost/bi_pack.c @@ -23,7 +23,7 @@ #include "compiler.h" #include "bi_print.h" -#include "bi_pack_helpers.h" +#include "bi_generated_pack.h" #define RETURN_PACKED(str) { \ uint64_t temp = 0; \ @@ -402,1209 +402,194 @@ bi_pack_registers(bi_registers regs) return packed; } -/* Constructs a packed 2-bit swizzle for a 16-bit vec2 source. Source must be - * 16-bit and written components must correspond to valid swizzles (component x - * or y). */ - -static unsigned -bi_swiz16(bi_instruction *ins, unsigned src) -{ - assert(nir_alu_type_get_type_size(ins->src_types[src]) == 16); - unsigned swizzle = 0; - - for (unsigned c = 0; c < 2; ++c) { - if (!bi_writes_component(ins, src)) continue; - - unsigned k = ins->swizzle[src][c]; - assert(k <= 1); - swizzle |= (k << c); - } - - return swizzle; -} - -static unsigned -bi_pack_fma_fma(bi_instruction *ins, bi_registers *regs) -{ - /* (-a)(-b) = ab, so we only need one negate bit */ - bool negate_mul = ins->src_neg[0] ^ ins->src_neg[1]; - - if (ins->op.mscale) { - assert(!(ins->src_abs[0] && ins->src_abs[1])); - assert(!ins->src_abs[2] || !ins->src_neg[3] || !ins->src_abs[3]); - - /* We can have exactly one abs, and can flip the multiplication - * to make it fit if we have to */ - bool flip_ab = ins->src_abs[1]; - - struct bifrost_fma_mscale pack = { - .src0 = bi_get_src(ins, regs, flip_ab ? 1 : 0), - .src1 = bi_get_src(ins, regs, flip_ab ? 
0 : 1), - .src2 = bi_get_src(ins, regs, 2), - .src3 = bi_get_src(ins, regs, 3), - .mscale_mode = 0, - .mode = ins->outmod, - .src0_abs = ins->src_abs[0] || ins->src_abs[1], - .src1_neg = negate_mul, - .src2_neg = ins->src_neg[2], - .op = BIFROST_FMA_OP_MSCALE, - }; - - RETURN_PACKED(pack); - } else if (ins->dest_type == nir_type_float32) { - struct bifrost_fma_fma pack = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .src2 = bi_get_src(ins, regs, 2), - .src0_abs = ins->src_abs[0], - .src1_abs = ins->src_abs[1], - .src2_abs = ins->src_abs[2], - .src0_neg = negate_mul, - .src2_neg = ins->src_neg[2], - .outmod = ins->outmod, - .roundmode = ins->roundmode, - .op = BIFROST_FMA_OP_FMA - }; - - RETURN_PACKED(pack); - } else if (ins->dest_type == nir_type_float16) { - struct bifrost_fma_fma16 pack = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .src2 = bi_get_src(ins, regs, 2), - .swizzle_0 = bi_swiz16(ins, 0), - .swizzle_1 = bi_swiz16(ins, 1), - .swizzle_2 = bi_swiz16(ins, 2), - .src0_neg = negate_mul, - .src2_neg = ins->src_neg[2], - .outmod = ins->outmod, - .roundmode = ins->roundmode, - .op = BIFROST_FMA_OP_FMA16 - }; - - RETURN_PACKED(pack); - } else { - unreachable("Invalid fma dest type"); - } -} - -static unsigned -bi_pack_fma_addmin_f32(bi_instruction *ins, bi_registers *regs) -{ - unsigned op = - (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD32 : - (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN32 : - BIFROST_FMA_OP_FMAX32; - - struct bifrost_fma_add pack = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .src0_abs = ins->src_abs[0], - .src1_abs = ins->src_abs[1], - .src0_neg = ins->src_neg[0], - .src1_neg = ins->src_neg[1], - .unk = 0x0, - .outmod = ins->outmod, - .roundmode = (ins->type == BI_ADD) ? 
ins->roundmode : ins->minmax, - .op = op - }; - - RETURN_PACKED(pack); -} - -static bool -bi_pack_fp16_abs(bi_instruction *ins, bi_registers *regs, bool *flip) -{ - /* Absolute values are packed in a quirky way. Let k = src1 < src0. Let - * l be an auxiliary bit we encode. Then the hardware determines: - * - * abs0 = l || k - * abs1 = l && k - * - * Since add/min/max are commutative, this saves a bit by using the - * order of the operands as a bit (k). To pack this, first note: - * - * (l && k) implies (l || k). - * - * That is, if the second argument is abs'd, then the first argument - * also has abs. So there are three cases: - * - * Case 0: Neither src has absolute value. Then we have l = k = 0. - * - * Case 1: Exactly one src has absolute value. Assign that source to - * src0 and the other source to src1. Compute k = src1 < src0 based on - * that assignment. Then l = ~k. - * - * Case 2: Both sources have absolute value. Then we have l = k = 1. - * Note to force k = 1 requires that (src1 < src0) OR (src0 < src1). - * That is, this encoding is only valid if src1 and src0 are distinct. - * This is a scheduling restriction (XXX); if an op of this type - * requires both identical sources to have abs value, then we must - * schedule to ADD (which does not use this ordering trick). - */ - - unsigned abs_0 = ins->src_abs[0], abs_1 = ins->src_abs[1]; - unsigned src_0 = bi_get_src(ins, regs, 0); - unsigned src_1 = bi_get_src(ins, regs, 1); - - assert(!(abs_0 && abs_1 && src_0 == src_1)); - - if (!abs_0 && !abs_1) { - /* Force k = 0 <===> NOT(src1 < src0) */ - *flip = (src_1 < src_0); - return false; - } else if (abs_0 && !abs_1) { - return src_1 >= src_0; - } else if (abs_1 && !abs_0) { - *flip = true; - return src_0 >= src_1; - } else { - *flip = !(src_1 < src_0); - return true; - } -} - -static unsigned -bi_pack_fmadd_min_f16(bi_instruction *ins, bi_registers *regs, bool FMA) -{ - unsigned op = - (!FMA) ? ((ins->op.minmax == BI_MINMAX_MIN) ? 
- BIFROST_ADD_OP_FMIN16 : BIFROST_ADD_OP_FMAX16) : - (ins->type == BI_ADD) ? BIFROST_FMA_OP_FADD16 : - (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_FMA_OP_FMIN16 : - BIFROST_FMA_OP_FMAX16; - - bool flip = false; - bool l = bi_pack_fp16_abs(ins, regs, &flip); - unsigned src_0 = bi_get_src(ins, regs, 0); - unsigned src_1 = bi_get_src(ins, regs, 1); - - if (FMA) { - struct bifrost_fma_add_minmax16 pack = { - .src0 = flip ? src_1 : src_0, - .src1 = flip ? src_0 : src_1, - .src0_neg = ins->src_neg[flip ? 1 : 0], - .src1_neg = ins->src_neg[flip ? 0 : 1], - .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0), - .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1), - .abs1 = l, - .outmod = ins->outmod, - .mode = (ins->type == BI_ADD) ? ins->roundmode : ins->minmax, - .op = op - }; - - RETURN_PACKED(pack); - } else { - /* Can't have modes for fp16 */ - assert(ins->outmod == 0); - - struct bifrost_add_fmin16 pack = { - .src0 = flip ? src_1 : src_0, - .src1 = flip ? src_0 : src_1, - .src0_neg = ins->src_neg[flip ? 1 : 0], - .src1_neg = ins->src_neg[flip ? 0 : 1], - .abs1 = l, - .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0), - .src1_swizzle = bi_swiz16(ins, flip ? 
0 : 1), - .mode = ins->minmax, - .op = op - }; - - RETURN_PACKED(pack); - } -} - -static unsigned -bi_pack_fma_addmin(bi_instruction *ins, bi_registers *regs) -{ - if (ins->dest_type == nir_type_float32) - return bi_pack_fma_addmin_f32(ins, regs); - else if(ins->dest_type == nir_type_float16) - return bi_pack_fmadd_min_f16(ins, regs, true); - else - unreachable("Unknown FMA/ADD type"); -} - -static unsigned -bi_pack_fma_1src(bi_instruction *ins, bi_registers *regs, unsigned op) -{ - struct bifrost_fma_inst pack = { - .src0 = bi_get_src(ins, regs, 0), - .op = op - }; - - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_fma_2src(bi_instruction *ins, bi_registers *regs, unsigned op) -{ - struct bifrost_fma_2src pack = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .op = op - }; - - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_add_1src(bi_instruction *ins, bi_registers *regs, unsigned op) -{ - struct bifrost_add_inst pack = { - .src0 = bi_get_src(ins, regs, 0), - .op = op - }; - - RETURN_PACKED(pack); -} - -static enum bifrost_csel_cond -bi_cond_to_csel(enum bi_cond cond, bool *flip, bool *invert, nir_alu_type T) -{ - nir_alu_type B = nir_alu_type_get_base_type(T); - unsigned idx = (B == nir_type_float) ? 0 : - ((B == nir_type_int) ? 
1 : 2); - - switch (cond){ - case BI_COND_LT: - *flip = true; - /* fallthrough */ - case BI_COND_GT: { - const enum bifrost_csel_cond ops[] = { - BIFROST_FGT_F, - BIFROST_IGT_I, - BIFROST_UGT_I - }; - - return ops[idx]; - } - case BI_COND_LE: - *flip = true; - /* fallthrough */ - case BI_COND_GE: { - const enum bifrost_csel_cond ops[] = { - BIFROST_FGE_F, - BIFROST_IGE_I, - BIFROST_UGE_I - }; - - return ops[idx]; - } - case BI_COND_NE: - *invert = true; - /* fallthrough */ - case BI_COND_EQ: { - const enum bifrost_csel_cond ops[] = { - BIFROST_FEQ_F, - BIFROST_IEQ_F, - BIFROST_IEQ_F /* sign is irrelevant */ - }; - - return ops[idx]; - } - default: - unreachable("Invalid op for csel"); - } -} - -static unsigned -bi_pack_fma_csel(bi_instruction *ins, bi_registers *regs) -{ - /* TODO: Use csel3 as well */ - bool flip = false, invert = false; - - enum bifrost_csel_cond cond = - bi_cond_to_csel(ins->cond, &flip, &invert, ins->src_types[0]); - - unsigned size = nir_alu_type_get_type_size(ins->dest_type); - - unsigned cmp_0 = (flip ? 1 : 0); - unsigned cmp_1 = (flip ? 0 : 1); - unsigned res_0 = (invert ? 3 : 2); - unsigned res_1 = (invert ? 2 : 3); - - struct bifrost_csel4 pack = { - .src0 = bi_get_src(ins, regs, cmp_0), - .src1 = bi_get_src(ins, regs, cmp_1), - .src2 = bi_get_src(ins, regs, res_0), - .src3 = bi_get_src(ins, regs, res_1), - .cond = cond, - .op = (size == 16) ? BIFROST_FMA_OP_CSEL4_V16 : - BIFROST_FMA_OP_CSEL4 - }; - - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_fma_frexp(bi_instruction *ins, bi_registers *regs) -{ - unsigned op = BIFROST_FMA_OP_FREXPE_LOG; - return bi_pack_fma_1src(ins, regs, op); -} - -static unsigned -bi_pack_fma_reduce(bi_instruction *ins, bi_registers *regs) -{ - if (ins->op.reduce == BI_REDUCE_ADD_FREXPM) { - return bi_pack_fma_2src(ins, regs, BIFROST_FMA_OP_ADD_FREXPM); - } else { - unreachable("Invalid reduce op"); - } -} - -/* We have a single convert opcode in the IR but a number of opcodes that could - * come out. 
In particular we have native opcodes for: - * - * [ui]16 --> [fui]32 -- int16_to_32 - * f16 --> f32 -- float16_to_32 - * f32 --> f16 -- float32_to_16 - * f32 --> [ui]32 -- float32_to_int - * [ui]32 --> f32 -- int_to_float32 - * [fui]16 --> [fui]16 -- f2i_i2f16 - */ - -static unsigned -bi_pack_convert(bi_instruction *ins, bi_registers *regs, bool FMA) -{ - nir_alu_type from_base = nir_alu_type_get_base_type(ins->src_types[0]); - unsigned from_size = nir_alu_type_get_type_size(ins->src_types[0]); - bool from_unsigned = from_base == nir_type_uint; - - nir_alu_type to_base = nir_alu_type_get_base_type(ins->dest_type); - unsigned to_size = nir_alu_type_get_type_size(ins->dest_type); - bool to_unsigned = to_base == nir_type_uint; - bool to_float = to_base == nir_type_float; - - /* Sanity check */ - assert((from_base != to_base) || (from_size != to_size)); - assert((MAX2(from_size, to_size) / MIN2(from_size, to_size)) <= 2); - - /* f32 to f16 is special */ - if (from_size == 32 && to_size == 16 && from_base == to_base) { - /* TODO uint/int */ - assert(from_base == nir_type_float); - - struct bifrost_fma_2src pfma = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .op = BIFROST_FMA_FLOAT32_TO_16 - }; - - struct bifrost_add_2src padd = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .op = BIFROST_ADD_FLOAT32_TO_16 - }; - - if (FMA) { - RETURN_PACKED(pfma); - } else { - RETURN_PACKED(padd); - } - } - - /* Otherwise, figure out the mode */ - unsigned op = 0; - - if (from_size == 16 && to_size == 32) { - unsigned component = ins->swizzle[0][0]; - assert(component <= 1); - - if (from_base == nir_type_float) - op = BIFROST_CONVERT_5(component); - else - op = BIFROST_CONVERT_4(from_unsigned, component, to_float); - } else { - unsigned mode = 0; - unsigned swizzle = (from_size == 16) ? 
bi_swiz16(ins, 0) : 0; - bool is_unsigned = from_unsigned; - - if (from_base == nir_type_float) { - assert(to_base != nir_type_float); - is_unsigned = to_unsigned; - - if (from_size == 32 && to_size == 32) - mode = BIFROST_CONV_F32_TO_I32; - else if (from_size == 16 && to_size == 16) - mode = BIFROST_CONV_F16_TO_I16; - else - unreachable("Invalid float conversion"); - } else { - assert(to_base == nir_type_float); - assert(from_size == to_size); - - if (to_size == 32) - mode = BIFROST_CONV_I32_TO_F32; - else if (to_size == 16) - mode = BIFROST_CONV_I16_TO_F16; - else - unreachable("Invalid int conversion"); - } - - /* Fixup swizzle for 32-bit only modes */ - - if (mode == BIFROST_CONV_I32_TO_F32) - swizzle = 0b11; - else if (mode == BIFROST_CONV_F32_TO_I32) - swizzle = 0b10; - - op = BIFROST_CONVERT(is_unsigned, ins->roundmode, swizzle, mode); - - /* Unclear what the top bit is for... maybe 16-bit related */ - bool mode2 = mode == BIFROST_CONV_F16_TO_I16; - bool mode6 = mode == BIFROST_CONV_I16_TO_F16; - - if (!(mode2 || mode6)) - op |= 0x100; - } - - if (FMA) - return bi_pack_fma_1src(ins, regs, BIFROST_FMA_CONVERT | op); - else - return bi_pack_add_1src(ins, regs, BIFROST_ADD_CONVERT | op); -} - -static unsigned -bi_pack_fma_select(bi_instruction *ins, bi_registers *regs) -{ - unsigned size = nir_alu_type_get_type_size(ins->src_types[0]); - - if (size == 16) { - unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1)); - unsigned op = BIFROST_FMA_SEL_16(swiz); - return bi_pack_fma_2src(ins, regs, op); - } else if (size == 8) { - unsigned swiz = 0; - - for (unsigned c = 0; c < 4; ++c) { - if (ins->swizzle[c][0]) { - /* Ensure lowering restriction is met */ - assert(ins->swizzle[c][0] == 2); - swiz |= (1 << c); - } - } - - struct bifrost_fma_sel8 pack = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .src2 = bi_get_src(ins, regs, 2), - .src3 = bi_get_src(ins, regs, 3), - .swizzle = swiz, - .op = BIFROST_FMA_OP_SEL8 - }; - - 
RETURN_PACKED(pack); - } else { - unreachable("Unimplemented"); - } -} - -static enum bifrost_fcmp_cond -bi_fcmp_cond(enum bi_cond cond) -{ - switch (cond) { - case BI_COND_LT: return BIFROST_OLT; - case BI_COND_LE: return BIFROST_OLE; - case BI_COND_GE: return BIFROST_OGE; - case BI_COND_GT: return BIFROST_OGT; - case BI_COND_EQ: return BIFROST_OEQ; - case BI_COND_NE: return BIFROST_UNE; - default: unreachable("Unknown bi_cond"); - } -} - -/* a b <==> b a (TODO: NaN behaviour?) */ - -static enum bifrost_fcmp_cond -bi_flip_fcmp(enum bifrost_fcmp_cond cond) -{ - switch (cond) { - case BIFROST_OGT: - return BIFROST_OLT; - case BIFROST_OGE: - return BIFROST_OLE; - case BIFROST_OLT: - return BIFROST_OGT; - case BIFROST_OLE: - return BIFROST_OGE; - case BIFROST_OEQ: - case BIFROST_UNE: - return cond; - default: - unreachable("Unknown fcmp cond"); - } -} - -static unsigned -bi_pack_fma_cmp(bi_instruction *ins, bi_registers *regs) -{ - nir_alu_type Tl = ins->src_types[0]; - nir_alu_type Tr = ins->src_types[1]; - - if (Tl == nir_type_float32 || Tr == nir_type_float32) { - /* TODO: Mixed 32/16 cmp */ - assert(Tl == Tr); - - enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond); - - /* Only src1 has neg, so we arrange: - * a < b --- native - * a < -b --- native - * -a < -b <===> a > b - * -a < b <===> a > -b - * TODO: Is this NaN-precise? 
- */ - - bool flip = ins->src_neg[0]; - bool neg = ins->src_neg[0] ^ ins->src_neg[1]; - - if (flip) - cond = bi_flip_fcmp(cond); - - struct bifrost_fma_fcmp pack = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .src0_abs = ins->src_abs[0], - .src1_abs = ins->src_abs[1], - .src1_neg = neg, - .src_expand = 0, - .unk1 = 0, - .cond = cond, - .op = BIFROST_FMA_OP_FCMP_D3D - }; - - RETURN_PACKED(pack); - } else if (Tl == nir_type_float16 && Tr == nir_type_float16) { - bool flip = false; - bool l = bi_pack_fp16_abs(ins, regs, &flip); - enum bifrost_fcmp_cond cond = bi_fcmp_cond(ins->cond); - - if (flip) - cond = bi_flip_fcmp(cond); - - struct bifrost_fma_fcmp16 pack = { - .src0 = bi_get_src(ins, regs, flip ? 1 : 0), - .src1 = bi_get_src(ins, regs, flip ? 0 : 1), - .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0), - .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1), - .abs1 = l, - .unk = 0, - .cond = cond, - .op = BIFROST_FMA_OP_FCMP_D3D_16, - }; - - RETURN_PACKED(pack); - } else { - unreachable("Unknown cmp type"); - } -} - -static unsigned -bi_fma_bitwise_op(enum bi_bitwise_op op, bool rshift) -{ - switch (op) { - case BI_BITWISE_OR: - /* Via De Morgan's */ - return rshift ? - BIFROST_FMA_OP_RSHIFT_NAND : - BIFROST_FMA_OP_LSHIFT_NAND; - case BI_BITWISE_AND: - return rshift ? 
- BIFROST_FMA_OP_RSHIFT_AND : - BIFROST_FMA_OP_LSHIFT_AND; - case BI_BITWISE_XOR: - /* Shift direction handled out of band */ - return BIFROST_FMA_OP_RSHIFT_XOR; - default: - unreachable("Unknown op"); - } -} - -static unsigned -bi_pack_fma_bitwise(bi_instruction *ins, bi_registers *regs) -{ - unsigned size = nir_alu_type_get_type_size(ins->dest_type); - assert(size <= 32); - - bool invert_1 = ins->bitwise.src1_invert; - bool invert_0 = false; - - if (ins->bitwise.dest_invert) { - if (ins->op.bitwise == BI_BITWISE_OR) { - ins->op.bitwise = BI_BITWISE_AND; - invert_0 = true; - invert_1 = !invert_1; - } else if (ins->op.bitwise == BI_BITWISE_AND) { - ins->op.bitwise = BI_BITWISE_OR; - invert_0 = true; - invert_1 = !invert_1; - } else { - invert_1 = !invert_1; - } - } - - if (ins->op.bitwise == BI_BITWISE_OR) { - /* Becomes NAND, so via De Morgan's: - * f(A) | f(B) = ~(~f(A) & ~f(B)) - * = NAND(~f(A), ~f(B)) - */ - - invert_0 = !invert_0; - invert_1 = !invert_1; - } else if (ins->op.bitwise == BI_BITWISE_XOR) { - /* ~A ^ ~B = ~(A ^ ~B) = ~(~(A ^ B)) = A ^ B - * ~A ^ B = ~(A ^ B) = A ^ ~B - */ - - invert_0 ^= invert_1; - invert_1 = false; - - /* invert_1 ends up specifying shift direction */ - invert_1 = !ins->bitwise.rshift; - } - - struct bifrost_shift_fma pack = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .src2 = bi_get_src(ins, regs, 2), - .half = (size == 32) ? 0 : (size == 16) ? 0x7 : (size == 8) ? 0x4 : 0, - .unk = 1, /* XXX */ - .invert_1 = invert_0, - .invert_2 = invert_1, - .op = bi_fma_bitwise_op(ins->op.bitwise, ins->bitwise.rshift) - }; - - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_fma_round(bi_instruction *ins, bi_registers *regs) -{ - bool fp16 = ins->dest_type == nir_type_float16; - assert(fp16 || ins->dest_type == nir_type_float32); - - unsigned op = fp16 - ? 
BIFROST_FMA_ROUND_16(ins->roundmode, bi_swiz16(ins, 0)) - : BIFROST_FMA_ROUND_32(ins->roundmode); - - return bi_pack_fma_1src(ins, regs, op); -} - -static unsigned -bi_pack_fma_imath(bi_instruction *ins, bi_registers *regs) -{ - /* Scheduler: only ADD can have 8/16-bit imath */ - assert(ins->dest_type == nir_type_int32 || ins->dest_type == nir_type_uint32); - - unsigned op = ins->op.imath == BI_IMATH_ADD - ? BIFROST_FMA_IADD_32 - : BIFROST_FMA_ISUB_32; - - return bi_pack_fma_2src(ins, regs, op); -} - -static unsigned -bi_pack_fma_imul(bi_instruction *ins, bi_registers *regs) -{ - assert(ins->op.imul == BI_IMUL_IMUL); - unsigned op = BIFROST_FMA_IMUL_32; - return bi_pack_fma_2src(ins, regs, op); -} - static unsigned bi_pack_fma(bi_clause *clause, bi_bundle bundle, bi_registers *regs) { if (!bundle.fma) - return BIFROST_FMA_NOP; + return pan_pack_fma_nop_i32(clause, NULL, regs); + + bool f16 = bundle.fma->dest_type == nir_type_float16; + bool f32 = bundle.fma->dest_type == nir_type_float32; + bool u32 = bundle.fma->dest_type == nir_type_uint32; + bool u16 = bundle.fma->dest_type == nir_type_uint16; + ASSERTED bool u8 = bundle.fma->dest_type == nir_type_uint8; + bool s32 = bundle.fma->dest_type == nir_type_int32; + bool s16 = bundle.fma->dest_type == nir_type_int16; + ASSERTED bool s8 = bundle.fma->dest_type == nir_type_int8; + + bool src0_f16 = bundle.fma->src_types[0] == nir_type_float16; + bool src0_f32 = bundle.fma->src_types[0] == nir_type_float32; + bool src0_u16 = bundle.fma->src_types[0] == nir_type_uint16; + bool src0_s16 = bundle.fma->src_types[0] == nir_type_int16; + bool src0_s8 = bundle.fma->src_types[0] == nir_type_int8; + bool src0_u8 = bundle.fma->src_types[0] == nir_type_uint8; + + enum bi_cond cond = bundle.fma->cond; + bool typeless_cond = (cond == BI_COND_EQ) || (cond == BI_COND_NE); switch (bundle.fma->type) { case BI_ADD: - return bi_pack_fma_addmin(bundle.fma, regs); + if (bundle.fma->dest_type == nir_type_float32) + return 
pan_pack_fma_fadd_f32(clause, bundle.fma, regs); + else if (bundle.fma->dest_type == nir_type_float16) + return pan_pack_fma_fadd_v2f16(clause, bundle.fma, regs); + + unreachable("TODO"); case BI_CMP: - return bi_pack_fma_cmp(bundle.fma, regs); + assert (src0_f16 || src0_f32); + + if (src0_f32) + return pan_pack_fma_fcmp_f32(clause, bundle.fma, regs); + else + return pan_pack_fma_fcmp_v2f16(clause, bundle.fma, regs); case BI_BITWISE: - return bi_pack_fma_bitwise(bundle.fma, regs); + if (bundle.fma->op.bitwise == BI_BITWISE_AND) { + if (u32 || s32) { + return bundle.fma->bitwise.rshift ? + pan_pack_fma_rshift_and_i32(clause, bundle.fma, regs) : + pan_pack_fma_lshift_and_i32(clause, bundle.fma, regs); + } else if (u16 || s16) { + return bundle.fma->bitwise.rshift ? + pan_pack_fma_rshift_and_v2i16(clause, bundle.fma, regs) : + pan_pack_fma_lshift_and_v2i16(clause, bundle.fma, regs); + } else { + assert(u8 || s8); + return bundle.fma->bitwise.rshift ? + pan_pack_fma_rshift_and_v4i8(clause, bundle.fma, regs) : + pan_pack_fma_lshift_and_v4i8(clause, bundle.fma, regs); + } + + } else if (bundle.fma->op.bitwise == BI_BITWISE_OR) { + if (u32 || s32) { + return bundle.fma->bitwise.rshift ? + pan_pack_fma_rshift_or_i32(clause, bundle.fma, regs) : + pan_pack_fma_lshift_or_i32(clause, bundle.fma, regs); + } else if (u16 || s16) { + return bundle.fma->bitwise.rshift ? + pan_pack_fma_rshift_or_v2i16(clause, bundle.fma, regs) : + pan_pack_fma_lshift_or_v2i16(clause, bundle.fma, regs); + } else { + assert(u8 || s8); + return bundle.fma->bitwise.rshift ? + pan_pack_fma_rshift_or_v4i8(clause, bundle.fma, regs) : + pan_pack_fma_lshift_or_v4i8(clause, bundle.fma, regs); + } + } else { + assert(bundle.fma->op.bitwise == BI_BITWISE_XOR); + + if (u32 || s32) { + return bundle.fma->bitwise.rshift ? + pan_pack_fma_rshift_xor_i32(clause, bundle.fma, regs) : + pan_pack_fma_lshift_xor_i32(clause, bundle.fma, regs); + } else if (u16 || s16) { + return bundle.fma->bitwise.rshift ? 
+ pan_pack_fma_rshift_xor_v2i16(clause, bundle.fma, regs) : + pan_pack_fma_lshift_xor_v2i16(clause, bundle.fma, regs); + } else { + assert(u8 || s8); + return bundle.fma->bitwise.rshift ? + pan_pack_fma_rshift_xor_v4i8(clause, bundle.fma, regs) : + pan_pack_fma_lshift_xor_v4i8(clause, bundle.fma, regs); + } + } case BI_CONVERT: - return bi_pack_convert(bundle.fma, regs, true); + if (src0_s8) { + assert(s32); + return pan_pack_fma_s8_to_s32(clause, bundle.fma, regs); + } else if (src0_u8) { + assert(u32); + return pan_pack_fma_u8_to_u32(clause, bundle.fma, regs); + } else if (src0_s16) { + assert(s32); + return pan_pack_fma_s16_to_s32(clause, bundle.fma, regs); + } else if (src0_u16) { + assert(u32); + return pan_pack_fma_u16_to_u32(clause, bundle.fma, regs); + } else if (src0_f16) { + assert(f32); + return pan_pack_fma_f16_to_f32(clause, bundle.fma, regs); + } else if (src0_f32) { + assert(f16); + return pan_pack_fma_v2f32_to_v2f16(clause, bundle.fma, regs); + } + + unreachable("Invalid FMA convert"); case BI_CSEL: - return bi_pack_fma_csel(bundle.fma, regs); + if (f32) + return pan_pack_fma_csel_f32(clause, bundle.fma, regs); + else if (f16) + return pan_pack_fma_csel_v2f16(clause, bundle.fma, regs); + else if ((u32 || s32) && typeless_cond) + return pan_pack_fma_csel_i32(clause, bundle.fma, regs); + else if ((u16 || s16) && typeless_cond) + return pan_pack_fma_csel_v2i16(clause, bundle.fma, regs); + else if (u32) + return pan_pack_fma_csel_u32(clause, bundle.fma, regs); + else if (u16) + return pan_pack_fma_csel_v2u16(clause, bundle.fma, regs); + else if (s32) + return pan_pack_fma_csel_s32(clause, bundle.fma, regs); + else if (s16) + return pan_pack_fma_csel_v2s16(clause, bundle.fma, regs); + else + unreachable("Invalid csel type"); case BI_FMA: - return bi_pack_fma_fma(bundle.fma, regs); + if (bundle.fma->dest_type == nir_type_float32) { + if (bundle.fma->op.mscale) + return pan_pack_fma_fma_rscale_f32(clause, bundle.fma, regs); + else + return 
pan_pack_fma_fma_f32(clause, bundle.fma, regs); + } else { + assert(bundle.fma->dest_type == nir_type_float16); + + if (bundle.fma->op.mscale) + return pan_pack_fma_fma_rscale_v2f16(clause, bundle.fma, regs); + else + return pan_pack_fma_fma_v2f16(clause, bundle.fma, regs); + } case BI_FREXP: - return bi_pack_fma_frexp(bundle.fma, regs); + assert(src0_f32 || src0_f16); + + if (src0_f32) + return pan_pack_fma_frexpe_f32(clause, bundle.fma, regs); + else + return pan_pack_fma_frexpe_v2f16(clause, bundle.fma, regs); case BI_IMATH: - return bi_pack_fma_imath(bundle.fma, regs); + /* XXX: Only 32-bit, with carries/borrows forced */ + assert(s32 || u32); + + if (bundle.fma->op.imath == BI_IMATH_ADD) + return pan_pack_fma_iaddc_i32(clause, bundle.fma, regs); + else + return pan_pack_fma_isubb_i32(clause, bundle.fma, regs); case BI_MINMAX: - return bi_pack_fma_addmin(bundle.fma, regs); + unreachable("FMIN/FMAX not supported on FMA since G72"); case BI_MOV: - return bi_pack_fma_1src(bundle.fma, regs, BIFROST_FMA_OP_MOV); + return pan_pack_fma_mov_i32(clause, bundle.fma, regs); case BI_SELECT: - return bi_pack_fma_select(bundle.fma, regs); + if (nir_alu_type_get_type_size(bundle.fma->src_types[0]) == 16) { + return pan_pack_fma_mkvec_v2i16(clause, bundle.fma, regs); + } else { + assert(nir_alu_type_get_type_size(bundle.fma->src_types[0]) == 8); + return pan_pack_fma_mkvec_v4i8(clause, bundle.fma, regs); + } case BI_ROUND: - return bi_pack_fma_round(bundle.fma, regs); + assert(f16 || f32); + + if (f16) + return pan_pack_fma_fround_v2f16(clause, bundle.fma, regs); + else + return pan_pack_fma_fround_f32(clause, bundle.fma, regs); case BI_REDUCE_FMA: - return bi_pack_fma_reduce(bundle.fma, regs); + assert(src0_f32 && f32); + return pan_pack_fma_fadd_lscale_f32(clause, bundle.fma, regs); case BI_IMUL: - return bi_pack_fma_imul(bundle.fma, regs); + return pan_pack_fma_imul_i32(clause, bundle.fma, regs); default: unreachable("Cannot encode class as FMA"); } } -static unsigned 
-bi_pack_add_ld_vary(bi_clause *clause, bi_instruction *ins, bi_registers *regs) -{ - unsigned size = nir_alu_type_get_type_size(ins->dest_type); - assert(size == 32 || size == 16); - - unsigned op = (size == 32) ? - BIFROST_ADD_OP_LD_VAR_32 : - BIFROST_ADD_OP_LD_VAR_16; - - unsigned packed_addr = 0; - - if (ins->src[0] & BIR_INDEX_CONSTANT) { - /* Direct uses address field directly */ - packed_addr = bi_get_immediate(ins, 0); - } else { - /* Indirect gets an extra source */ - packed_addr = bi_get_src(ins, regs, 0) | 0b11000; - } - - /* The destination is thrown in the data register */ - assert(ins->dest & BIR_INDEX_REGISTER); - clause->data_register = ins->dest & ~BIR_INDEX_REGISTER; - - unsigned channels = ins->vector_channels; - assert(channels >= 1 && channels <= 4); - - struct bifrost_ld_var pack = { - .src0 = bi_get_src(ins, regs, 1), - .addr = packed_addr, - .channels = channels - 1, - .interp_mode = ins->load_vary.interp_mode, - .reuse = ins->load_vary.reuse, - .flat = ins->load_vary.flat, - .op = op - }; - - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_add_2src(bi_instruction *ins, bi_registers *regs, unsigned op) -{ - struct bifrost_add_2src pack = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .op = op - }; - - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_add_addmin_f32(bi_instruction *ins, bi_registers *regs) -{ - unsigned op = - (ins->type == BI_ADD) ? BIFROST_ADD_OP_FADD32 : - (ins->op.minmax == BI_MINMAX_MIN) ? BIFROST_ADD_OP_FMIN32 : - BIFROST_ADD_OP_FMAX32; - - struct bifrost_add_faddmin pack = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .src0_abs = ins->src_abs[0], - .src1_abs = ins->src_abs[1], - .src0_neg = ins->src_neg[0], - .src1_neg = ins->src_neg[1], - .outmod = ins->outmod, - .mode = (ins->type == BI_ADD) ? 
ins->roundmode : ins->minmax, - .op = op - }; - - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_add_add_f16(bi_instruction *ins, bi_registers *regs) -{ - /* ADD.v2f16 can't have outmod */ - assert(ins->outmod == BIFROST_NONE); - - struct bifrost_add_faddmin pack = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .src0_abs = ins->src_abs[0], - .src1_abs = ins->src_abs[1], - .src0_neg = ins->src_neg[0], - .src1_neg = ins->src_neg[1], - .select = bi_swiz16(ins, 0), /* swizzle_0 */ - .outmod = bi_swiz16(ins, 1), /* swizzle_1 */ - .mode = ins->roundmode, - .op = BIFROST_ADD_OP_FADD16 - }; - - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_add_addmin(bi_instruction *ins, bi_registers *regs) -{ - if (ins->dest_type == nir_type_float32) - return bi_pack_add_addmin_f32(ins, regs); - else if (ins->dest_type == nir_type_float16) { - if (ins->type == BI_ADD) - return bi_pack_add_add_f16(ins, regs); - else - return bi_pack_fmadd_min_f16(ins, regs, false); - } else - unreachable("Unknown FMA/ADD type"); -} - -static unsigned -bi_pack_add_ld_ubo(bi_clause *clause, bi_instruction *ins, bi_registers *regs) -{ - assert(ins->vector_channels >= 1 && ins->vector_channels <= 4); - - const unsigned ops[4] = { - BIFROST_ADD_OP_LD_UBO_1, - BIFROST_ADD_OP_LD_UBO_2, - BIFROST_ADD_OP_LD_UBO_3, - BIFROST_ADD_OP_LD_UBO_4 - }; - - bi_write_data_register(clause, ins); - return bi_pack_add_2src(ins, regs, ops[ins->vector_channels - 1]); -} - -static enum bifrost_ldst_type -bi_pack_ldst_type(nir_alu_type T) -{ - switch (T) { - case nir_type_float16: return BIFROST_LDST_F16; - case nir_type_float32: return BIFROST_LDST_F32; - case nir_type_int32: return BIFROST_LDST_I32; - case nir_type_uint32: return BIFROST_LDST_U32; - default: unreachable("Invalid type loaded"); - } -} - -static unsigned -bi_pack_add_ld_var_addr(bi_clause *clause, bi_instruction *ins, bi_registers *regs) -{ - struct bifrost_ld_var_addr pack = { - .src0 = bi_get_src(ins, regs, 1), - .src1 = 
bi_get_src(ins, regs, 2), - .location = bi_get_immediate(ins, 0), - .type = bi_pack_ldst_type(ins->src_types[3]), - .op = BIFROST_ADD_OP_LD_VAR_ADDR - }; - - bi_write_data_register(clause, ins); - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_add_ld_attr(bi_clause *clause, bi_instruction *ins, bi_registers *regs) -{ - assert(ins->vector_channels >= 0 && ins->vector_channels <= 4); - - struct bifrost_ld_attr pack = { - .src0 = bi_get_src(ins, regs, 1), - .src1 = bi_get_src(ins, regs, 2), - .location = bi_get_immediate(ins, 0), - .channels = ins->vector_channels - 1, - .type = bi_pack_ldst_type(ins->dest_type), - .op = BIFROST_ADD_OP_LD_ATTR - }; - - bi_write_data_register(clause, ins); - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_add_st_vary(bi_clause *clause, bi_instruction *ins, bi_registers *regs) -{ - assert(ins->vector_channels >= 1 && ins->vector_channels <= 4); - - struct bifrost_st_vary pack = { - .src0 = bi_get_src(ins, regs, 1), - .src1 = bi_get_src(ins, regs, 2), - .src2 = bi_get_src(ins, regs, 3), - .channels = ins->vector_channels - 1, - .op = BIFROST_ADD_OP_ST_VAR - }; - - bi_read_data_register(clause, ins); - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_add_atest(bi_clause *clause, bi_instruction *ins, bi_registers *regs) -{ - bool fp16 = (ins->src_types[1] == nir_type_float16); - - struct bifrost_add_atest pack = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .half = fp16, - .component = fp16 ? ins->swizzle[1][0] : 1, /* Set for fp32 */ - .op = BIFROST_ADD_OP_ATEST, - }; - - /* Despite *also* writing with the usual mechanism... 
quirky and - * perhaps unnecessary, but let's match the blob */ - clause->data_register = ins->dest & ~BIR_INDEX_REGISTER; - - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_add_blend(bi_clause *clause, bi_instruction *ins, bi_registers *regs) -{ - struct bifrost_add_inst pack = { - .src0 = bi_get_src(ins, regs, 1), - .op = BIFROST_ADD_OP_BLEND - }; - - /* TODO: Pack location in uniform_const */ - assert(ins->blend_location == 0); - - bi_read_data_register(clause, ins); - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_add_special(bi_instruction *ins, bi_registers *regs) -{ - unsigned op = 0; - bool fp16 = ins->dest_type == nir_type_float16; - bool Y = ins->swizzle[0][0]; - - if (ins->op.special == BI_SPECIAL_FRCP) { - op = fp16 ? - (Y ? BIFROST_ADD_OP_FRCP_FAST_F16_Y : - BIFROST_ADD_OP_FRCP_FAST_F16_X) : - BIFROST_ADD_OP_FRCP_FAST_F32; - } else if (ins->op.special == BI_SPECIAL_FRSQ) { - op = fp16 ? - (Y ? BIFROST_ADD_OP_FRSQ_FAST_F16_Y : - BIFROST_ADD_OP_FRSQ_FAST_F16_X) : - BIFROST_ADD_OP_FRSQ_FAST_F32; - - } else if (ins->op.special == BI_SPECIAL_EXP2_LOW) { - assert(!fp16); - return bi_pack_add_2src(ins, regs, BIFROST_ADD_OP_FEXP2_FAST); - } else if (ins->op.special == BI_SPECIAL_IABS) { - assert(ins->src_types[0] == nir_type_int32); - op = BIFROST_ADD_OP_IABS_32; - } else { - unreachable("Unknown special op"); - } - - return bi_pack_add_1src(ins, regs, op); -} - -static unsigned -bi_pack_add_table(bi_instruction *ins, bi_registers *regs) -{ - unsigned op = 0; - assert(ins->dest_type == nir_type_float32); - - op = BIFROST_ADD_OP_LOG2_HELP; - return bi_pack_add_1src(ins, regs, op); -} -static unsigned -bi_pack_add_tex_compact(bi_clause *clause, bi_instruction *ins, bi_registers *regs, gl_shader_stage stage) -{ - bool f16 = ins->dest_type == nir_type_float16; - bool vtx = stage != MESA_SHADER_FRAGMENT; - - struct bifrost_tex_compact pack = { - .src0 = bi_get_src(ins, regs, 0), - .src1 = bi_get_src(ins, regs, 1), - .op = f16 ? 
BIFROST_ADD_OP_TEX_COMPACT_F16(vtx) : - BIFROST_ADD_OP_TEX_COMPACT_F32(vtx), - .compute_lod = !vtx, - .tex_index = ins->texture.texture_index, - .sampler_index = ins->texture.sampler_index - }; - - bi_write_data_register(clause, ins); - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_add_select(bi_instruction *ins, bi_registers *regs) -{ - ASSERTED unsigned size = nir_alu_type_get_type_size(ins->src_types[0]); - assert(size == 16); - - unsigned swiz = (ins->swizzle[0][0] | (ins->swizzle[1][0] << 1)); - unsigned op = BIFROST_ADD_SEL_16(swiz); - return bi_pack_add_2src(ins, regs, op); -} - -static enum bifrost_discard_cond -bi_cond_to_discard(enum bi_cond cond, bool *flip) -{ - switch (cond){ - case BI_COND_GT: - *flip = true; - /* fallthrough */ - case BI_COND_LT: - return BIFROST_DISCARD_FLT; - case BI_COND_GE: - *flip = true; - /* fallthrough */ - case BI_COND_LE: - return BIFROST_DISCARD_FLE; - case BI_COND_NE: - return BIFROST_DISCARD_FNE; - case BI_COND_EQ: - return BIFROST_DISCARD_FEQ; - default: - unreachable("Invalid op for discard"); - } -} - -static unsigned -bi_pack_add_discard(bi_instruction *ins, bi_registers *regs) -{ - bool fp16 = ins->src_types[0] == nir_type_float16; - assert(fp16 || ins->src_types[0] == nir_type_float32); - - bool flip = false; - enum bifrost_discard_cond cond = bi_cond_to_discard(ins->cond, &flip); - - struct bifrost_add_discard pack = { - .src0 = bi_get_src(ins, regs, flip ? 1 : 0), - .src1 = bi_get_src(ins, regs, flip ? 0 : 1), - .cond = cond, - .src0_select = fp16 ? ins->swizzle[0][0] : 0, - .src1_select = fp16 ? ins->swizzle[1][0] : 0, - .fp32 = fp16 ? 0 : 1, - .op = BIFROST_ADD_OP_DISCARD - }; - - RETURN_PACKED(pack); -} - -static enum bifrost_icmp_cond -bi_cond_to_icmp(enum bi_cond cond, bool *flip, bool is_unsigned, bool is_16) -{ - switch (cond){ - case BI_COND_LT: - *flip = true; - /* fallthrough */ - case BI_COND_GT: - return is_unsigned ? (is_16 ? 
BIFROST_ICMP_IGE : BIFROST_ICMP_UGT) - : BIFROST_ICMP_IGT; - case BI_COND_LE: - *flip = true; - /* fallthrough */ - case BI_COND_GE: - return is_unsigned ? BIFROST_ICMP_UGE : - (is_16 ? BIFROST_ICMP_UGT : BIFROST_ICMP_IGE); - case BI_COND_NE: - return BIFROST_ICMP_NEQ; - case BI_COND_EQ: - return BIFROST_ICMP_EQ; - default: - unreachable("Invalid op for icmp"); - } -} - -static unsigned -bi_pack_add_icmp32(bi_instruction *ins, bi_registers *regs, bool flip, - enum bifrost_icmp_cond cond) -{ - struct bifrost_add_icmp pack = { - .src0 = bi_get_src(ins, regs, flip ? 1 : 0), - .src1 = bi_get_src(ins, regs, flip ? 0 : 1), - .cond = cond, - .sz = 1, - .d3d = true, - .op = BIFROST_ADD_OP_ICMP_32 - }; - - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_add_icmp16(bi_instruction *ins, bi_registers *regs, bool flip, - enum bifrost_icmp_cond cond) -{ - struct bifrost_add_icmp16 pack = { - .src0 = bi_get_src(ins, regs, flip ? 1 : 0), - .src1 = bi_get_src(ins, regs, flip ? 0 : 1), - .src0_swizzle = bi_swiz16(ins, flip ? 1 : 0), - .src1_swizzle = bi_swiz16(ins, flip ? 0 : 1), - .cond = cond, - .d3d = true, - .op = BIFROST_ADD_OP_ICMP_16 - }; - - RETURN_PACKED(pack); -} - -static unsigned -bi_pack_add_cmp(bi_instruction *ins, bi_registers *regs) -{ - nir_alu_type Tl = ins->src_types[0]; - ASSERTED nir_alu_type Tr = ins->src_types[1]; - nir_alu_type Bl = nir_alu_type_get_base_type(Tl); - - if (Bl == nir_type_uint || Bl == nir_type_int) { - assert(Tl == Tr); - unsigned sz = nir_alu_type_get_type_size(Tl); - - bool flip = false; - - enum bifrost_icmp_cond cond = bi_cond_to_icmp( - sz == 16 ? 
/*bi_invert_cond*/(ins->cond) : ins->cond, - &flip, Bl == nir_type_uint, sz == 16); - - if (sz == 32) - return bi_pack_add_icmp32(ins, regs, flip, cond); - else if (sz == 16) - return bi_pack_add_icmp16(ins, regs, flip, cond); - else - unreachable("TODO"); - } else { - unreachable("TODO"); - } -} - -static unsigned -bi_pack_add_imath(bi_instruction *ins, bi_registers *regs) -{ - /* TODO: 32+16 add */ - assert(ins->src_types[0] == ins->src_types[1]); - unsigned sz = nir_alu_type_get_type_size(ins->src_types[0]); - enum bi_imath_op p = ins->op.imath; - - unsigned op = 0; - - if (sz == 8) { - op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_8 : - BIFROST_ADD_ISUB_8; - } else if (sz == 16) { - op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_16 : - BIFROST_ADD_ISUB_16; - } else if (sz == 32) { - op = (p == BI_IMATH_ADD) ? BIFROST_ADD_IADD_32 : - BIFROST_ADD_ISUB_32; - } else { - unreachable("64-bit todo"); - } - - return bi_pack_add_2src(ins, regs, op); -} - static unsigned bi_pack_add_branch_cond(bi_instruction *ins, bi_registers *regs) { @@ -1672,56 +657,214 @@ static unsigned bi_pack_add(bi_clause *clause, bi_bundle bundle, bi_registers *regs, gl_shader_stage stage) { if (!bundle.add) - return BIFROST_ADD_NOP; + return pan_pack_add_nop_i32(clause, NULL, regs); + + bool f16 = bundle.add->dest_type == nir_type_float16; + bool f32 = bundle.add->dest_type == nir_type_float32; + bool u32 = bundle.add->dest_type == nir_type_uint32; + bool u16 = bundle.add->dest_type == nir_type_uint16; + bool s32 = bundle.add->dest_type == nir_type_int32; + bool s16 = bundle.add->dest_type == nir_type_int16; + + bool src0_f16 = bundle.add->src_types[0] == nir_type_float16; + bool src0_f32 = bundle.add->src_types[0] == nir_type_float32; + bool src0_u32 = bundle.add->src_types[0] == nir_type_uint32; + bool src0_u16 = bundle.add->src_types[0] == nir_type_uint16; + bool src0_u8 = bundle.add->src_types[0] == nir_type_uint8; + bool src0_s32 = bundle.add->src_types[0] == nir_type_int32; + bool src0_s16 = 
bundle.add->src_types[0] == nir_type_int16;
+        bool src0_s8 = bundle.add->src_types[0] == nir_type_int8;
+
+        unsigned sz = nir_alu_type_get_type_size(bundle.add->dest_type);
+        enum bi_cond cond = bundle.add->cond;
+        bool typeless_cond = (cond == BI_COND_EQ) || (cond == BI_COND_NE);
 
         switch (bundle.add->type) {
         case BI_ADD:
-                return bi_pack_add_addmin(bundle.add, regs);
+                if (bundle.add->dest_type == nir_type_float32)
+                        return pan_pack_add_fadd_f32(clause, bundle.add, regs);
+                else if (bundle.add->dest_type == nir_type_float16)
+                        return pan_pack_add_fadd_v2f16(clause, bundle.add, regs);
+
+                unreachable("TODO");
         case BI_ATEST:
-                return bi_pack_add_atest(clause, bundle.add, regs);
+                return pan_pack_add_atest(clause, bundle.add, regs);
         case BI_BRANCH:
                 return bi_pack_add_branch(bundle.add, regs);
         case BI_CMP:
+                if (src0_f32)
+                        return pan_pack_add_fcmp_f32(clause, bundle.add, regs);
+                else if (src0_f16)
+                        return pan_pack_add_fcmp_v2f16(clause, bundle.add, regs);
+                else if ((src0_u32 || src0_s32) && typeless_cond)
+                        return pan_pack_add_icmp_i32(clause, bundle.add, regs);
+                else if ((src0_u16 || src0_s16) && typeless_cond)
+                        return pan_pack_add_icmp_v2i16(clause, bundle.add, regs);
+                else if ((src0_u8 || src0_s8) && typeless_cond)
+                        return pan_pack_add_icmp_v4i8(clause, bundle.add, regs);
+                else if (src0_u32)
+                        return pan_pack_add_icmp_u32(clause, bundle.add, regs);
+                else if (src0_u16)
+                        return pan_pack_add_icmp_v2u16(clause, bundle.add, regs);
+                else if (src0_u8)
+                        return pan_pack_add_icmp_v4u8(clause, bundle.add, regs);
+                else if (src0_s32)
+                        return pan_pack_add_icmp_s32(clause, bundle.add, regs);
+                else if (src0_s16)
+                        return pan_pack_add_icmp_v2s16(clause, bundle.add, regs);
+                else if (src0_s8)
+                        return pan_pack_add_icmp_v4s8(clause, bundle.add, regs);
+                else
+                        unreachable("Invalid cmp type");
         case BI_BLEND:
-                return bi_pack_add_blend(clause, bundle.add, regs);
+                return pan_pack_add_blend(clause, bundle.add, regs);
         case 
BI_BITWISE: unreachable("Packing todo"); case BI_CONVERT: - return bi_pack_convert(bundle.add, regs, false); + if (src0_f16 && s16) + return pan_pack_add_v2f16_to_v2s16(clause, bundle.add, regs); + else if (src0_f16 && u16) + return pan_pack_add_v2f16_to_v2u16(clause, bundle.add, regs); + else if (src0_f16 && s32) + return pan_pack_add_f16_to_s32(clause, bundle.add, regs); + else if (src0_f16 && u32) + return pan_pack_add_f16_to_u32(clause, bundle.add, regs); + else if (src0_s16 && f16) + return pan_pack_add_v2s16_to_v2f16(clause, bundle.add, regs); + else if (src0_u16 && f16) + return pan_pack_add_v2u16_to_v2f16(clause, bundle.add, regs); + else if (src0_s8 && s16) + return pan_pack_add_v2s8_to_v2s16(clause, bundle.add, regs); + else if (src0_u8 && u16) + return pan_pack_add_v2u8_to_v2u16(clause, bundle.add, regs); + else if (src0_s8 && f16) + return pan_pack_add_v2s8_to_v2f16(clause, bundle.add, regs); + else if (src0_u8 && f16) + return pan_pack_add_v2u8_to_v2f16(clause, bundle.add, regs); + else if (src0_f32 && s32) + return pan_pack_add_f32_to_s32(clause, bundle.add, regs); + else if (src0_f32 && u32) + return pan_pack_add_f32_to_u32(clause, bundle.add, regs); + else if (src0_s8 && s32) + return pan_pack_add_s8_to_s32(clause, bundle.add, regs); + else if (src0_u8 && u32) + return pan_pack_add_u8_to_u32(clause, bundle.add, regs); + else if (src0_s8 && f32) + return pan_pack_add_s8_to_f32(clause, bundle.add, regs); + else if (src0_u8 && f32) + return pan_pack_add_u8_to_f32(clause, bundle.add, regs); + else if (src0_s32 && f32) + return pan_pack_add_s32_to_f32(clause, bundle.add, regs); + else if (src0_u32 && f32) + return pan_pack_add_u32_to_f32(clause, bundle.add, regs); + else if (src0_s16 && s32) + return pan_pack_add_s16_to_s32(clause, bundle.add, regs); + else if (src0_u16 && u32) + return pan_pack_add_u16_to_u32(clause, bundle.add, regs); + else if (src0_s16 && f32) + return pan_pack_add_s16_to_f32(clause, bundle.add, regs); + else if (src0_u16 && f32) + 
return pan_pack_add_u16_to_f32(clause, bundle.add, regs); + else if (src0_f16 && f32) + return pan_pack_add_f16_to_f32(clause, bundle.add, regs); + else if (src0_f32 && f16) + return pan_pack_add_v2f32_to_v2f16(clause, bundle.add, regs); + else + unreachable("Invalid ADD convert"); case BI_DISCARD: - return bi_pack_add_discard(bundle.add, regs); + return pan_pack_add_discard_f32(clause, bundle.add, regs); case BI_FREXP: unreachable("Packing todo"); case BI_IMATH: - return bi_pack_add_imath(bundle.add, regs); + assert(sz == 8 || sz == 16 || sz == 32); + + if (bundle.add->op.imath == BI_IMATH_ADD) { + return (sz == 8) ? pan_pack_add_iadd_v4s8(clause, bundle.add, regs) : + (sz == 16) ? pan_pack_add_iadd_v2s16(clause, bundle.add, regs) : + pan_pack_add_iadd_s32(clause, bundle.add, regs); + } else { + return (sz == 8) ? pan_pack_add_isub_v4s8(clause, bundle.add, regs) : + (sz == 16) ? pan_pack_add_isub_v2s16(clause, bundle.add, regs) : + pan_pack_add_isub_s32(clause, bundle.add, regs); + } case BI_LOAD: unreachable("Packing todo"); case BI_LOAD_ATTR: - return bi_pack_add_ld_attr(clause, bundle.add, regs); + return pan_pack_add_ld_attr_imm(clause, bundle.add, regs); case BI_LOAD_UNIFORM: - return bi_pack_add_ld_ubo(clause, bundle.add, regs); + switch (bundle.add->vector_channels) { + case 1: return pan_pack_add_load_i32(clause, bundle.add, regs); + case 2: return pan_pack_add_load_i64(clause, bundle.add, regs); + case 3: return pan_pack_add_load_i96(clause, bundle.add, regs); + case 4: return pan_pack_add_load_i128(clause, bundle.add, regs); + default: unreachable("Invalid channel count"); + } case BI_LOAD_VAR: - return bi_pack_add_ld_vary(clause, bundle.add, regs); + if (bundle.add->src[0] & BIR_INDEX_CONSTANT) { + if (bi_get_immediate(bundle.add, 0) >= 20) + return pan_pack_add_ld_var_special(clause, bundle.add, regs); + else if (bundle.add->load_vary.flat) + return pan_pack_add_ld_var_flat_imm(clause, bundle.add, regs); + else + return pan_pack_add_ld_var_imm(clause, 
bundle.add, regs); + } else { + if (bundle.add->load_vary.flat) + return pan_pack_add_ld_var_flat(clause, bundle.add, regs); + else + return pan_pack_add_ld_var(clause, bundle.add, regs); + } case BI_LOAD_VAR_ADDRESS: - return bi_pack_add_ld_var_addr(clause, bundle.add, regs); + return pan_pack_add_lea_attr_imm(clause, bundle.add, regs); case BI_MINMAX: - return bi_pack_add_addmin(bundle.add, regs); + if (bundle.add->op.minmax == BI_MINMAX_MIN) { + if (bundle.add->dest_type == nir_type_float32) + return pan_pack_add_fmin_f32(clause, bundle.add, regs); + else if (bundle.add->dest_type == nir_type_float16) + return pan_pack_add_fmin_v2f16(clause, bundle.add, regs); + unreachable("TODO"); + } else { + if (bundle.add->dest_type == nir_type_float32) + return pan_pack_add_fmax_f32(clause, bundle.add, regs); + else if (bundle.add->dest_type == nir_type_float16) + return pan_pack_add_fmax_v2f16(clause, bundle.add, regs); + unreachable("TODO"); + } case BI_MOV: case BI_STORE: unreachable("Packing todo"); case BI_STORE_VAR: - return bi_pack_add_st_vary(clause, bundle.add, regs); + return pan_pack_add_st_cvt(clause, bundle.add, regs); case BI_SPECIAL: - return bi_pack_add_special(bundle.add, regs); + if (bundle.add->op.special == BI_SPECIAL_FRCP) { + return f16 ? pan_pack_add_frcp_f16(clause, bundle.add, regs) : + pan_pack_add_frcp_f32(clause, bundle.add, regs); + } else if (bundle.add->op.special == BI_SPECIAL_FRSQ) { + return f16 ? 
pan_pack_add_frsq_f16(clause, bundle.add, regs) : + pan_pack_add_frsq_f32(clause, bundle.add, regs); + } else if (bundle.add->op.special == BI_SPECIAL_EXP2_LOW) { + assert(!f16); + return pan_pack_add_fexp_f32(clause, bundle.add, regs); + } else if (bundle.add->op.special == BI_SPECIAL_IABS) { + assert(bundle.add->src_types[0] == nir_type_int32); + return pan_pack_add_iabs_s32(clause, bundle.add, regs); + } + + unreachable("Unknown special op"); case BI_TABLE: - return bi_pack_add_table(bundle.add, regs); + assert(bundle.add->dest_type == nir_type_float32); + return pan_pack_add_flogd_f32(clause, bundle.add, regs); case BI_SELECT: - return bi_pack_add_select(bundle.add, regs); + assert(nir_alu_type_get_type_size(bundle.add->src_types[0]) == 16); + return pan_pack_add_mkvec_v2i16(clause, bundle.add, regs); case BI_TEX: - if (bundle.add->op.texture == BI_TEX_COMPACT) - return bi_pack_add_tex_compact(clause, bundle.add, regs, stage); - else + if (bundle.add->op.texture == BI_TEX_COMPACT) { + assert(f16 || f32); + + if (f16) + return pan_pack_add_texs_2d_f16(clause, bundle.add, regs); + else + return pan_pack_add_texs_2d_f32(clause, bundle.add, regs); + } else unreachable("Unknown tex type"); case BI_ROUND: unreachable("Packing todo"); diff --git a/src/panfrost/bifrost/bifrost_compile.c b/src/panfrost/bifrost/bifrost_compile.c index 0ae217669c6..2e5785ab04c 100644 --- a/src/panfrost/bifrost/bifrost_compile.c +++ b/src/panfrost/bifrost/bifrost_compile.c @@ -187,10 +187,8 @@ bi_load_with_r61(enum bi_class T, nir_intrinsic_instr *instr) bi_instruction ld = bi_load(T, instr); ld.src[1] = BIR_INDEX_REGISTER | 61; /* TODO: RA */ ld.src[2] = BIR_INDEX_REGISTER | 62; - ld.src[3] = 0; ld.src_types[1] = nir_type_uint32; ld.src_types[2] = nir_type_uint32; - ld.src_types[3] = nir_intrinsic_type(instr); ld.format = nir_intrinsic_type(instr); return ld; } @@ -340,7 +338,10 @@ bi_emit_ld_frag_coord(bi_context *ctx, nir_intrinsic_instr *instr) .dest_type = nir_type_float32, .format = 
nir_type_float32, .dest = bi_make_temp(ctx), - .src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO }, + .src = { + BIR_INDEX_CONSTANT, + BIR_INDEX_PASS | BIFROST_SRC_CONST_LO + }, .src_types = { nir_type_uint32, nir_type_uint32 }, .constant = { .u32 = (i == 0) ? BIFROST_FRAGZ : BIFROST_FRAGW diff --git a/src/panfrost/bifrost/compiler.h b/src/panfrost/bifrost/compiler.h index 6f0578a1fb5..b207a9d2f04 100644 --- a/src/panfrost/bifrost/compiler.h +++ b/src/panfrost/bifrost/compiler.h @@ -145,8 +145,9 @@ struct bi_load_vary { struct bi_block; +/* Sync with gen-pack.py */ enum bi_cond { - BI_COND_ALWAYS, + BI_COND_ALWAYS = 0, BI_COND_LT, BI_COND_LE, BI_COND_GE, diff --git a/src/panfrost/bifrost/meson.build b/src/panfrost/bifrost/meson.build index eb1f59ff9b8..0d29dbd6a1d 100644 --- a/src/panfrost/bifrost/meson.build +++ b/src/panfrost/bifrost/meson.build @@ -54,6 +54,19 @@ bifrost_gen_disasm_c = custom_target( capture : true, ) +bi_generated_pack_h = custom_target( + 'bi_generated_pack.h', + input : ['gen_pack.py', 'ISA.xml'], + output : 'bi_generated_pack.h', + command : [prog_python, '@INPUT@'], + capture : true, +) + +idep_bi_generated_pack_h = declare_dependency( + sources : [bi_generated_pack_h], + include_directories : include_directories('.'), +) + libpanfrost_bifrost_disasm = static_library( 'panfrost_bifrost_disasm', ['disassemble.c', 'bi_print_common.c', bifrost_gen_disasm_c], @@ -69,7 +82,7 @@ libpanfrost_bifrost = static_library( 'panfrost_bifrost', [libpanfrost_bifrost_files, bifrost_nir_algebraic_c], include_directories : [inc_include, inc_src, inc_mapi, inc_mesa, inc_gallium, inc_gallium_aux, inc_panfrost_hw], - dependencies: [idep_nir], + dependencies: [idep_nir, idep_bi_generated_pack_h], link_with: [libpanfrost_util, libpanfrost_bifrost_disasm], c_args : [no_override_init_args], gnu_symbol_visibility : 'hidden',