pan/bi: Eliminate writemasks in the IR
Since the hardware doesn't support them, they're a burden to deal with,
so let's ensure we never get to a place where we would need to at all.
Disables COMBINE lowering for now.

Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4766>
commit b2c6cf2b6d
parent 1622478fbd
10 changed files with 100 additions and 227 deletions
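The shape of the change, for orientation: instead of carrying a per-byte writemask on every instruction, the IR now keeps a channel count (`vector_channels`) plus a destination offset in 32-bit words (`dest_offset`), and a bytemask is derived on demand where register allocation still wants one. Below is a minimal standalone sketch of that derivation, mirroring the new bi_writemask() helper in the diff; the trimmed-down struct and the scalar fallback are illustrative simplifications, not the real bi_instruction:

    #include <assert.h>

    /* Trimmed-down stand-in for the IR fields this commit introduces;
     * the real bi_instruction carries far more state, and its component
     * count for scalar ops comes from bi_get_component_count(). */
    struct ins {
            unsigned type_size;       /* destination type size in bits */
            unsigned vector_channels; /* channels written; 0 for scalar ALU */
            unsigned dest_offset;     /* destination offset in 32-bit words */
    };

    /* Derive a per-byte mask on demand, as the new bi_writemask() does:
     * one bit per byte written, shifted by the word offset. */
    static unsigned
    writemask(const struct ins *ins)
    {
            unsigned bytes_per_comp = ins->type_size / 8;
            unsigned components = ins->vector_channels ? ins->vector_channels : 1;
            unsigned bytes = bytes_per_comp * components;
            unsigned mask = (1 << bytes) - 1;
            return mask << (ins->dest_offset * 4); /* 4 bytes per word */
    }

    int
    main(void)
    {
            /* vec3 of 32-bit values: twelve contiguous byte lanes */
            struct ins ld = { .type_size = 32, .vector_channels = 3 };
            assert(writemask(&ld) == 0xFFF);

            /* scalar 32-bit write into the second word of a combine */
            struct ins mov = { .type_size = 32, .dest_offset = 1 };
            assert(writemask(&mov) == 0xF0);
            return 0;
    }

With this representation a scalar ALU op and a vector load are described by the same two fields, and the RA-facing mask falls out of one helper instead of being threaded through every pass.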
@@ -29,7 +29,7 @@ bi_liveness_ins_update(uint16_t *live, bi_instruction *ins, unsigned max)
 {
         /* live_in[s] = GEN[s] + (live_out[s] - KILL[s]) */
 
-        pan_liveness_kill(live, ins->dest, max, ins->writemask);
+        pan_liveness_kill(live, ins->dest, max, bi_writemask(ins));
 
         bi_foreach_src(ins, src) {
                 unsigned node = ins->src[src];
@@ -38,7 +38,7 @@
  * component c is `x`, we are accessing v.x, and each of the succeeding
  * components y, z... up to the last component of the vector are accessed
  * sequentially, then we may perform the same rewrite. If this is not the case,
- * rewriting would require a swizzle or writemask (TODO), so we fallback on a
+ * rewriting would require more complex vector features, so we fallback on a
  * move.
  *
  * Otherwise is the source is not SSA, we also fallback on a move. We could
@@ -46,18 +46,15 @@
  */
 
 static void
-bi_insert_combine_mov(bi_context *ctx, bi_instruction *parent, unsigned comp, unsigned R)
+bi_combine_mov32(bi_context *ctx, bi_instruction *parent, unsigned comp, unsigned R)
 {
-        unsigned bits = nir_alu_type_get_type_size(parent->dest_type);
-        unsigned bytes = bits / 8;
-
         bi_instruction move = {
                 .type = BI_MOV,
                 .dest = R,
-                .dest_type = parent->dest_type,
-                .writemask = ((1 << bytes) - 1) << (bytes * comp),
+                .dest_type = nir_type_uint32,
+                .dest_offset = comp,
                 .src = { parent->src[comp] },
-                .src_types = { parent->dest_type },
+                .src_types = { nir_type_uint32 },
                 .swizzle = { { parent->swizzle[comp][0] } }
         };
 
@@ -70,12 +67,11 @@ bi_insert_combine_mov(bi_context *ctx, bi_instruction *parent, unsigned comp, un
  * bookkeeping. */
 
 static bi_instruction *
-bi_get_parent(bi_context *ctx, unsigned idx, unsigned mask)
+bi_get_parent(bi_context *ctx, unsigned idx)
 {
         bi_foreach_instr_global(ctx, ins) {
                 if (ins->dest == idx)
-                        if ((ins->writemask & mask) == mask)
-                                return ins;
+                        return ins;
         }
 
         return NULL;
@@ -103,76 +99,31 @@ bi_rewrite_uses(bi_context *ctx,
         }
 }
 
-/* Shifts the writemask of an instruction by a specified byte count,
- * rotating the sources to compensate. Returns true if successful, and
- * returns false if not (nondestructive in this case). */
-
-static bool
-bi_shift_mask(bi_instruction *ins, unsigned shift)
-{
-        /* No op and handles the funny cases */
-        if (!shift)
-                return true;
-
-        unsigned sz = nir_alu_type_get_type_size(ins->dest_type);
-        unsigned bytes = sz / 8;
-
-        /* If things are misaligned, we bail. Check if shift % bytes is
-         * nonzero. Note bytes is a power-of-two. */
-        if (shift & (bytes - 1))
-                return false;
-
-        /* Ensure there are no funny types */
-        bi_foreach_src(ins, s) {
-                if (ins->src[s] && nir_alu_type_get_type_size(ins->src_types[s]) != sz)
-                        return false;
-        }
-
-        /* Shift swizzle so old i'th component is accessed by new (i + j)'th
-         * component where j is component shift */
-        unsigned component_shift = shift / bytes;
-
-        /* Sanity check to avoid memory corruption */
-        if (component_shift >= sizeof(ins->swizzle[0]))
-                return false;
-
-        /* Otherwise, shift is divisible by bytes, and all relevant src types
-         * are the same size as the dest type. */
-        ins->writemask <<= shift;
-
-        bi_foreach_src(ins, s) {
-                if (!ins->src[s]) continue;
-
-                size_t overlap = sizeof(ins->swizzle[s]) - component_shift;
-                memmove(ins->swizzle[s] + component_shift, ins->swizzle[s], overlap);
-        }
-
-        return true;
-}
-
 /* Checks if we have a nicely aligned vector prefix */
 
 static bool
-bi_is_aligned_vec(bi_instruction *combine, unsigned s, bi_instruction *parent,
+bi_is_aligned_vec32(bi_instruction *combine, unsigned s, bi_instruction *io,
                 unsigned *count)
 {
         /* We only support prefixes */
         if (s != 0)
                 return false;
 
-        /* Is it a contiguous write? */
-        unsigned writes = util_bitcount(parent->writemask);
-        if (parent->writemask != ((1 << writes) - 1))
+        if (!(bi_class_props[io->type] & BI_VECTOR))
                 return false;
 
-        /* Okay - how many components? */
-        unsigned bytes = nir_alu_type_get_type_size(parent->dest_type) / 8;
-        unsigned components = writes / bytes;
+        if (nir_alu_type_get_type_size(combine->dest_type) != 32)
+                return false;
+
+        if (nir_alu_type_get_type_size(io->dest_type) != 32)
+                return false;
+
+        unsigned components = io->vector_channels;
 
         /* Are we contiguous like that? */
 
         for (unsigned i = 0; i < components; ++i) {
-                if (combine->src[i] != parent->dest)
+                if (combine->src[i] != io->dest)
                         return false;
 
                 if (combine->swizzle[i][0] != i)
@@ -184,6 +135,7 @@ bi_is_aligned_vec(bi_instruction *combine, unsigned s, bi_instruction *parent,
         return true;
 }
 
+#if 0
 /* Tries to lower a given source of a combine to an appropriate rewrite,
  * returning true if successful, and false with no changes otherwise. */
 
@@ -210,7 +162,7 @@ bi_lower_combine_src(bi_context *ctx, bi_instruction *ins, unsigned s, unsigned
         unsigned pbytes = nir_alu_type_get_type_size(parent->dest_type) / 8;
         if (pbytes != bytes) return false;
 
-        bool scalar = (parent->writemask == ((1 << bytes) - 1));
+        bool scalar = parent->vector_channels != 0;
         if (!(scalar || bi_is_aligned_vec(ins, s, parent, vec_count))) return false;
 
         if (!bi_shift_mask(parent, bytes * s)) return false;
@@ -218,6 +170,7 @@ bi_lower_combine_src(bi_context *ctx, bi_instruction *ins, unsigned s, unsigned
         parent->dest = R;
         return true;
 }
+#endif
 
 void
 bi_lower_combine(bi_context *ctx, bi_block *block)
@@ -225,9 +178,6 @@ bi_lower_combine(bi_context *ctx, bi_block *block)
         bi_foreach_instr_in_block_safe(block, ins) {
                 if (ins->type != BI_COMBINE) continue;
 
-                /* The vector itself can't be shifted */
-                assert(ins->writemask & 0x1);
-
                 unsigned R = bi_make_temp_reg(ctx);
 
                 bi_foreach_src(ins, s) {
@@ -235,6 +185,7 @@ bi_lower_combine(bi_context *ctx, bi_block *block)
                         if (!ins->src[s])
                                 continue;
 
+#if 0
                         unsigned vec_count = 0;
 
                         if (bi_lower_combine_src(ctx, ins, s, R, &vec_count)) {
@@ -244,6 +195,8 @@ bi_lower_combine(bi_context *ctx, bi_block *block)
                         } else {
                                 bi_insert_combine_mov(ctx, ins, s, R);
                         }
+#endif
+                        bi_combine_mov32(ctx, ins, s, R);
                 }
 
 
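To see why falling back to plain moves is safe, here is the effect of the surviving lowering in isolation: each COMBINE source becomes a 32-bit move into the matching word of a fresh register, so no write ever needs a mask. A hedged sketch under a toy register model follows; the array-of-words register is an assumption for illustration only, where mesa's bi_combine_mov32() emits real IR instructions instead:

    #include <assert.h>
    #include <stdint.h>

    /* Toy model: a register is four 32-bit words. The lowering moves
     * source component `comp` into word `comp` of the destination,
     * which is what .dest_offset = comp expresses in the IR. */
    static void
    mov32(uint32_t *reg, unsigned dest_offset, uint32_t value)
    {
            reg[dest_offset] = value;
    }

    int
    main(void)
    {
            uint32_t R[4] = { 0 };
            const uint32_t src[3] = { 0x11111111u, 0x22222222u, 0x33333333u };

            /* combine(src[0], src[1], src[2]) lowers to one mov32 per source */
            for (unsigned comp = 0; comp < 3; ++comp)
                    mov32(R, comp, src[comp]);

            assert(R[0] == 0x11111111u && R[1] == 0x22222222u &&
                   R[2] == 0x33333333u);
            return 0;
    }
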
@@ -968,10 +968,6 @@ bi_pack_add_ld_vary(bi_clause *clause, bi_instruction *ins, struct bi_registers
                 BIFROST_ADD_OP_LD_VAR_32 :
                 BIFROST_ADD_OP_LD_VAR_16;
 
-        unsigned cmask = bi_from_bytemask(ins->writemask, size / 8);
-        unsigned channels = util_bitcount(cmask);
-        assert(cmask == ((1 << channels) - 1));
-
         unsigned packed_addr = 0;
 
         if (ins->src[0] & BIR_INDEX_CONSTANT) {
@@ -987,6 +983,7 @@ bi_pack_add_ld_vary(bi_clause *clause, bi_instruction *ins, struct bi_registers
         assert(ins->dest & BIR_INDEX_REGISTER);
         clause->data_register = ins->dest & ~BIR_INDEX_REGISTER;
 
+        unsigned channels = ins->vector_channels;
         assert(channels >= 1 && channels <= 4);
 
         struct bifrost_ld_var pack = {
@@ -1076,7 +1073,7 @@ bi_pack_add_addmin(bi_instruction *ins, struct bi_registers *regs)
 static unsigned
 bi_pack_add_ld_ubo(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
 {
-        unsigned components = bi_load32_components(ins);
+        assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
 
         const unsigned ops[4] = {
                 BIFROST_ADD_OP_LD_UBO_1,
@@ -1086,7 +1083,7 @@ bi_pack_add_ld_ubo(bi_clause *clause, bi_instruction *ins, struct bi_registers *
         };
 
         bi_write_data_register(clause, ins);
-        return bi_pack_add_2src(ins, regs, ops[components - 1]);
+        return bi_pack_add_2src(ins, regs, ops[ins->vector_channels - 1]);
 }
 
 static enum bifrost_ldst_type
@@ -1119,11 +1116,13 @@ bi_pack_add_ld_var_addr(bi_clause *clause, bi_instruction *ins, struct bi_regist
 static unsigned
 bi_pack_add_ld_attr(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
 {
+        assert(ins->vector_channels >= 0 && ins->vector_channels <= 4);
+
         struct bifrost_ld_attr pack = {
                 .src0 = bi_get_src(ins, regs, 1, false),
                 .src1 = bi_get_src(ins, regs, 2, false),
                 .location = bi_get_immediate(ins, 0),
-                .channels = MALI_POSITIVE(bi_load32_components(ins)),
+                .channels = MALI_POSITIVE(ins->vector_channels),
                 .type = bi_pack_ldst_type(ins->dest_type),
                 .op = BIFROST_ADD_OP_LD_ATTR
         };
@@ -1135,13 +1134,13 @@ bi_pack_add_ld_attr(bi_clause *clause, bi_instruction *ins, struct bi_registers
 static unsigned
 bi_pack_add_st_vary(bi_clause *clause, bi_instruction *ins, struct bi_registers *regs)
 {
-        assert(ins->store_channels >= 1 && ins->store_channels <= 4);
+        assert(ins->vector_channels >= 1 && ins->vector_channels <= 4);
 
         struct bifrost_st_vary pack = {
                 .src0 = bi_get_src(ins, regs, 1, false),
                 .src1 = bi_get_src(ins, regs, 2, false),
                 .src2 = bi_get_src(ins, regs, 3, false),
-                .channels = MALI_POSITIVE(ins->store_channels),
+                .channels = MALI_POSITIVE(ins->vector_channels),
                 .op = BIFROST_ADD_OP_ST_VAR
         };
@@ -244,12 +244,9 @@ bi_print_alu_type(nir_alu_type t, FILE *fp)
 static void
 bi_print_swizzle(bi_instruction *ins, unsigned src, FILE *fp)
 {
-        unsigned size = nir_alu_type_get_type_size(ins->dest_type);
-        unsigned count = (size == 64) ? 1 : (32 / size);
-
         fprintf(fp, ".");
 
-        for (unsigned u = 0; u < count; ++u) {
+        for (unsigned u = 0; u < bi_get_component_count(ins, src); ++u) {
                 assert(ins->swizzle[src][u] < 4);
                 fputc("xyzw"[ins->swizzle[src][u]], fp);
         }
@@ -348,27 +345,6 @@ bi_print_branch(struct bi_branch *branch, FILE *fp)
         fprintf(fp, ".%s", bi_cond_name(branch->cond));
 }
 
-static void
-bi_print_writemask(bi_instruction *ins, FILE *fp)
-{
-        unsigned bits_per_comp = nir_alu_type_get_type_size(ins->dest_type);
-        assert(bits_per_comp);
-        unsigned bytes_per_comp = bits_per_comp / 8;
-        unsigned comps = 16 / bytes_per_comp;
-        unsigned smask = (1 << bytes_per_comp) - 1;
-        fprintf(fp, ".");
-
-        for (unsigned i = 0; i < comps; ++i) {
-                unsigned masked = (ins->writemask >> (i * bytes_per_comp)) & smask;
-                if (!masked)
-                        continue;
-
-                assert(masked == smask);
-                assert(i < 4);
-                fputc("xyzw"[i], fp);
-        }
-}
-
 void
 bi_print_instruction(bi_instruction *ins, FILE *fp)
 {
@@ -404,11 +380,12 @@ bi_print_instruction(bi_instruction *ins, FILE *fp)
                 fprintf(fp, ".%s", bi_cond_name(ins->csel_cond));
         else if (ins->type == BI_BLEND)
                 fprintf(fp, ".loc%u", ins->blend_location);
-        else if (ins->type == BI_STORE || ins->type == BI_STORE_VAR)
-                fprintf(fp, ".v%u", ins->store_channels);
         else if (ins->type == BI_TEX)
                 fprintf(fp, ".%s", bi_tex_op_name(ins->op.texture));
 
+        if (ins->vector_channels)
+                fprintf(fp, ".v%u", ins->vector_channels);
+
         if (ins->dest)
                 bi_print_alu_type(ins->dest_type, fp);
 
@@ -422,8 +399,8 @@ bi_print_instruction(bi_instruction *ins, FILE *fp)
         bool succ = bi_print_dest_index(fp, ins, ins->dest);
         assert(succ);
 
-        if (ins->dest)
-                bi_print_writemask(ins, fp);
+        if (ins->dest_offset)
+                fprintf(fp, "+%u", ins->dest_offset);
 
         fprintf(fp, ", ");
 
@@ -25,6 +25,7 @@
  */
 
 #include "compiler.h"
+#include "bi_print.h"
 #include "panfrost/util/lcra.h"
 #include "util/u_memory.h"
 
@@ -44,7 +45,7 @@ bi_compute_interference(bi_context *ctx, struct lcra_state *l)
                 if (ins->dest && (ins->dest < l->node_count)) {
                         for (unsigned i = 1; i < l->node_count; ++i) {
                                 if (live[i])
-                                        lcra_add_node_interference(l, ins->dest, ins->writemask, i, live[i]);
+                                        lcra_add_node_interference(l, ins->dest, bi_writemask(ins), i, live[i]);
                         }
                 }
 
@@ -101,10 +102,10 @@ bi_reg_from_index(struct lcra_state *l, unsigned index, unsigned offset)
         if (solution < 0)
                 return index;
 
+        solution += offset;
+
         assert((solution & 0x3) == 0);
         unsigned reg = solution / 4;
-        reg += offset;
-
         return BIR_INDEX_REGISTER | reg;
 }
 
@@ -121,19 +122,10 @@ bi_adjust_src_ra(bi_instruction *ins, struct lcra_state *l, unsigned src)
                 /* TODO: Do we do anything here? */
         } else {
                 /* Use the swizzle as component select */
-                nir_alu_type T = ins->src_types[src];
-                unsigned size = nir_alu_type_get_type_size(T);
-                assert(size <= 32); /* TODO: 64-bit */
-                unsigned comps_per_reg = 32 / size;
                 unsigned components = bi_get_component_count(ins, src);
 
                 for (unsigned i = 0; i < components; ++i) {
-                        /* If we're not writing the component, who cares? */
-                        if (!bi_writes_component(ins, i))
-                                continue;
-
-                        unsigned off = ins->swizzle[src][i] / comps_per_reg;
-                        off *= 4; /* 32-bit registers */
+                        unsigned off = ins->swizzle[src][i] / components;
 
                         /* We can't cross register boundaries in a swizzle */
                         if (i == 0)
@@ -141,7 +133,7 @@ bi_adjust_src_ra(bi_instruction *ins, struct lcra_state *l, unsigned src)
                 else
                         assert(off == offset);
 
-                        ins->swizzle[src][i] %= comps_per_reg;
+                        ins->swizzle[src][i] %= components;
                 }
         }
 
@@ -154,25 +146,7 @@ bi_adjust_dest_ra(bi_instruction *ins, struct lcra_state *l)
         if (ins->dest >= l->node_count)
                 return;
 
-        bool vector = (bi_class_props[ins->type] & BI_VECTOR);
-        unsigned offset = 0;
-
-        if (!vector) {
-                /* Look at the writemask to get an offset, specifically the
-                 * trailing zeros */
-
-                unsigned tz = __builtin_ctz(ins->writemask);
-
-                /* Recall writemask is one bit per byte, so tz is in eytes */
-                unsigned regs = tz / 4;
-                offset = regs * 4;
-
-                /* Adjust writemask to compensate */
-                ins->writemask >>= offset;
-        }
-
-        ins->dest = bi_reg_from_index(l, ins->dest, offset);
-
+        ins->dest = bi_reg_from_index(l, ins->dest, ins->dest_offset);
 }
 
 static void
@@ -42,7 +42,6 @@ bi_emit_fexp2_new(bi_context *ctx, nir_alu_instr *instr)
                 .op = { .mscale = true },
                 .dest = bi_make_temp(ctx),
                 .dest_type = nir_type_float32,
-                .writemask = 0xF,
                 .src = {
                         bir_src_index(&instr->src[0].src),
                         BIR_INDEX_CONSTANT | 0,
@@ -68,7 +67,6 @@ bi_emit_fexp2_new(bi_context *ctx, nir_alu_instr *instr)
                 .type = BI_CONVERT,
                 .dest = bi_make_temp(ctx),
                 .dest_type = nir_type_int32,
-                .writemask = 0xF,
                 .src = { mscale.dest },
                 .src_types = { nir_type_float32 },
                 .roundmode = BIFROST_RTE
@@ -81,7 +79,6 @@ bi_emit_fexp2_new(bi_context *ctx, nir_alu_instr *instr)
                 .op = { .special = BI_SPECIAL_EXP2_LOW },
                 .dest = bir_dest_index(&instr->dest.dest),
                 .dest_type = nir_type_float32,
-                .writemask = 0xF,
                 .src = { f2i.dest, mscale.src[0] },
                 .src_types = { nir_type_int32, nir_type_float32 },
         };
@@ -102,7 +99,6 @@ bi_emit_flog2_new(bi_context *ctx, nir_alu_instr *instr)
                 .op = { .frexp = BI_FREXPE_LOG },
                 .dest = bi_make_temp(ctx),
                 .dest_type = nir_type_int32,
-                .writemask = 0xF,
                 .src = { bir_src_index(&instr->src[0].src) },
                 .src_types = { nir_type_float32 }
         };
@@ -112,7 +108,6 @@ bi_emit_flog2_new(bi_context *ctx, nir_alu_instr *instr)
                 .type = BI_CONVERT,
                 .dest = bi_make_temp(ctx),
                 .dest_type = nir_type_float32,
-                .writemask = 0xF,
                 .src = { frexpe.dest },
                 .src_types = { nir_type_int32 },
                 .roundmode = BIFROST_RTZ
@@ -124,7 +119,6 @@ bi_emit_flog2_new(bi_context *ctx, nir_alu_instr *instr)
                 .op = { .reduce = BI_REDUCE_ADD_FREXPM },
                 .dest = bi_make_temp(ctx),
                 .dest_type = nir_type_float32,
-                .writemask = 0xF,
                 .src = {
                         BIR_INDEX_CONSTANT,
                         bir_src_index(&instr->src[0].src),
@@ -141,7 +135,6 @@ bi_emit_flog2_new(bi_context *ctx, nir_alu_instr *instr)
                 .op = { .table = BI_TABLE_LOG2_U_OVER_U_1_LOW },
                 .dest = bi_make_temp(ctx),
                 .dest_type = nir_type_float32,
-                .writemask = 0xF,
                 .src = { bir_src_index(&instr->src[0].src) },
                 .src_types = { nir_type_float32 },
         };
@@ -151,7 +144,6 @@ bi_emit_flog2_new(bi_context *ctx, nir_alu_instr *instr)
                 .type = BI_FMA,
                 .dest = bir_dest_index(&instr->dest.dest),
                 .dest_type = nir_type_float32,
-                .writemask = 0xF,
                 .src = {
                         help.dest,
                         x_minus_1.dest,
@@ -59,19 +59,12 @@ emit_jump(bi_context *ctx, nir_jump_instr *instr)
         pan_block_add_successor(&ctx->current_block->base, &branch->branch.target->base);
 }
 
-/* Gets a bytemask for a complete vecN write */
-static unsigned
-bi_mask_for_channels_32(unsigned i)
-{
-        return (1 << (4 * i)) - 1;
-}
-
 static bi_instruction
 bi_load(enum bi_class T, nir_intrinsic_instr *instr)
 {
         bi_instruction load = {
                 .type = T,
-                .writemask = bi_mask_for_channels_32(instr->num_components),
+                .vector_channels = instr->num_components,
                 .src = { BIR_INDEX_CONSTANT },
                 .src_types = { nir_type_uint32 },
                 .constant = { .u64 = nir_intrinsic_base(instr) },
@@ -135,7 +128,6 @@ bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
                 },
                 .dest = BIR_INDEX_REGISTER | 60 /* TODO: RA */,
                 .dest_type = nir_type_uint32,
-                .writemask = 0xF
         };
 
         bi_emit(ctx, ins);
@@ -160,7 +152,7 @@ bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
                 },
                 .dest = BIR_INDEX_REGISTER | 48 /* Looks like magic */,
                 .dest_type = nir_type_uint32,
-                .writemask = 0xF
+                .vector_channels = 4
         };
 
         assert(blend.blend_location < 8);
@@ -190,7 +182,7 @@ bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
         bi_instruction address = bi_load_with_r61(BI_LOAD_VAR_ADDRESS, instr);
         address.dest = bi_make_temp(ctx);
         address.dest_type = nir_type_uint32;
-        address.writemask = (1 << 12) - 1;
+        address.vector_channels = 3;
 
         unsigned nr = nir_intrinsic_src_components(instr, 0);
         assert(nir_intrinsic_write_mask(instr) == ((1 << nr) - 1));
@@ -209,7 +201,7 @@ bi_emit_st_vary(bi_context *ctx, nir_intrinsic_instr *instr)
                         { 0 },
                         { 0 }, { 1 }, { 2}
                 },
-                .store_channels = nr,
+                .vector_channels = nr,
         };
 
         for (unsigned i = 0; i < nr; ++i)
@@ -253,7 +245,7 @@ bi_emit_sysval(bi_context *ctx, nir_instr *instr,
 
         bi_instruction load = {
                 .type = BI_LOAD_UNIFORM,
-                .writemask = (1 << (nr_components * 4)) - 1,
+                .vector_channels = nr_components,
                 .src = { BIR_INDEX_CONSTANT, BIR_INDEX_ZERO },
                 .src_types = { nir_type_uint32, nir_type_uint32 },
                 .constant = { (uniform * 16) + offset },
@@ -327,7 +319,6 @@ emit_load_const(bi_context *ctx, nir_load_const_instr *instr)
                 .type = BI_MOV,
                 .dest = bir_ssa_index(&instr->def),
                 .dest_type = instr->def.bit_size | nir_type_uint,
-                .writemask = (1 << (instr->def.bit_size / 8)) - 1,
                 .src = {
                         BIR_INDEX_CONSTANT
                 },
@@ -476,7 +467,7 @@ bi_cond_for_nir(nir_op op, bool soft)
 
 static void
 bi_copy_src(bi_instruction *alu, nir_alu_instr *instr, unsigned i, unsigned to,
-                unsigned *constants_left, unsigned *constant_shift)
+                unsigned *constants_left, unsigned *constant_shift, unsigned comps)
 {
         unsigned bits = nir_src_bit_size(instr->src[i].src);
         unsigned dest_bits = nir_dest_bit_size(instr->dest.dest);
@@ -506,13 +497,19 @@ bi_copy_src(bi_instruction *alu, nir_alu_instr *instr, unsigned i, unsigned to,
 
         alu->src[to] = bir_src_index(&instr->src[i].src);
 
-        /* We assert scalarization above */
-        alu->swizzle[to][0] = instr->src[i].swizzle[0];
+        /* Copy swizzle for all vectored components, replicating last component
+         * to fill undersized */
+
+        unsigned vec = alu->type == BI_COMBINE ? 1 :
+                MAX2(1, 32 / dest_bits);
+
+        for (unsigned j = 0; j < vec; ++j)
+                alu->swizzle[to][j] = instr->src[i].swizzle[MIN2(j, comps - 1)];
 }
 
 static void
 bi_fuse_csel_cond(bi_instruction *csel, nir_alu_src cond,
-                unsigned *constants_left, unsigned *constant_shift)
+                unsigned *constants_left, unsigned *constant_shift, unsigned comps)
 {
         /* Bail for vector weirdness */
         if (cond.swizzle[0] != 0)
@@ -537,8 +534,8 @@ bi_fuse_csel_cond(bi_instruction *csel, nir_alu_src cond,
 
         /* We found one, let's fuse it in */
         csel->csel_cond = bcond;
-        bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift);
-        bi_copy_src(csel, alu, 1, 1, constants_left, constant_shift);
+        bi_copy_src(csel, alu, 0, 0, constants_left, constant_shift, comps);
+        bi_copy_src(csel, alu, 1, 1, constants_left, constant_shift, comps);
 }
 
 static void
@@ -567,22 +564,14 @@ emit_alu(bi_context *ctx, nir_alu_instr *instr)
         /* TODO: Implement lowering of special functions for older Bifrost */
         assert((alu.type != BI_SPECIAL) || !(ctx->quirks & BIFROST_NO_FAST_OP));
 
-        if (instr->dest.dest.is_ssa) {
-                /* Construct a writemask */
-                unsigned bits_per_comp = instr->dest.dest.ssa.bit_size;
-                unsigned comps = instr->dest.dest.ssa.num_components;
-
-                if (alu.type != BI_COMBINE)
-                        assert(comps == 1);
-
-                unsigned bits = bits_per_comp * comps;
-                unsigned bytes = bits / 8;
-                alu.writemask = (1 << bytes) - 1;
-        } else {
-                unsigned comp_mask = instr->dest.write_mask;
-
-                alu.writemask = pan_to_bytemask(nir_dest_bit_size(instr->dest.dest),
-                                comp_mask);
+        unsigned comps = nir_dest_num_components(instr->dest.dest);
+
+        if (alu.type != BI_COMBINE)
+                assert(comps <= MAX2(1, 32 / comps));
+
+        if (!instr->dest.dest.is_ssa) {
+                for (unsigned i = 0; i < comps; ++i)
+                        assert(instr->dest.write_mask);
         }
 
         /* We inline constants as we go. This tracks how many constants have
@@ -607,7 +596,7 @@ emit_alu(bi_context *ctx, nir_alu_instr *instr)
                 if (i && alu.type == BI_CSEL)
                         f++;
 
-                bi_copy_src(&alu, instr, i, i + f, &constants_left, &constant_shift);
+                bi_copy_src(&alu, instr, i, i + f, &constants_left, &constant_shift, comps);
         }
 
         /* Op-specific fixup */
@@ -676,7 +665,7 @@ emit_alu(bi_context *ctx, nir_alu_instr *instr)
                 alu.src_types[1] = alu.src_types[0];
 
                 bi_fuse_csel_cond(&alu, instr->src[0],
-                                &constants_left, &constant_shift);
+                                &constants_left, &constant_shift, comps);
         }
 
         bi_emit(ctx, alu);
@@ -698,8 +687,7 @@ emit_tex_compact(bi_context *ctx, nir_tex_instr *instr)
                 .dest = bir_dest_index(&instr->dest),
                 .dest_type = instr->dest_type,
                 .src_types = { nir_type_float32, nir_type_float32 },
-                .writemask = instr->dest_type == nir_type_float32 ?
-                        0xFFFF : 0xFF,
+                .vector_channels = 4
         };
 
         for (unsigned i = 0; i < instr->num_srcs; ++i) {
@@ -93,28 +93,17 @@ bi_from_bytemask(uint16_t bytemask, unsigned bytes)
 }
 
 unsigned
-bi_get_component_count(bi_instruction *ins, unsigned src)
+bi_get_component_count(bi_instruction *ins, signed src)
 {
         if (bi_class_props[ins->type] & BI_VECTOR) {
-                return (src == 0) ? 4 : 1;
+                assert(ins->vector_channels);
+                return (src <= 0) ? ins->vector_channels : 1;
         } else {
-                /* Stores imply VECTOR */
-                assert(ins->dest_type);
-                unsigned bytes = nir_alu_type_get_type_size(ins->dest_type);
-                return 32 / bytes;
+                unsigned bytes = nir_alu_type_get_type_size(src < 0 ? ins->dest_type : ins->src_types[src]);
+                return MAX2(32 / bytes, 1);
         }
 }
 
-unsigned
-bi_load32_components(bi_instruction *ins)
-{
-        unsigned mask = bi_from_bytemask(ins->writemask, 4);
-        unsigned count = util_bitcount(mask);
-        assert(mask == ((1 << count) - 1));
-        assert(count >= 1 && count <= 4);
-        return count;
-}
-
 uint16_t
 bi_bytemask_of_read_components(bi_instruction *ins, unsigned node)
 {
@@ -157,11 +146,18 @@ bi_get_immediate(bi_instruction *ins, unsigned index)
 bool
 bi_writes_component(bi_instruction *ins, unsigned comp)
 {
-        /* TODO: Do we want something less coarse? */
-        if (bi_class_props[ins->type] & BI_VECTOR)
-                return true;
-
+        return comp < bi_get_component_count(ins, -1);
+}
+
+unsigned
+bi_writemask(bi_instruction *ins)
+{
         nir_alu_type T = ins->dest_type;
         unsigned size = nir_alu_type_get_type_size(T);
-        return ins->writemask & (0xF << (comp * (size / 8)));
+        unsigned bytes_per_comp = size / 8;
+        unsigned components = bi_get_component_count(ins, -1);
+        unsigned bytes = bytes_per_comp * components;
+        unsigned mask = (1 << bytes) - 1;
+        unsigned shift = ins->dest_offset * 4; /* 32-bit words */
+        return (mask << shift);
 }
@@ -114,8 +114,7 @@ extern unsigned bi_class_props[BI_NUM_CLASSES];
  * the end of a clause. Implies ADD */
 #define BI_SCHED_HI_LATENCY (1 << 7)
 
-/* Intrinsic is vectorized and should read 4 components in the first source
- * regardless of writemask */
+/* Intrinsic is vectorized and acts with `vector_channels` components */
 #define BI_VECTOR (1 << 8)
 
 /* Use a data register for src0/dest respectively, bypassing the usual
@@ -229,6 +228,10 @@ typedef struct {
         unsigned dest;
         unsigned src[BIR_SRC_COUNT];
 
+        /* 32-bit word offset for destination, added to the register number in
+         * RA when lowering combines */
+        unsigned dest_offset;
+
         /* If one of the sources has BIR_INDEX_CONSTANT */
         union {
                 uint64_t u64;
@@ -246,14 +249,6 @@ typedef struct {
         /* Round mode (requires BI_ROUNDMODE) */
         enum bifrost_roundmode roundmode;
 
-        /* Writemask (bit for each affected byte). This is quite restricted --
-         * ALU ops can only write to a single channel (exception: <32 in which
-         * you can write to 32/N contiguous aligned channels). Load/store can
-         * only write to all channels at once, in a sense. But it's still
-         * better to use this generic form than have synthetic ops flying
-         * about, since we're not essentially vector for RA purposes. */
-        uint16_t writemask;
-
         /* Destination type. Usually the type of the instruction
          * itself, but if sources and destination have different
         * types, the type of the destination wins (so f2i would be
@@ -269,6 +264,9 @@ typedef struct {
          * selection, so we don't have to special case extraction. */
         uint8_t swizzle[BIR_SRC_COUNT][NIR_MAX_VEC_COMPONENTS];
 
+        /* For VECTOR ops, how many channels are written? */
+        unsigned vector_channels;
+
         /* A class-specific op from which the actual opcode can be derived
          * (along with the above information) */
 
@@ -299,9 +297,6 @@ typedef struct {
 
         /* For BLEND -- the location 0-7 */
         unsigned blend_location;
-
-        /* For STORE, STORE_VAR -- channel count */
-        unsigned store_channels;
         };
 } bi_instruction;
 
@@ -578,11 +573,11 @@ bool bi_has_source_mods(bi_instruction *ins);
 bool bi_is_src_swizzled(bi_instruction *ins, unsigned s);
 bool bi_has_arg(bi_instruction *ins, unsigned arg);
 uint16_t bi_from_bytemask(uint16_t bytemask, unsigned bytes);
-unsigned bi_get_component_count(bi_instruction *ins, unsigned s);
-unsigned bi_load32_components(bi_instruction *ins);
+unsigned bi_get_component_count(bi_instruction *ins, signed s);
 uint16_t bi_bytemask_of_read_components(bi_instruction *ins, unsigned node);
 uint64_t bi_get_immediate(bi_instruction *ins, unsigned index);
 bool bi_writes_component(bi_instruction *ins, unsigned comp);
+unsigned bi_writemask(bi_instruction *ins);
 
 /* BIR passes */
 
@@ -56,12 +56,12 @@ bit_test_single(struct panfrost_device *dev,
                 },
                 .dest = BIR_INDEX_REGISTER | 0,
                 .dest_type = nir_type_uint32,
-                .writemask = 0xFFFF
+                .vector_channels = 4,
         };
 
         bi_instruction ldva = {
                 .type = BI_LOAD_VAR_ADDRESS,
-                .writemask = (1 << 12) - 1,
+                .vector_channels = 3,
                 .dest = BIR_INDEX_REGISTER | 32,
                 .dest_type = nir_type_uint32,
                 .src = {
@@ -88,7 +88,7 @@ bit_test_single(struct panfrost_device *dev,
                         nir_type_uint32,
                         nir_type_uint32, nir_type_uint32, nir_type_uint32,
                 },
-                .store_channels = 4
+                .vector_channels = 4
         };
 
         bi_context *ctx = rzalloc(NULL, bi_context);
@@ -382,7 +382,6 @@ bit_convert_helper(struct panfrost_device *dev, unsigned from_size,
         bi_instruction ins = {
                 .type = BI_CONVERT,
                 .dest = BIR_INDEX_REGISTER | 0,
-                .writemask = 0xF,
                 .src = { BIR_INDEX_REGISTER | 0 }
         };
 