i965/fs: Replace fs_reg::subreg_offset with fs_reg::offset expressed in bytes.

The fs_reg::subreg_offset and ::offset fields are now redundant: the
sub-GRF offset can just be added to the single ::offset field
expressed in byte units.  The current subreg_offset value can be
recovered by applying the following rule: Replace each rvalue
reference of subreg_offset like 'x = r.subreg_offset' with 'x =
r.offset % reg_unit', and each lvalue reference like 'r.subreg_offset
= x' with 'r.offset = ROUND_DOWN_TO(r.offset, reg_unit) + x'.
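
As a hypothetical before/after sketch of that rule (the 'reg' variable
and the stored value are made up for illustration; reg_unit stands for
REG_SIZE on GRFs or 4 bytes on uniforms, matching the byte_offset()
helper below):

   /* Before: the sub-GRF byte offset was tracked separately. */
   unsigned byte = reg.subreg_offset;
   reg.subreg_offset = 2 * type_sz(reg.type);

   /* After: everything is folded into the byte-sized ::offset. */
   unsigned byte = reg.offset % reg_unit;
   reg.offset = ROUND_DOWN_TO(reg.offset, reg_unit) +
                2 * type_sz(reg.type);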

For the same reason as in the previous patches, this doesn't attempt
to be particularly clever about simplifying the result in the interest
of keeping the rather lengthy patch as obvious as possible.  I'll come
back later to clean up any ugliness introduced here.

Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
Francisco Jerez 2016-09-01 15:11:21 -07:00
parent 9a523dd051
commit be095e11e4
6 changed files with 34 additions and 51 deletions

@@ -426,7 +426,6 @@ fs_reg::fs_reg(struct ::brw_reg reg) :
backend_reg(reg)
{
this->offset = 0;
this->subreg_offset = 0;
this->stride = 1;
if (this->file == IMM &&
(this->type != BRW_REGISTER_TYPE_V &&
@@ -440,7 +439,6 @@ bool
fs_reg::equals(const fs_reg &r) const
{
return (this->backend_reg::equals(r) &&
subreg_offset == r.subreg_offset &&
stride == r.stride);
}
@@ -448,7 +446,7 @@ fs_reg &
fs_reg::set_smear(unsigned subreg)
{
assert(file != ARF && file != FIXED_GRF && file != IMM);
subreg_offset = subreg * type_sz(type);
offset = ROUND_DOWN_TO(offset, REG_SIZE) + subreg * type_sz(type);
stride = 0;
return *this;
}
@@ -710,7 +708,7 @@ fs_inst::is_partial_write() const
return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
(this->exec_size * type_sz(this->dst.type)) < 32 ||
!this->dst.is_contiguous() ||
this->dst.subreg_offset > 0);
this->dst.offset % REG_SIZE != 0);
}
unsigned
@@ -1485,7 +1483,7 @@ fs_visitor::assign_curb_setup()
assert(inst->src[i].stride == 0);
inst->src[i] = byte_offset(
retype(brw_reg, inst->src[i].type),
inst->src[i].subreg_offset);
inst->src[i].offset % 4);
}
}
}
@@ -1642,7 +1640,7 @@ fs_visitor::convert_attr_sources_to_hw_regs(fs_inst *inst)
unsigned width = inst->src[i].stride == 0 ? 1 : exec_size;
struct brw_reg reg =
stride(byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
inst->src[i].subreg_offset),
inst->src[i].offset % REG_SIZE),
exec_size * inst->src[i].stride,
width, inst->src[i].stride);
reg.abs = inst->src[i].abs;
@@ -2715,7 +2713,7 @@ fs_visitor::compute_to_mrf()
inst->dst.type != inst->src[0].type ||
inst->src[0].abs || inst->src[0].negate ||
!inst->src[0].is_contiguous() ||
inst->src[0].subreg_offset)
inst->src[0].offset % REG_SIZE != 0)
continue;
/* Can't compute-to-MRF this GRF if someone else was going to
@@ -3519,7 +3517,7 @@ fs_visitor::lower_integer_multiplication()
assert(src1_1_w.stride == 1);
src1_1_w.stride = 2;
}
src1_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
src1_1_w.offset += type_sz(BRW_REGISTER_TYPE_UW);
}
ibld.MUL(low, inst->src[0], src1_0_w);
ibld.MUL(high, inst->src[0], src1_1_w);
@@ -3538,7 +3536,7 @@ fs_visitor::lower_integer_multiplication()
assert(src0_1_w.stride == 1);
src0_1_w.stride = 2;
}
src0_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
src0_1_w.offset += type_sz(BRW_REGISTER_TYPE_UW);
ibld.MUL(low, src0_0_w, inst->src[1]);
ibld.MUL(high, src0_1_w, inst->src[1]);
@@ -3546,14 +3544,14 @@ fs_visitor::lower_integer_multiplication()
fs_reg dst = inst->dst;
dst.type = BRW_REGISTER_TYPE_UW;
dst.subreg_offset = 2;
dst.offset = ROUND_DOWN_TO(dst.offset, REG_SIZE) + 2;
dst.stride = 2;
high.type = BRW_REGISTER_TYPE_UW;
high.stride = 2;
low.type = BRW_REGISTER_TYPE_UW;
low.subreg_offset = 2;
low.offset = ROUND_DOWN_TO(low.offset, REG_SIZE) + 2;
low.stride = 2;
ibld.ADD(dst, low, high);
@@ -5317,9 +5315,9 @@ fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
case VGRF:
fprintf(file, "vgrf%d", inst->dst.nr);
if (alloc.sizes[inst->dst.nr] != inst->regs_written ||
inst->dst.subreg_offset)
inst->dst.offset % REG_SIZE)
fprintf(file, "+%d.%d",
inst->dst.offset / REG_SIZE, inst->dst.subreg_offset);
inst->dst.offset / REG_SIZE, inst->dst.offset % REG_SIZE);
break;
case FIXED_GRF:
fprintf(file, "g%d", inst->dst.nr);
@@ -5373,9 +5371,9 @@ fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
case VGRF:
fprintf(file, "vgrf%d", inst->src[i].nr);
if (alloc.sizes[inst->src[i].nr] != (unsigned)inst->regs_read(i) ||
inst->src[i].subreg_offset)
inst->src[i].offset % REG_SIZE != 0)
fprintf(file, "+%d.%d", inst->src[i].offset / REG_SIZE,
inst->src[i].subreg_offset);
inst->src[i].offset % REG_SIZE);
break;
case FIXED_GRF:
fprintf(file, "g%d", inst->src[i].nr);
@@ -5388,9 +5386,9 @@ fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
break;
case UNIFORM:
fprintf(file, "u%d", inst->src[i].nr + inst->src[i].offset / 4);
if (inst->src[i].subreg_offset) {
if (inst->src[i].offset % 4 != 0) {
fprintf(file, "+%d.%d", inst->src[i].offset / 4,
inst->src[i].subreg_offset);
inst->src[i].offset % 4);
}
break;
case BAD_FILE:

@@ -280,12 +280,12 @@ fs_visitor::opt_combine_constants()
ibld.MOV(reg, brw_imm_f(imm->val));
imm->nr = reg.nr;
imm->subreg_offset = reg.subreg_offset;
imm->subreg_offset = reg.offset % REG_SIZE;
reg.subreg_offset += sizeof(float);
if ((unsigned)reg.subreg_offset == 8 * sizeof(float)) {
reg.offset += sizeof(float);
if (reg.offset == 8 * sizeof(float)) {
reg.nr = alloc.allocate(1);
reg.subreg_offset = 0;
reg.offset = 0;
}
}
promoted_constants = table.len;
@@ -296,7 +296,7 @@ fs_visitor::opt_combine_constants()
fs_reg *reg = link->reg;
reg->file = VGRF;
reg->nr = table.imm[i].nr;
reg->subreg_offset = table.imm[i].subreg_offset;
reg->offset = table.imm[i].subreg_offset;
reg->stride = 0;
reg->negate = signbit(reg->f) != signbit(table.imm[i].val);
assert((isnan(reg->f) && isnan(table.imm[i].val)) ||

@@ -341,7 +341,7 @@ region_contained_in(const fs_reg &src, unsigned regs_read,
const fs_reg &dst, unsigned regs_written)
{
return src.file == dst.file && src.nr == dst.nr &&
(src.offset + src.subreg_offset >= dst.offset + dst.subreg_offset) &&
src.offset >= dst.offset &&
src.offset / REG_SIZE + regs_read <= dst.offset / REG_SIZE + regs_written;
}
@@ -461,30 +461,21 @@ fs_visitor::try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry)
inst->saturate = inst->saturate || entry->saturate;
/* Compute the offset of inst->src[arg] relative to entry->dst */
const unsigned rel_offset = inst->src[arg].offset - entry->dst.offset +
inst->src[arg].subreg_offset;
const unsigned rel_offset = inst->src[arg].offset - entry->dst.offset;
/* Compute the first component of the copy that the instruction is
* reading, and the base byte offset within that component.
*/
assert(entry->dst.subreg_offset == 0 && entry->dst.stride == 1);
assert(entry->dst.offset % REG_SIZE == 0 && entry->dst.stride == 1);
const unsigned component = rel_offset / type_sz(entry->dst.type);
const unsigned suboffset = rel_offset % type_sz(entry->dst.type);
/* Account for the inconsistent units reg_offset is expressed in.
* FINISHME -- Make the units of reg_offset consistent (e.g. bytes?) for
* all register files.
*/
const unsigned reg_size = (entry->src.file == UNIFORM ? 4 : REG_SIZE);
/* Calculate the byte offset at the origin of the copy of the given
* component and suboffset.
*/
const unsigned offset = suboffset +
inst->src[arg].offset = suboffset +
component * entry->src.stride * type_sz(entry->src.type) +
entry->src.offset + entry->src.subreg_offset;
inst->src[arg].offset = ROUND_DOWN_TO(offset, reg_size);
inst->src[arg].subreg_offset = offset % reg_size;
entry->src.offset;
if (has_source_modifiers) {
if (entry->dst.type != inst->src[arg].type) {

@@ -97,14 +97,14 @@ brw_reg_from_fs_reg(fs_inst *inst, fs_reg *reg, unsigned gen, bool compressed)
}
brw_reg = retype(brw_reg, reg->type);
brw_reg = byte_offset(brw_reg, reg->subreg_offset);
brw_reg = byte_offset(brw_reg, reg->offset % REG_SIZE);
brw_reg.abs = reg->abs;
brw_reg.negate = reg->negate;
break;
case ARF:
case FIXED_GRF:
case IMM:
assert(reg->subreg_offset == 0);
assert(reg->offset % REG_SIZE == 0);
brw_reg = reg->as_brw_reg();
break;
case BAD_FILE:

@@ -509,13 +509,13 @@ fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
}
tmp.type = BRW_REGISTER_TYPE_W;
tmp.subreg_offset = 2;
tmp.offset = ROUND_DOWN_TO(tmp.offset, REG_SIZE) + 2;
tmp.stride = 2;
bld.OR(tmp, g0, brw_imm_uw(0x3f80));
tmp.type = BRW_REGISTER_TYPE_D;
tmp.subreg_offset = 0;
tmp.offset = ROUND_DOWN_TO(tmp.offset, REG_SIZE);
tmp.stride = 1;
} else {
/* Bit 31 of g1.6 is 0 if the polygon is front facing. */

@@ -52,12 +52,6 @@ public:
/** Smear a channel of the reg to all channels. */
fs_reg &set_smear(unsigned subreg);
/**
* Offset in bytes from the start of the register. Values up to a
* backend_reg::reg_offset unit are valid.
*/
int subreg_offset;
/** Register region horizontal stride */
uint8_t stride;
};
@@ -87,15 +81,15 @@ byte_offset(fs_reg reg, unsigned delta)
case ATTR:
case UNIFORM: {
const unsigned reg_size = (reg.file == UNIFORM ? 4 : REG_SIZE);
const unsigned suboffset = reg.subreg_offset + delta;
const unsigned suboffset = reg.offset % reg_size + delta;
reg.offset += ROUND_DOWN_TO(suboffset, reg_size);
reg.subreg_offset = suboffset % reg_size;
reg.offset = ROUND_DOWN_TO(reg.offset, reg_size) + suboffset % reg_size;
break;
}
case MRF: {
const unsigned suboffset = reg.subreg_offset + delta;
const unsigned suboffset = reg.offset % REG_SIZE + delta;
reg.nr += suboffset / REG_SIZE;
reg.subreg_offset = suboffset % REG_SIZE;
reg.offset = ROUND_DOWN_TO(reg.offset, REG_SIZE) + suboffset % REG_SIZE;
break;
}
case ARF:
@@ -193,7 +187,7 @@ static inline unsigned
reg_offset(const fs_reg &r)
{
return (r.file == VGRF || r.file == IMM ? 0 : r.nr) *
(r.file == UNIFORM ? 4 : REG_SIZE) + r.offset + r.subreg_offset;
(r.file == UNIFORM ? 4 : REG_SIZE) + r.offset;
}
/**