freedreno/ir3: fix addr/pred spilling

The live_values and use_count were not being properly updated.  This
starts triggering problems with the next patch, where we allow copy
propagation for RELATIV access.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Eric Anholt <eric@anholt.net>
This commit is contained in:
Rob Clark 2019-08-12 11:34:18 -07:00 committed by Rob Clark
parent 50a91fbf87
commit 1fd6a91d4a

View file

@ -59,6 +59,11 @@ struct ir3_sched_ctx {
bool error;
};
/* Has this instruction already been emitted by the scheduler?
 * The MARK flag is set once an instruction is scheduled.
 */
static bool is_scheduled(struct ir3_instruction *instr)
{
	return (instr->flags & IR3_INSTR_MARK) != 0;
}
static bool is_sfu_or_mem(struct ir3_instruction *instr)
{
return is_sfu(instr) || is_mem(instr);
@ -87,8 +92,32 @@ unuse_each_src(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
}
}
static void clear_cache(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr);
static void use_instr(struct ir3_instruction *instr);
/* transfers a use-count to new instruction, for cases where we
* "spill" address or predicate. Note this might cause the
* previous instruction that loaded a0.x/p0.x to become live
* again, when we previously thought it was dead.
*/
static void
transfer_use(struct ir3_sched_ctx *ctx, struct ir3_instruction *orig_instr,
struct ir3_instruction *new_instr)
{
struct ir3_instruction *src;
/* Only a scheduled instruction can have its remaining use
 * transferred to a clone:
 */
debug_assert(is_scheduled(orig_instr));
foreach_ssa_src_n(src, n, new_instr) {
/* false deps are skipped: they do not contribute to liveness */
if (__is_false_dep(new_instr, n))
continue;
/* each real ssa src of the clone becomes live (again), and
 * gains a use-count for the clone:
 */
ctx->live_values += dest_regs(src);
use_instr(src);
}
clear_cache(ctx, orig_instr);
}
static void
use_each_src(struct ir3_instruction *instr)
{
@ -346,11 +375,6 @@ struct ir3_sched_notes {
bool addr_conflict, pred_conflict;
};
/* Has this instruction already been emitted by the scheduler?
 * (scheduling sets the MARK flag)
 */
static bool is_scheduled(struct ir3_instruction *instr)
{
	return (instr->flags & IR3_INSTR_MARK) != 0;
}
/* could an instruction be scheduled if specified ssa src was scheduled? */
static bool
could_sched(struct ir3_instruction *instr, struct ir3_instruction *src)
@ -372,6 +396,8 @@ static bool
check_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
struct ir3_instruction *instr)
{
debug_assert(!is_scheduled(instr));
/* For instructions that write address register we need to
* make sure there is at least one instruction that uses the
* addr value which is otherwise ready.
@ -640,6 +666,15 @@ find_eligible_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
return best_instr;
}
/* Clone an already-scheduled instruction so its remaining (unscheduled)
 * consumers can use the clone instead; the clone is queued for
 * scheduling and the use-count is moved over from the original.
 */
static struct ir3_instruction *
split_instr(struct ir3_sched_ctx *ctx, struct ir3_instruction *orig_instr)
{
	struct ir3_instruction *clone = ir3_instr_clone(orig_instr);

	ir3_insert_by_depth(clone, &ctx->depth_list);
	transfer_use(ctx, orig_instr, clone);

	return clone;
}
/* "spill" the address register by remapping any unscheduled
* instructions which depend on the current address register
* to a clone of the instruction which wrote the address reg.
@ -670,7 +705,7 @@ split_addr(struct ir3_sched_ctx *ctx)
*/
if (indirect->address == ctx->addr) {
if (!new_addr) {
new_addr = ir3_instr_clone(ctx->addr);
new_addr = split_instr(ctx, ctx->addr);
/* original addr is scheduled, but new one isn't: */
new_addr->flags &= ~IR3_INSTR_MARK;
}
@ -714,7 +749,7 @@ split_pred(struct ir3_sched_ctx *ctx)
*/
if (ssa(predicated->regs[1]) == ctx->pred) {
if (!new_pred) {
new_pred = ir3_instr_clone(ctx->pred);
new_pred = split_instr(ctx, ctx->pred);
/* original pred is scheduled, but new one isn't: */
new_pred->flags &= ~IR3_INSTR_MARK;
}