agx: add spill/fill lowering pass

This simple pass turns moves of memory registers (emitted by RA whenever the
spiller produced moves/phis of memory variables) into concrete stack load/store
instructions. It's a lot more convenient to do this as a dedicated post-RA pass
than to try to fold it into the hairball that is RA proper.
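
Roughly, the rewrite looks like this (illustrative spelling only, not exact agx
IR syntax; mN stands for a memory register):

  mov mN, r4   becomes   stack_store r4, #(spill_base + 2*N)
  mov r4, mN   becomes   stack_load  r4, #(spill_base + 2*N)

with anything wider than vec4 split into vec4-sized chunks.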

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/27616>
commit 14e1bb78ae (parent 40da539d01)
Alyssa Rosenzweig, 2024-01-27 14:16:18 -04:00, committed by Marge Bot
4 changed files with 84 additions and 0 deletions

@@ -879,6 +879,7 @@ void agx_print_block(const agx_block *block, FILE *fp);
void agx_print_shader(const agx_context *ctx, FILE *fp);
void agx_optimizer(agx_context *ctx);
void agx_lower_pseudo(agx_context *ctx);
void agx_lower_spill(agx_context *ctx);
void agx_lower_uniform_sources(agx_context *ctx);
void agx_opt_cse(agx_context *ctx);
void agx_dce(agx_context *ctx, bool partial);

@@ -0,0 +1,79 @@
/*
 * Copyright 2023 Alyssa Rosenzweig
 * SPDX-License-Identifier: MIT
 */

#include "util/macros.h"
#include "agx_builder.h"
#include "agx_compile.h"
#include "agx_compiler.h"

/* Lower moves involving memory registers (created when spilling) to concrete
 * spills and fills.
 */

static void
spill_fill(agx_builder *b, agx_instr *I, enum agx_size size, unsigned channels,
           unsigned component_offset)
{
   enum agx_format format =
      size == AGX_SIZE_16 ? AGX_FORMAT_I16 : AGX_FORMAT_I32;

   unsigned offset_B = component_offset * agx_size_align_16(size) * 2;
   unsigned effective_chans = size == AGX_SIZE_64 ? (channels * 2) : channels;
   unsigned mask = BITFIELD_MASK(effective_chans);

   assert(effective_chans <= 4);
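
   /* Editor's note: there is no 64-bit format above, so 64-bit data is
    * spilled/filled as twice as many 32-bit channels, hence the doubled
    * channel count.
    */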

   /* Pick off the memory and register parts of the move */
   agx_index mem = I->dest[0].memory ? I->dest[0] : I->src[0];
   agx_index reg = I->dest[0].memory ? I->src[0] : I->dest[0];

   assert(mem.type == AGX_INDEX_REGISTER && mem.memory);
   assert(reg.type == AGX_INDEX_REGISTER && !reg.memory);

   /* Slice the register according to the part of the spill we're handling */
   if (component_offset > 0 || channels != agx_channels(reg)) {
      reg.value += component_offset * agx_size_align_16(reg.size);
      reg.channels_m1 = channels - 1;
   }

   /* Calculate stack offset in bytes. IR registers are 2-bytes each. */
   unsigned stack_offs_B = b->shader->spill_base + (mem.value * 2) + offset_B;
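
   /* Illustrative arithmetic (editor's example, not from the original): with
    * spill_base = 256, a 32-bit value in memory register 10 and
    * component_offset = 2 lands at 256 + (10 * 2) + (2 * 2 * 2) = 284 bytes.
    */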

   /* Emit the spill/fill */
   if (I->dest[0].memory) {
      agx_stack_store(b, reg, agx_immediate(stack_offs_B), format, mask);
      b->shader->spills++;
   } else {
      agx_stack_load_to(b, reg, agx_immediate(stack_offs_B), format, mask);
      b->shader->fills++;
   }
}

void
agx_lower_spill(agx_context *ctx)
{
   agx_foreach_instr_global_safe(ctx, I) {
      if (I->op != AGX_OPCODE_MOV || (!I->dest[0].memory && !I->src[0].memory))
         continue;

      enum agx_size size = I->dest[0].size;
      unsigned channels = agx_channels(I->dest[0]);

      assert(size == I->src[0].size);
      assert(channels == agx_channels(I->src[0]));

      /* Texture gradient sources can be vec6, and if such a vector is spilled,
       * we need to be able to spill/fill a vec6. Since stack_store/stack_load
       * only work up to vec4, we break up into (at most) vec4 components.
       */
      agx_builder b = agx_init_builder(ctx, agx_before_instr(I));

      for (unsigned c = 0; c < channels; c += 4) {
         spill_fill(&b, I, size, MIN2(channels - c, 4), c);
      }

      agx_remove_instruction(I);
   }
}

@@ -1351,6 +1351,9 @@ agx_ra(agx_context *ctx)
      }
   }

   if (spilling)
      agx_lower_spill(ctx);

   agx_foreach_block(ctx, block) {
      free(block->ssa_to_reg_out);
      block->ssa_to_reg_out = NULL;

@@ -23,6 +23,7 @@ libasahi_agx_files = files(
  'agx_lower_64bit.c',
  'agx_lower_parallel_copy.c',
  'agx_lower_pseudo.c',
  'agx_lower_spill.c',
  'agx_lower_uniform_sources.c',
  'agx_pack.c',
  'agx_performance.c',