intel/brw: Add phases to backend

The general idea is to be able to validate that certain instructions
were lowered and certain restrictions were already handled.  Passes can
now assert their expectations, i.e. whether a pass is meant to run after
certain lowerings or not.

The actual phases are an initial stab; as we re-organize the passes,
we may remove/add phases.

This commit just adds some phase steps; later commits will make use of
them.

Reviewed-by: Ian Romanick <ian.d.romanick@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/30496>
This commit is contained in:
Caio Oliveira 2024-08-27 10:16:11 -07:00 committed by Marge Bot
parent 21f78454bf
commit affa7567c2
6 changed files with 39 additions and 0 deletions

View file

@ -1691,6 +1691,8 @@ brw_allocate_registers(fs_visitor &s, bool allow_spilling)
s.debug_optimizer(nir, "lowered_vgrfs_to_fixed_grfs", 96, 3);
brw_shader_phase_update(s, BRW_SHADER_PHASE_AFTER_REGALLOC);
if (s.last_scratch > 0) {
/* We currently only support up to 2MB of scratch space. If we
* need to support more eventually, the documentation suggests
@ -1876,6 +1878,14 @@ brw_cs_get_dispatch_info(const struct intel_device_info *devinfo,
return info;
}
void
brw_shader_phase_update(fs_visitor &s, enum brw_shader_phase phase)
{
   /* Phases are strictly ordered: each update must advance the shader
    * exactly one step past its current phase.
    */
   assert(s.phase + 1 == phase);

   s.phase = phase;

   /* Check the IR at every phase transition. */
   brw_fs_validate(s);
}
bool brw_should_print_shader(const nir_shader *shader, uint64_t debug_flag)
{
return INTEL_DEBUG(debug_flag) && (!shader->info.internal || NIR_DEBUG(PRINT_INTERNAL));

View file

@ -257,6 +257,16 @@ enum instruction_scheduler_mode {
class instruction_scheduler;
/* Coarse compilation phases of the brw backend.  Phases advance strictly
 * one step at a time via brw_shader_phase_update(), which also re-runs
 * brw_fs_validate() after each transition, so passes can assert which
 * lowerings have (or have not) already happened.
 */
enum brw_shader_phase {
BRW_SHADER_PHASE_INITIAL = 0,
/* Entered at the end of nir_to_brw(). */
BRW_SHADER_PHASE_AFTER_NIR,
/* Entered in brw_fs_optimize() after the main optimization loop. */
BRW_SHADER_PHASE_AFTER_OPT_LOOP,
/* Entered after the early lowerings (e.g. logical SEND lowering). */
BRW_SHADER_PHASE_AFTER_EARLY_LOWERING,
BRW_SHADER_PHASE_AFTER_MIDDLE_LOWERING,
/* Entered at the end of brw_fs_optimize(). */
BRW_SHADER_PHASE_AFTER_LATE_LOWERING,
/* Entered in brw_allocate_registers() once VGRFs are mapped to HW GRFs. */
BRW_SHADER_PHASE_AFTER_REGALLOC,
};
/**
* The fragment shader front-end.
*
@ -365,6 +375,8 @@ public:
brw_reg dual_src_output;
int first_non_payload_grf;
enum brw_shader_phase phase;
bool failed;
char *fail_msg;
@ -594,6 +606,8 @@ int brw_get_subgroup_id_param_index(const intel_device_info *devinfo,
void nir_to_brw(fs_visitor *s);
void brw_shader_phase_update(fs_visitor &s, enum brw_shader_phase phase);
#ifndef NDEBUG
void brw_fs_validate(const fs_visitor &s);
#else

View file

@ -8432,4 +8432,6 @@ nir_to_brw(fs_visitor *s)
ntb.bld.emit(SHADER_OPCODE_HALT_TARGET);
ralloc_free(ntb.mem_ctx);
brw_shader_phase_update(*s, BRW_SHADER_PHASE_AFTER_NIR);
}

View file

@ -80,6 +80,8 @@ brw_fs_optimize(fs_visitor &s)
OPT(brw_fs_opt_compact_virtual_grfs);
} while (progress);
brw_shader_phase_update(s, BRW_SHADER_PHASE_AFTER_OPT_LOOP);
progress = false;
pass_num = 0;
@ -93,6 +95,8 @@ brw_fs_optimize(fs_visitor &s)
OPT(brw_fs_lower_barycentrics);
OPT(brw_fs_lower_logical_sends);
brw_shader_phase_update(s, BRW_SHADER_PHASE_AFTER_EARLY_LOWERING);
/* After logical SEND lowering. */
if (OPT(brw_fs_opt_copy_propagation_defs) || OPT(brw_fs_opt_copy_propagation))
@ -131,6 +135,8 @@ brw_fs_optimize(fs_visitor &s)
OPT(brw_fs_opt_dead_code_eliminate);
}
brw_shader_phase_update(s, BRW_SHADER_PHASE_AFTER_MIDDLE_LOWERING);
OPT(brw_fs_lower_alu_restrictions);
OPT(brw_fs_opt_combine_constants);
@ -169,6 +175,8 @@ brw_fs_optimize(fs_visitor &s)
OPT(brw_fs_lower_find_live_channel);
OPT(brw_fs_lower_load_subgroup_invocation);
brw_shader_phase_update(s, BRW_SHADER_PHASE_AFTER_LATE_LOWERING);
}
static unsigned

View file

@ -179,6 +179,9 @@ brw_fs_validate(const fs_visitor &s)
{
const intel_device_info *devinfo = s.devinfo;
if (s.phase <= BRW_SHADER_PHASE_AFTER_NIR)
return;
s.cfg->validate(_mesa_shader_stage_to_abbrev(s.stage));
foreach_block_and_inst (block, fs_inst, inst, s.cfg) {

View file

@ -470,6 +470,8 @@ fs_visitor::init()
this->grf_used = 0;
this->spilled_any_registers = false;
this->phase = BRW_SHADER_PHASE_INITIAL;
}
fs_visitor::~fs_visitor()