nir/cse: use the instruction set API

This replaces an O(n^2) algorithm with an O(n) one, while allowing us to
import most of the infrastructure required for GVN. The idea is to walk
the dominance tree depth-first, similar to when converting to SSA, and
remove the instructions from the set when we're done visiting the
sub-tree of the dominance tree so that the only instructions in the set
are the instructions that dominate the current block.

No piglit regressions. No shader-db changes.

Compilation time for full shader-db:

Difference at 95.0% confidence
        -35.826 +/- 2.16018
        -6.2852% +/- 0.378975%
        (Student's t, pooled s = 3.37504)

v2:
- rebase on start_block removal
- remove useless state struct
- change commit message

Reviewed-by: Jason Ekstrand <jason.ekstrand@intel.com>
Signed-off-by: Connor Abbott <cwabbott0@gmail.com>
This commit is contained in:
Connor Abbott 2015-05-22 00:41:45 -04:00
parent 523a28d3fe
commit e8308d0523

View file

@ -22,6 +22,7 @@
*
* Authors:
* Jason Ekstrand (jason@jlekstrand.net)
* Connor Abbott (cwabbott0@gmail.com)
*
*/
@ -31,144 +32,50 @@
* Implements common subexpression elimination
*/
struct cse_state {
void *mem_ctx;
bool progress;
};
/*
* Visits and CSE's the given block and all its descendants in the dominance
* tree recursively. Note that the instr_set is guaranteed to only ever
* contain instructions that dominate the current block.
*/
static bool
src_is_ssa(nir_src *src, void *data)
cse_block(nir_block *block, struct set *instr_set)
{
(void) data;
return src->is_ssa;
}
bool progress = false;
static bool
dest_is_ssa(nir_dest *dest, void *data)
{
(void) data;
return dest->is_ssa;
}
static bool
nir_instr_can_cse(nir_instr *instr)
{
/* We only handle SSA. */
if (!nir_foreach_dest(instr, dest_is_ssa, NULL) ||
!nir_foreach_src(instr, src_is_ssa, NULL))
return false;
switch (instr->type) {
case nir_instr_type_alu:
case nir_instr_type_tex:
case nir_instr_type_load_const:
case nir_instr_type_phi:
return true;
case nir_instr_type_intrinsic: {
const nir_intrinsic_info *info =
&nir_intrinsic_infos[nir_instr_as_intrinsic(instr)->intrinsic];
return (info->flags & NIR_INTRINSIC_CAN_ELIMINATE) &&
(info->flags & NIR_INTRINSIC_CAN_REORDER) &&
info->num_variables == 0; /* not implemented yet */
}
case nir_instr_type_call:
case nir_instr_type_jump:
case nir_instr_type_ssa_undef:
return false;
case nir_instr_type_parallel_copy:
default:
unreachable("Invalid instruction type");
}
return false;
}
static nir_ssa_def *
nir_instr_get_dest_ssa_def(nir_instr *instr)
{
switch (instr->type) {
case nir_instr_type_alu:
assert(nir_instr_as_alu(instr)->dest.dest.is_ssa);
return &nir_instr_as_alu(instr)->dest.dest.ssa;
case nir_instr_type_tex:
assert(nir_instr_as_tex(instr)->dest.is_ssa);
return &nir_instr_as_tex(instr)->dest.ssa;
case nir_instr_type_load_const:
return &nir_instr_as_load_const(instr)->def;
case nir_instr_type_phi:
assert(nir_instr_as_phi(instr)->dest.is_ssa);
return &nir_instr_as_phi(instr)->dest.ssa;
case nir_instr_type_intrinsic:
assert(nir_instr_as_intrinsic(instr)->dest.is_ssa);
return &nir_instr_as_intrinsic(instr)->dest.ssa;
default:
unreachable("We never ask for any of these");
}
}
static void
nir_opt_cse_instr(nir_instr *instr, struct cse_state *state)
{
if (!nir_instr_can_cse(instr))
return;
for (struct exec_node *node = instr->node.prev;
!exec_node_is_head_sentinel(node); node = node->prev) {
nir_instr *other = exec_node_data(nir_instr, node, node);
if (nir_instrs_equal(instr, other)) {
nir_ssa_def *other_def = nir_instr_get_dest_ssa_def(other);
nir_ssa_def_rewrite_uses(nir_instr_get_dest_ssa_def(instr),
nir_src_for_ssa(other_def));
nir_foreach_instr_safe(block, instr) {
if (nir_instr_set_add_or_rewrite(instr_set, instr)) {
progress = true;
nir_instr_remove(instr);
state->progress = true;
return;
}
}
for (nir_block *block = instr->block->imm_dom;
block != NULL; block = block->imm_dom) {
nir_foreach_instr_reverse(block, other) {
if (nir_instrs_equal(instr, other)) {
nir_ssa_def *other_def = nir_instr_get_dest_ssa_def(other);
nir_ssa_def_rewrite_uses(nir_instr_get_dest_ssa_def(instr),
nir_src_for_ssa(other_def));
nir_instr_remove(instr);
state->progress = true;
return;
}
}
for (unsigned i = 0; i < block->num_dom_children; i++) {
nir_block *child = block->dom_children[i];
progress |= cse_block(child, instr_set);
}
}
static bool
nir_opt_cse_block(nir_block *block, void *void_state)
{
struct cse_state *state = void_state;
nir_foreach_instr(block, instr)
nir_instr_set_remove(instr_set, instr);
nir_foreach_instr_safe(block, instr)
nir_opt_cse_instr(instr, state);
return true;
return progress;
}
static bool
nir_opt_cse_impl(nir_function_impl *impl)
{
struct cse_state state;
state.mem_ctx = ralloc_parent(impl);
state.progress = false;
struct set *instr_set = nir_instr_set_create(NULL);
nir_metadata_require(impl, nir_metadata_dominance);
nir_foreach_block(impl, nir_opt_cse_block, &state);
bool progress = cse_block(nir_start_block(impl), instr_set);
if (state.progress)
if (progress)
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);
return state.progress;
nir_instr_set_destroy(instr_set);
return progress;
}
bool
@ -183,3 +90,4 @@ nir_opt_cse(nir_shader *shader)
return progress;
}