nir/lower_shader_calls: avoid respilling values

Currently we do something like this:

  ssa_0 = ...
  ssa_1 = ...
  * spill ssa_0, ssa_1
  call1()
  * fill ssa_0, ssa_1
  ssa_2 = ...
  ssa_3 = ...
  * spill ssa_0, ssa_1, ssa_2, ssa_3
  call2()
  * fill ssa_0, ssa_1, ssa_2, ssa_3

If we assign the same position to ssa_0 & ssa_1 in the spilling
stack, then at call2() we know that those values are already present
in memory at the right location and we can avoid respilling them.

The result would be something like this:

  ssa_0 = ...
  ssa_1 = ...
  * spill ssa_0, ssa_1
  call1()
  * fill ssa_0, ssa_1
  ssa_2 = ...
  ssa_3 = ...
  * spill ssa_2, ssa_3
  call2()
  * fill ssa_0, ssa_1, ssa_2, ssa_3
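
In NIR terms, the redundant respill shows up in a resume shader as a
store_stack intrinsic whose value comes straight from a load_stack at
the same base offset. A rough sketch (syntax simplified, offsets
illustrative) of the pattern the new nir_opt_remove_respills pass
drops:

  ssa_4 = load_stack (base=0)
  store_stack ssa_4 (base=0)   <- removable, value is already at offset 0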

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Konstantin Seurer <konstantin.seurer@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16556>

@@ -1209,6 +1209,45 @@ nir_lower_stack_to_scratch(nir_shader *shader,
                                       &state);
}

static bool
opt_remove_respills_instr(struct nir_builder *b, nir_instr *instr, void *data)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *store_intrin = nir_instr_as_intrinsic(instr);
   if (store_intrin->intrinsic != nir_intrinsic_store_stack)
      return false;

   /* Only consider stores whose value comes straight from a stack load. */
   nir_instr *value_instr = store_intrin->src[0].ssa->parent_instr;
   if (value_instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *load_intrin = nir_instr_as_intrinsic(value_instr);
   if (load_intrin->intrinsic != nir_intrinsic_load_stack)
      return false;

   /* Same stack offset: the value is already in memory, drop the store. */
   if (nir_intrinsic_base(load_intrin) != nir_intrinsic_base(store_intrin))
      return false;

   nir_instr_remove(&store_intrin->instr);

   return true;
}

/* After shader split, look at stack load/store operations. If we're loading
 * and storing the same value at the same location, we can drop the store
 * instruction.
 */
static bool
nir_opt_remove_respills(nir_shader *shader)
{
   return nir_shader_instructions_pass(shader,
                                       opt_remove_respills_instr,
                                       nir_metadata_block_index |
                                       nir_metadata_dominance,
                                       NULL);
}

/** Lower shader call instructions to split shaders.
 *
 * Shader calls can be split into an initial shader and a series of "resume"

@@ -1296,6 +1335,9 @@ nir_lower_shader_calls(nir_shader *shader,
      nir_opt_if(resume_shaders[i], nir_opt_if_optimize_phi_true_false);
   }

   for (unsigned i = 0; i < num_calls; i++)
      NIR_PASS_V(resume_shaders[i], nir_opt_remove_respills);

   NIR_PASS_V(shader, nir_lower_stack_to_scratch, address_format);
   for (unsigned i = 0; i < num_calls; i++)
      NIR_PASS_V(resume_shaders[i], nir_lower_stack_to_scratch, address_format);