intel: switch to nir_metadata_divergence

Reviewed-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Rhys Perry <pendingchaos02@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/30814>
This commit is contained in:
Daniel Schürmann 2024-08-23 11:57:28 +02:00 committed by Marge Bot
parent 067478358f
commit 175c06e5cd
6 changed files with 8 additions and 23 deletions

View file

@@ -1664,7 +1664,6 @@ brw_vectorize_lower_mem_access(nir_shader *nir,
* - fewer send messages
* - reduced register pressure
*/
nir_divergence_analysis(nir);
if (OPT(intel_nir_blockify_uniform_loads, compiler->devinfo)) {
OPT(nir_opt_load_store_vectorize, &options);
@@ -1853,9 +1852,6 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
OPT(nir_opt_move, nir_move_comparisons);
OPT(nir_opt_dead_cf);
bool divergence_analysis_dirty = false;
NIR_PASS_V(nir, nir_divergence_analysis);
static const nir_lower_subgroups_options subgroups_options = {
.ballot_bit_size = 32,
.ballot_components = 1,
@@ -1870,8 +1866,6 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
if (OPT(nir_lower_int64))
brw_nir_optimize(nir, devinfo);
divergence_analysis_dirty = true;
}
/* nir_opt_uniform_subgroup can create some operations (e.g.,
@@ -1902,10 +1896,6 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
/* Do this only after the last opt_gcm. GCM will undo this lowering. */
if (nir->info.stage == MESA_SHADER_FRAGMENT) {
if (divergence_analysis_dirty) {
NIR_PASS_V(nir, nir_divergence_analysis);
}
OPT(intel_nir_lower_non_uniform_barycentric_at_sample);
}

View file

@@ -1468,9 +1468,6 @@ elk_postprocess_nir(nir_shader *nir, const struct elk_compiler *compiler,
OPT(nir_opt_move, nir_move_comparisons);
OPT(nir_opt_dead_cf);
bool divergence_analysis_dirty = false;
NIR_PASS_V(nir, nir_divergence_analysis);
/* TODO: Enable nir_opt_uniform_atomics on Gfx7.x too.
* It currently fails Vulkan tests on Haswell for an unknown reason.
*/
@@ -1486,16 +1483,10 @@ elk_postprocess_nir(nir_shader *nir, const struct elk_compiler *compiler,
if (OPT(nir_lower_int64))
elk_nir_optimize(nir, is_scalar, devinfo);
divergence_analysis_dirty = true;
}
/* Do this only after the last opt_gcm. GCM will undo this lowering. */
if (nir->info.stage == MESA_SHADER_FRAGMENT) {
if (divergence_analysis_dirty) {
NIR_PASS_V(nir, nir_divergence_analysis);
}
OPT(intel_nir_lower_non_uniform_barycentric_at_sample);
}

View file

@@ -237,9 +237,12 @@ bool
intel_nir_blockify_uniform_loads(nir_shader *shader,
const struct intel_device_info *devinfo)
{
nir_divergence_analysis(shader);
return nir_shader_instructions_pass(shader,
intel_nir_blockify_uniform_loads_instr,
nir_metadata_control_flow |
nir_metadata_live_defs,
nir_metadata_live_defs |
nir_metadata_divergence,
(void *) devinfo);
}

View file

@@ -131,6 +131,7 @@ intel_nir_lower_non_uniform_barycentric_at_sample(nir_shader *nir)
{
bool progress;
nir_divergence_analysis(nir);
nir_shader_clear_pass_flags(nir);
progress = nir_shader_instructions_pass(

View file

@@ -1010,9 +1010,6 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
NIR_PASS(progress, nir, nir_opt_dce);
} while (progress);
/* Needed for anv_nir_lower_ubo_loads. */
nir_divergence_analysis(nir);
NIR_PASS(_, nir, anv_nir_lower_ubo_loads);
enum nir_lower_non_uniform_access_type lower_non_uniform_access_types =

View file

@@ -22,6 +22,7 @@
*/
#include "anv_nir.h"
#include "nir.h"
#include "nir_builder.h"
static bool
@@ -114,6 +115,8 @@ lower_ubo_load_instr(nir_builder *b, nir_intrinsic_instr *load,
bool
anv_nir_lower_ubo_loads(nir_shader *shader)
{
nir_divergence_analysis(shader);
return nir_shader_intrinsics_pass(shader, lower_ubo_load_instr,
nir_metadata_none,
NULL);