diff --git a/src/amd/common/ac_nir_lower_ngg.c b/src/amd/common/ac_nir_lower_ngg.c
index a89b521b10c..c4b772613fa 100644
--- a/src/amd/common/ac_nir_lower_ngg.c
+++ b/src/amd/common/ac_nir_lower_ngg.c
@@ -1285,7 +1285,7 @@ save_reusable_variables(nir_builder *b, lower_ngg_nogs_state *s)
        */
       bool next_is_divergent_if =
          next_cf_node->type == nir_cf_node_if &&
-         nir_cf_node_as_if(next_cf_node)->condition.ssa->divergent;
+         nir_src_is_divergent(&nir_cf_node_as_if(next_cf_node)->condition);
 
       if (next_is_loop || next_is_divergent_if) {
          block = nir_cf_node_cf_tree_next(next_cf_node);
diff --git a/src/compiler/nir/nir_opt_non_uniform_access.c b/src/compiler/nir/nir_opt_non_uniform_access.c
index 98ecc0073a0..46bdee8a3e0 100644
--- a/src/compiler/nir/nir_opt_non_uniform_access.c
+++ b/src/compiler/nir/nir_opt_non_uniform_access.c
@@ -164,7 +164,7 @@ opt_non_uniform_tex_access(nir_tex_instr *tex)
       case nir_tex_src_texture_offset:
       case nir_tex_src_texture_handle:
       case nir_tex_src_texture_deref:
-         if (tex->texture_non_uniform && !tex->src[i].src.ssa->divergent) {
+         if (tex->texture_non_uniform && !nir_src_is_divergent(&tex->src[i].src)) {
             tex->texture_non_uniform = false;
             progress = true;
          }
@@ -173,7 +173,7 @@ opt_non_uniform_tex_access(nir_tex_instr *tex)
       case nir_tex_src_sampler_offset:
       case nir_tex_src_sampler_handle:
       case nir_tex_src_sampler_deref:
-         if (tex->sampler_non_uniform && !tex->src[i].src.ssa->divergent) {
+         if (tex->sampler_non_uniform && !nir_src_is_divergent(&tex->src[i].src)) {
             tex->sampler_non_uniform = false;
             progress = true;
          }
@@ -193,7 +193,7 @@ opt_non_uniform_access_intrin(nir_intrinsic_instr *intrin, unsigned handle_src)
    if (!has_non_uniform_access_intrin(intrin))
       return false;
 
-   if (intrin->src[handle_src].ssa->divergent)
+   if (nir_src_is_divergent(&intrin->src[handle_src]))
       return false;
 
    nir_intrinsic_set_access(intrin, nir_intrinsic_access(intrin) & ~ACCESS_NON_UNIFORM);
diff --git a/src/compiler/nir/nir_opt_uniform_atomics.c b/src/compiler/nir/nir_opt_uniform_atomics.c
index 583c64d53bc..6fb583d67c7 100644
--- a/src/compiler/nir/nir_opt_uniform_atomics.c
+++ b/src/compiler/nir/nir_opt_uniform_atomics.c
@@ -213,7 +213,8 @@ optimize_atomic(nir_builder *b, nir_intrinsic_instr *intrin, bool return_prev)
    nir_def *data = intrin->src[data_src].ssa;
 
    /* Separate uniform reduction and scan is faster than doing a combined scan+reduce */
-   bool combined_scan_reduce = return_prev && data->divergent;
+   bool combined_scan_reduce = return_prev &&
+                               nir_src_is_divergent(&intrin->src[data_src]);
    nir_def *reduce = NULL, *scan = NULL;
    reduce_data(b, op, data, &reduce, combined_scan_reduce ? &scan : NULL);
 
diff --git a/src/compiler/nir/nir_opt_varyings.c b/src/compiler/nir/nir_opt_varyings.c
index eb15ec96993..5732d9de8b9 100644
--- a/src/compiler/nir/nir_opt_varyings.c
+++ b/src/compiler/nir/nir_opt_varyings.c
@@ -1409,9 +1409,9 @@ gather_outputs(struct nir_builder *builder, nir_intrinsic_instr *intr, void *cb_
     * so we only propagate constants.
     * TODO: revisit this when workgroup divergence analysis is merged.
     */
-   const bool divergent = value->divergent ||
+   const bool divergent = (!constant && linkage->producer_stage == MESA_SHADER_MESH) ||
                           intr->instr.block->divergent ||
-                          (!constant && linkage->producer_stage == MESA_SHADER_MESH);
+                          nir_src_is_divergent(&intr->src[0]);
 
    if (!out->producer.value) {
       /* This is the first store to this output. */
diff --git a/src/freedreno/ir3/ir3_compiler_nir.c b/src/freedreno/ir3/ir3_compiler_nir.c
index 73330ee7d35..a26cdcde8f3 100644
--- a/src/freedreno/ir3/ir3_compiler_nir.c
+++ b/src/freedreno/ir3/ir3_compiler_nir.c
@@ -4487,7 +4487,7 @@ emit_if(struct ir3_context *ctx, nir_if *nif)
       emit_conditional_branch(ctx, nif);
    }
 
-   ctx->block->divergent_condition = nif->condition.ssa->divergent;
+   ctx->block->divergent_condition = nir_src_is_divergent(&nif->condition);
 
    emit_cf_list(ctx, &nif->then_list);
    emit_cf_list(ctx, &nif->else_list);
diff --git a/src/gallium/drivers/radeonsi/si_shader_nir.c b/src/gallium/drivers/radeonsi/si_shader_nir.c
index 8581f0b7262..67fbb80f45f 100644
--- a/src/gallium/drivers/radeonsi/si_shader_nir.c
+++ b/src/gallium/drivers/radeonsi/si_shader_nir.c
@@ -400,7 +400,7 @@ static bool si_mark_divergent_texture_non_uniform(struct nir_shader *nir)
          nir_tex_instr *tex = nir_instr_as_tex(instr);
 
          for (int i = 0; i < tex->num_srcs; i++) {
-            bool divergent = tex->src[i].src.ssa->divergent;
+            bool divergent = nir_src_is_divergent(&tex->src[i].src);
 
             switch (tex->src[i].src_type) {
             case nir_tex_src_texture_deref:
diff --git a/src/nouveau/compiler/nak_nir_lower_cf.c b/src/nouveau/compiler/nak_nir_lower_cf.c
index 45dbb87987e..c2aedf8679f 100644
--- a/src/nouveau/compiler/nak_nir_lower_cf.c
+++ b/src/nouveau/compiler/nak_nir_lower_cf.c
@@ -295,18 +295,19 @@ lower_cf_list(nir_builder *b, nir_def *esc_reg, struct scope *parent_scope,
          nir_if *nif = nir_cf_node_as_if(node);
 
          nir_def *cond = nif->condition.ssa;
+         bool divergent = nir_src_is_divergent(&nif->condition);
          nir_instr_clear_src(NULL, &nif->condition);
 
          nir_block *then_block = nir_block_create(b->shader);
         nir_block *else_block = nir_block_create(b->shader);
         nir_block *merge_block = nir_block_create(b->shader);
 
-         const bool needs_sync = cond->divergent &&
+         const bool needs_sync = divergent &&
            block_is_merge(nir_cf_node_as_block(nir_cf_node_next(node))) &&
            !parent_scope_will_sync(&nif->cf_node, parent_scope);
 
         struct scope scope = push_scope(b, SCOPE_TYPE_IF_MERGE,
-                                         parent_scope, cond->divergent,
+                                         parent_scope, divergent,
                                          needs_sync, merge_block);
 
         nir_goto_if(b, then_block, cond, else_block);
diff --git a/src/nouveau/compiler/nak_nir_lower_non_uniform_ldcx.c b/src/nouveau/compiler/nak_nir_lower_non_uniform_ldcx.c
index 88073d829e8..231a891dba8 100644
--- a/src/nouveau/compiler/nak_nir_lower_non_uniform_ldcx.c
+++ b/src/nouveau/compiler/nak_nir_lower_non_uniform_ldcx.c
@@ -432,7 +432,7 @@ lower_cf_list(nir_builder *b, struct exec_list *cf_list)
      case nir_cf_node_if: {
         nir_if *nif = nir_cf_node_as_if(node);
 
-        if (nif->condition.ssa->divergent) {
+        if (nir_src_is_divergent(&nif->condition)) {
            nir_block *succ = nir_cf_node_as_block(nir_cf_node_next(node));
            progress |= lower_non_uniform_cf_node(b, node, block, succ);
         } else {
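Note (not part of the patch): every hunk above applies the same mechanical substitution, replacing a direct read of the divergence flag on a source's SSA def with the nir_src_is_divergent() accessor, which as used here takes a nir_src pointer. Below is a minimal sketch of the resulting call-site pattern, modelled on the texture-source case from nir_opt_non_uniform_access.c; the wrapper function is hypothetical and exists only to illustrate the pattern, assuming the usual NIR header.

    #include "nir.h"

    /* Hypothetical helper, for illustration only: reports whether the i-th
     * source of a texture instruction is divergent, querying through the
     * nir_src accessor instead of dereferencing src.ssa->divergent. */
    static bool
    tex_src_is_divergent_example(nir_tex_instr *tex, unsigned i)
    {
       return nir_src_is_divergent(&tex->src[i].src);
    }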