nir: rename nir_src_is_dynamically_uniform to nir_src_is_always_uniform

As this function doesn't take control-flow dependence into account, it
only returns true for statically (or globally) uniform values. The same
holds true for is_binding_dynamically_uniform() in nir_opt_gcm(). Rename
both to better reflect that property.

Reviewed-by: Emma Anholt <emma@anholt.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14994>
Daniel Schürmann 2022-02-11 11:18:51 +01:00 committed by Marge Bot
parent a9eddf9edc
commit 832d67e99d
5 changed files with 30 additions and 27 deletions
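
A note on the distinction driving the rename: "dynamically uniform" only requires a value to be the same across the invocations of an invocation group at run time, while this helper can merely prove the stronger, static property that a value is built from constants, uniforms and push constants alone. A minimal sketch of what the renamed helper reports, written against the NIR builder API; demo_always_uniform is a hypothetical name and the snippet is not part of this commit:

#include <assert.h>
#include "nir.h"
#include "nir_builder.h"

/* Hypothetical illustration only; assumes an initialized nir_builder. */
static void
demo_always_uniform(nir_builder *b)
{
   /* An ALU expression over constants is reported as always uniform. */
   nir_ssa_def *sum = nir_iadd(b, nir_imm_int(b, 4), nir_imm_int(b, 2));
   assert(nir_src_is_always_uniform(nir_src_for_ssa(sum)));

   /* gl_DrawID is dynamically uniform by definition, but it is not
    * statically uniform, so nir_src_is_always_uniform() returns false. */
   nir_ssa_def *draw_id = nir_load_draw_id(b);
   assert(!nir_src_is_always_uniform(nir_src_for_ssa(draw_id)));
}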

src/compiler/nir/nir.c

@@ -1589,17 +1589,19 @@ nir_src_as_const_value(nir_src src)
}
/**
- * Returns true if the source is known to be dynamically uniform. Otherwise it
- * returns false which means it may or may not be dynamically uniform but it
- * can't be determined.
+ * Returns true if the source is known to be always uniform. Otherwise it
+ * returns false which means it may or may not be uniform but it can't be
+ * determined.
+ *
+ * For a more precise analysis of uniform values, use nir_divergence_analysis.
*/
bool
- nir_src_is_dynamically_uniform(nir_src src)
+ nir_src_is_always_uniform(nir_src src)
{
if (!src.is_ssa)
return false;
- /* Constants are trivially dynamically uniform */
+ /* Constants are trivially uniform */
if (src.ssa->parent_instr->type == nir_instr_type_load_const)
return true;
@@ -1607,9 +1609,12 @@ nir_src_is_dynamically_uniform(nir_src src)
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);
/* As are uniform variables */
if (intr->intrinsic == nir_intrinsic_load_uniform &&
- nir_src_is_dynamically_uniform(intr->src[0]))
+ nir_src_is_always_uniform(intr->src[0]))
return true;
- /* Push constant loads always use uniform offsets. */
+ /* From the Vulkan specification 15.6.1. Push Constant Interface:
+ * "Any member of a push constant block that is declared as an array must
+ * only be accessed with dynamically uniform indices."
+ */
if (intr->intrinsic == nir_intrinsic_load_push_constant)
return true;
if (intr->intrinsic == nir_intrinsic_load_deref &&
@@ -1617,13 +1622,11 @@ nir_src_is_dynamically_uniform(nir_src src)
return true;
}
- /* Operating together dynamically uniform expressions produces a
- * dynamically uniform result
- */
+ /* Operating together uniform expressions produces a uniform result */
if (src.ssa->parent_instr->type == nir_instr_type_alu) {
nir_alu_instr *alu = nir_instr_as_alu(src.ssa->parent_instr);
for (int i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
- if (!nir_src_is_dynamically_uniform(alu->src[i].src))
+ if (!nir_src_is_always_uniform(alu->src[i].src))
return false;
}
@@ -1631,7 +1634,7 @@ nir_src_is_dynamically_uniform(nir_src src)
}
/* XXX: this could have many more tests, such as when a sampler function is
- * called with dynamically uniform arguments.
+ * called with uniform arguments.
*/
return false;
}

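The added comment points to nir_divergence_analysis for the cases this syntactic check cannot prove. A rough sketch of how the two checks can complement each other; offset_can_be_scalarized is a hypothetical helper, and it assumes nir_divergence_analysis() has already been run on the shader so that nir_src_is_divergent() returns up-to-date results:

#include "nir.h"

/* Hypothetical helper, not part of this commit. */
static bool
offset_can_be_scalarized(nir_src src)
{
   /* Purely syntactic: constants, uniform/push-constant loads and ALU
    * expressions built only from those. */
   if (nir_src_is_always_uniform(src))
      return true;

   /* Divergence analysis is more precise: it also accepts values that are
    * uniform only because of the surrounding control flow. */
   return !nir_src_is_divergent(src);
}
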
src/compiler/nir/nir.h

@@ -4053,7 +4053,7 @@ NIR_SRC_AS_(intrinsic, nir_intrinsic_instr,
nir_instr_type_intrinsic, nir_instr_as_intrinsic)
NIR_SRC_AS_(deref, nir_deref_instr, nir_instr_type_deref, nir_instr_as_deref)
- bool nir_src_is_dynamically_uniform(nir_src src);
+ bool nir_src_is_always_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
bool nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2);

src/compiler/nir/nir_opt_gcm.c

@@ -223,14 +223,14 @@ is_src_scalarizable(nir_src *src)
}
static bool
- is_binding_dynamically_uniform(nir_src src)
+ is_binding_uniform(nir_src src)
{
nir_binding binding = nir_chase_binding(src);
if (!binding.success)
return false;
for (unsigned i = 0; i < binding.num_indices; i++) {
- if (!nir_src_is_dynamically_uniform(binding.indices[i]))
+ if (!nir_src_is_always_uniform(binding.indices[i]))
return false;
}
@@ -265,10 +265,10 @@ pin_intrinsic(nir_intrinsic_instr *intrin)
intrin->intrinsic == nir_intrinsic_deref_buffer_array_length) &&
nir_deref_mode_may_be(nir_src_as_deref(intrin->src[0]),
nir_var_mem_ubo | nir_var_mem_ssbo)))) {
- if (!is_binding_dynamically_uniform(intrin->src[0]))
+ if (!is_binding_uniform(intrin->src[0]))
instr->pass_flags = GCM_INSTR_PINNED;
} else if (intrin->intrinsic == nir_intrinsic_load_push_constant) {
- if (!nir_src_is_dynamically_uniform(intrin->src[0]))
+ if (!nir_src_is_always_uniform(intrin->src[0]))
instr->pass_flags = GCM_INSTR_PINNED;
} else if (intrin->intrinsic == nir_intrinsic_load_deref &&
nir_deref_mode_is(nir_src_as_deref(intrin->src[0]),
@@ -277,7 +277,7 @@ pin_intrinsic(nir_intrinsic_instr *intrin)
while (deref->deref_type != nir_deref_type_var) {
if ((deref->deref_type == nir_deref_type_array ||
deref->deref_type == nir_deref_type_ptr_as_array) &&
- !nir_src_is_dynamically_uniform(deref->arr.index)) {
+ !nir_src_is_always_uniform(deref->arr.index)) {
instr->pass_flags = GCM_INSTR_PINNED;
return;
}
@@ -342,21 +342,21 @@ gcm_pin_instructions(nir_function_impl *impl, struct gcm_state *state)
nir_tex_src *src = &tex->src[i];
switch (src->src_type) {
case nir_tex_src_texture_deref:
- if (!tex->texture_non_uniform && !is_binding_dynamically_uniform(src->src))
+ if (!tex->texture_non_uniform && !is_binding_uniform(src->src))
instr->pass_flags = GCM_INSTR_PINNED;
break;
case nir_tex_src_sampler_deref:
- if (!tex->sampler_non_uniform && !is_binding_dynamically_uniform(src->src))
+ if (!tex->sampler_non_uniform && !is_binding_uniform(src->src))
instr->pass_flags = GCM_INSTR_PINNED;
break;
case nir_tex_src_texture_offset:
case nir_tex_src_texture_handle:
- if (!tex->texture_non_uniform && !nir_src_is_dynamically_uniform(src->src))
+ if (!tex->texture_non_uniform && !nir_src_is_always_uniform(src->src))
instr->pass_flags = GCM_INSTR_PINNED;
break;
case nir_tex_src_sampler_offset:
case nir_tex_src_sampler_handle:
- if (!tex->sampler_non_uniform && !nir_src_is_dynamically_uniform(src->src))
+ if (!tex->sampler_non_uniform && !nir_src_is_always_uniform(src->src))
instr->pass_flags = GCM_INSTR_PINNED;
break;
default:

src/gallium/auxiliary/gallivm/lp_bld_nir.c

@@ -1371,7 +1371,7 @@ static void visit_load_ubo(struct lp_build_nir_context *bld_base,
LLVMValueRef idx = get_src(bld_base, instr->src[0]);
LLVMValueRef offset = get_src(bld_base, instr->src[1]);
- bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[1]);
+ bool offset_is_uniform = nir_src_is_always_uniform(instr->src[1]);
idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
offset_is_uniform, idx, offset, result);
@@ -1384,7 +1384,7 @@ static void visit_load_push_constant(struct lp_build_nir_context *bld_base,
struct gallivm_state *gallivm = bld_base->base.gallivm;
LLVMValueRef offset = get_src(bld_base, instr->src[0]);
LLVMValueRef idx = lp_build_const_int32(gallivm, 0);
- bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[0]);
+ bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
offset_is_uniform, idx, offset, result);
@@ -1685,7 +1685,7 @@ static void visit_load_kernel_input(struct lp_build_nir_context *bld_base,
{
LLVMValueRef offset = get_src(bld_base, instr->src[0]);
- bool offset_is_uniform = nir_src_is_dynamically_uniform(instr->src[0]);
+ bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
bld_base->load_kernel_arg(bld_base, nir_dest_num_components(instr->dest), nir_dest_bit_size(instr->dest),
nir_src_bit_size(instr->src[0]),
offset_is_uniform, offset, result);
@@ -2026,7 +2026,7 @@ static enum lp_sampler_lod_property lp_build_nir_lod_property(struct lp_build_ni
{
enum lp_sampler_lod_property lod_property;
- if (nir_src_is_dynamically_uniform(lod_src))
+ if (nir_src_is_always_uniform(lod_src))
lod_property = LP_SAMPLER_LOD_SCALAR;
else if (bld_base->shader->info.stage == MESA_SHADER_FRAGMENT) {
if (gallivm_perf & GALLIVM_PERF_NO_QUAD_LOD)

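For context on why the gallivm code tracks offset_is_uniform at all: a provably uniform offset lets the backend perform one scalar load and broadcast it to every SIMD lane instead of emitting a per-lane gather. The standalone C sketch below only illustrates that idea; LANES and load_ubo_lanes are made up for the example and are not gallivm APIs:

#include <stdbool.h>
#include <stdint.h>

#define LANES 8 /* hypothetical SIMD width */

/* Illustrates the effect of the offset_is_uniform flag passed to load_ubo. */
static void
load_ubo_lanes(const uint32_t *ubo, const uint32_t offset[LANES],
               bool offset_is_uniform, uint32_t out[LANES])
{
   if (offset_is_uniform) {
      uint32_t v = ubo[offset[0]];   /* one scalar load...        */
      for (int i = 0; i < LANES; i++)
         out[i] = v;                 /* ...broadcast to all lanes */
   } else {
      for (int i = 0; i < LANES; i++)
         out[i] = ubo[offset[i]];    /* per-lane gather           */
   }
}
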
src/intel/compiler/brw_fs_nir.cpp

@@ -3688,7 +3688,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
const fs_reg sample_src = retype(get_nir_src(instr->src[0]),
BRW_REGISTER_TYPE_UD);
- if (nir_src_is_dynamically_uniform(instr->src[0])) {
+ if (nir_src_is_always_uniform(instr->src[0])) {
const fs_reg sample_id = bld.emit_uniformize(sample_src);
const fs_reg msg_data = vgrf(glsl_type::uint_type);
bld.exec_all().group(1, 0)