diff --git a/src/intel/compiler/brw_nir_analyze_ubo_ranges.c b/src/intel/compiler/brw_nir_analyze_ubo_ranges.c
index adf7917cafc..6ac2e19aa45 100644
--- a/src/intel/compiler/brw_nir_analyze_ubo_ranges.c
+++ b/src/intel/compiler/brw_nir_analyze_ubo_ranges.c
@@ -96,7 +96,6 @@ struct ubo_block_info
 
 struct ubo_analysis_state
 {
    struct hash_table *blocks;
-   bool uses_regular_uniforms;
    const struct intel_device_info *devinfo;
 };
@@ -127,18 +126,9 @@ analyze_ubos_block(struct ubo_analysis_state *state, nir_block *block)
          continue;
 
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-      switch (intrin->intrinsic) {
-      case nir_intrinsic_load_uniform:
-         state->uses_regular_uniforms = true;
+      if (intrin->intrinsic != nir_intrinsic_load_ubo)
          continue;
 
-      case nir_intrinsic_load_ubo:
-         break; /* Fall through to the analysis below */
-
-      default:
-         continue; /* Not a uniform or UBO intrinsic */
-      }
-
       if (brw_nir_ubo_surface_index_is_pushable(intrin->src[0]) &&
           nir_src_is_const(intrin->src[1])) {
          const int block = brw_nir_ubo_surface_index_get_push_block(intrin->src[0]);
@@ -193,18 +183,11 @@ brw_nir_analyze_ubo_ranges(const struct brw_compiler *compiler,
    void *mem_ctx = ralloc_context(NULL);
 
    struct ubo_analysis_state state = {
-      .uses_regular_uniforms = false,
       .blocks = _mesa_hash_table_create(mem_ctx, NULL,
                                         _mesa_key_pointer_equal),
       .devinfo = compiler->devinfo,
    };
 
-   /* Compute shaders use push constants to get the subgroup ID so it's
-    * best to just assume some system values are pushed.
-    */
-   if (nir->info.stage == MESA_SHADER_COMPUTE)
-      state.uses_regular_uniforms = true;
-
    /* Walk the IR, recording how many times each UBO block/offset is used. */
    nir_foreach_function_impl(impl, nir) {
       nir_foreach_block(block, impl) {
@@ -278,7 +261,7 @@ brw_nir_analyze_ubo_ranges(const struct brw_compiler *compiler,
 
    /* TODO: Consider combining ranges.
     *
-    * We can only push 3-4 ranges via 3DSTATE_CONSTANT_XS. If there are
+    * We can only push 4 ranges via 3DSTATE_CONSTANT_XS. If there are
     * more ranges, and two are close by with only a small hole, it may be
     * worth combining them. The holes will waste register space, but the
     * benefit of removing pulls may outweigh that cost.
@@ -292,9 +275,7 @@ brw_nir_analyze_ubo_ranges(const struct brw_compiler *compiler,
 
    struct ubo_range_entry *entries = ranges.data;
 
-   /* Return the top 4 or so. We drop by one if regular uniforms are in
-    * use, assuming one push buffer will be dedicated to those. We may
-    * also only get 3 on Haswell if we can't write INSTPM.
+   /* Return the top 4.
     *
     * The backend may need to shrink these ranges to ensure that they
     * don't exceed the maximum push constant limits. It can simply drop
@@ -302,7 +283,7 @@ brw_nir_analyze_ubo_ranges(const struct brw_compiler *compiler,
    * unfortunately can't truncate it here, because we don't know what
    * the backend is planning to do with regular uniforms.
    */
-   const int max_ubos = 4 - state.uses_regular_uniforms;
+   const int max_ubos = 4;
    nr_entries = MIN2(nr_entries, max_ubos);
 
    for (int i = 0; i < nr_entries; i++) {