intel: rework push constant handling

The nr_params field and the params array are gone.

brw_ubo_range is no longer stored on the prog_data structure (Anv
already kept a copy of it along with its own additional information).

The backend now deals only with load_push_data_intel. load_uniform and
load_push_constant must be lowered by the driver.
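
Drivers that still generate load_uniform/load_push_constant can lower them
with a small NIR intrinsics pass, similar to what blorp and Anv do in this
change. A minimal sketch, assuming the intrinsic's base/range already match
the pushed data layout (the pass name is illustrative; a real driver may also
need to rebase the offset, as Anv does):

  static bool
  lower_to_push_data(nir_builder *b, nir_intrinsic_instr *intrin, void *data)
  {
     if (intrin->intrinsic != nir_intrinsic_load_uniform &&
         intrin->intrinsic != nir_intrinsic_load_push_constant)
        return false;

     b->cursor = nir_before_instr(&intrin->instr);
     nir_def *push =
        nir_load_push_data_intel(b, intrin->def.num_components,
                                 intrin->def.bit_size,
                                 intrin->src[0].ssa,
                                 .base = nir_intrinsic_base(intrin),
                                 .range = nir_intrinsic_range(intrin));
     nir_def_replace(&intrin->def, push);
     return true;
  }

  /* e.g. NIR_PASS(_, nir, nir_shader_intrinsics_pass, lower_to_push_data,
   *               nir_metadata_control_flow, NULL);
   */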

Pre-Gfx12.5 platforms must provide a subgroup_id_param to specify
where the subgroup_id value is located in the push constants.
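
For compute, that boils down to lowering the subgroup id to a push-data load
at a known byte offset and describing that offset to
brw_cs_fill_push_const_info, as the blorp changes below illustrate. A sketch
following the blorp layout (struct and field names come from that example,
not a requirement of the interface):

  /* Point the subgroup id at its slot in the pushed data... */
  NIR_PASS(_, nir, brw_nir_lower_cs_subgroup_id, compiler->devinfo,
           offsetof(struct blorp_wm_inputs, subgroup_id));

  /* ...and describe the resulting pre-Gfx12.5 cross/per-thread split. */
  cs_prog_data->base.push_sizes[0] = sizeof(struct blorp_wm_inputs);
  brw_cs_fill_push_const_info(compiler->devinfo, cs_prog_data,
                              offsetof(struct blorp_wm_inputs, subgroup_id) / 4);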

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Alyssa Rosenzweig <alyssa.rosenzweig@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38975>
Lionel Landwerlin 2025-12-01 13:00:46 +02:00 committed by Marge Bot
parent 60e359412d
commit faa857a061
24 changed files with 397 additions and 341 deletions


@ -47,6 +47,7 @@ struct iris_bo;
struct iris_context;
struct blorp_batch;
struct blorp_params;
struct brw_ubo_range;
#define IRIS_MAX_DRAW_BUFFERS 8
#define IRIS_MAX_SOL_BINDINGS 64
@ -699,10 +700,15 @@ struct iris_compiled_shader {
mesa_shader_stage stage;
/**
* Data derived from prog_data.
* Data derived from ELK prog_data.
*/
struct iris_ubo_range ubo_ranges[4];
/**
* Data derived from BRW prog_data.
*/
uint16_t push_sizes[4];
unsigned nr_params;
unsigned total_scratch;
unsigned total_shared;
@ -1353,7 +1359,8 @@ uint32_t iris_bti_to_group_index(const struct iris_binding_table *bt,
enum iris_surface_group group,
uint32_t bti);
void iris_apply_brw_prog_data(struct iris_compiled_shader *shader,
struct brw_stage_prog_data *prog_data);
struct brw_stage_prog_data *prog_data,
struct brw_ubo_range *ubo_ranges);
void iris_apply_elk_prog_data(struct iris_compiled_shader *shader,
struct elk_stage_prog_data *prog_data);
struct intel_cs_dispatch_info


@ -151,6 +151,7 @@ iris_disk_cache_store(struct disk_cache *cache,
if (brw) {
blob_write_bytes(&blob, brw->relocs,
brw->num_relocs * sizeof(struct intel_shader_reloc));
blob_write_bytes(&blob, shader->ubo_ranges, sizeof(shader->ubo_ranges));
} else {
#ifdef INTEL_USE_ELK
blob_write_bytes(&blob, elk->relocs,
@ -262,6 +263,7 @@ iris_disk_cache_retrieve(struct iris_screen *screen,
brw->num_relocs * sizeof(struct intel_shader_reloc));
brw->relocs = relocs;
}
blob_copy_bytes(&blob, shader->ubo_ranges, sizeof(shader->ubo_ranges));
} else {
#ifdef INTEL_USE_ELK
elk->relocs = NULL;
@ -311,7 +313,7 @@ iris_disk_cache_retrieve(struct iris_screen *screen,
num_cbufs++;
if (brw)
iris_apply_brw_prog_data(shader, brw);
iris_apply_brw_prog_data(shader, brw, NULL);
else
#ifdef INTEL_USE_ELK
iris_apply_elk_prog_data(shader, elk);


@ -61,10 +61,10 @@
# error "Unsupported generation"
#endif
#define load_param(b, bit_size, struct_name, field_name) \
nir_load_uniform(b, 1, bit_size, nir_imm_int(b, 0), \
.base = offsetof(struct_name, field_name), \
.range = bit_size / 8)
#define load_param(b, bit_size, struct_name, field_name) \
nir_load_push_data_intel(b, 1, bit_size, nir_imm_int(b, 0), \
.base = offsetof(struct_name, field_name), \
.range = bit_size / 8)
static nir_def *
load_fragment_index(nir_builder *b)
@ -291,8 +291,7 @@ emit_indirect_generate_draw(struct iris_batch *batch,
ps.BindingTableEntryCount = GFX_VER == 9 ? 1 : 0;
#if GFX_VER < 20
ps.PushConstantEnable = shader->nr_params > 0 ||
shader->ubo_ranges[0].length;
ps.PushConstantEnable = shader->push_sizes[0] > 0;
#endif
#if GFX_VER >= 9


@ -166,7 +166,7 @@ iris_apply_brw_cs_prog_data(struct iris_compiled_shader *shader,
iris->prog_mask = brw->prog_mask;
/* The pushed constants only contain the subgroup_id */
iris->first_param_is_builtin_subgroup_id = brw->base.nr_params > 0;
iris->first_param_is_builtin_subgroup_id = brw->base.push_sizes[0] > 0;
}
static void
@ -248,16 +248,20 @@ iris_apply_brw_gs_prog_data(struct iris_compiled_shader *shader,
void
iris_apply_brw_prog_data(struct iris_compiled_shader *shader,
struct brw_stage_prog_data *brw)
struct brw_stage_prog_data *brw,
struct brw_ubo_range *ubo_ranges)
{
STATIC_ASSERT(ARRAY_SIZE(brw->ubo_ranges) == ARRAY_SIZE(shader->ubo_ranges));
for (int i = 0; i < ARRAY_SIZE(shader->ubo_ranges); i++) {
shader->ubo_ranges[i].block = brw->ubo_ranges[i].block;
shader->ubo_ranges[i].start = brw->ubo_ranges[i].start;
shader->ubo_ranges[i].length = brw->ubo_ranges[i].length;
if (ubo_ranges != NULL) {
for (int i = 0; i < ARRAY_SIZE(shader->ubo_ranges); i++) {
shader->ubo_ranges[i].block = ubo_ranges[i].block;
shader->ubo_ranges[i].start = ubo_ranges[i].start;
shader->ubo_ranges[i].length = ubo_ranges[i].length;
}
}
shader->nr_params = brw->nr_params;
for (int i = 0; i < ARRAY_SIZE(shader->push_sizes); i++)
shader->push_sizes[i] = brw->push_sizes[i];
shader->total_scratch = brw->total_scratch;
shader->total_shared = brw->total_shared;
shader->program_size = brw->program_size;
@ -1872,6 +1876,29 @@ iris_debug_archiver_open(void *tmp_ctx, struct iris_screen *screen,
return debug_archiver;
}
static void
brw_apply_ubo_ranges(struct brw_compiler *compiler,
nir_shader *nir,
struct brw_ubo_range ubo_ranges[4],
struct brw_stage_prog_data *prog_data)
{
brw_nir_analyze_ubo_ranges(compiler, nir, ubo_ranges);
NIR_PASS(_, nir, brw_nir_lower_ubo_ranges, ubo_ranges);
if (ubo_ranges[0].length == 0 &&
nir->info.stage == MESA_SHADER_FRAGMENT &&
compiler->devinfo->needs_null_push_constant_tbimr_workaround) {
ubo_ranges[0] = (struct brw_ubo_range) {
.block = IRIS_SURFACE_NULL_PUSH_TBIMR_WA,
.start = 0,
.length = 1,
};
}
for (uint32_t i = 0; i < 4; i++)
prog_data->push_sizes[i] = ubo_ranges[i].length * 32;
}
/**
* Compile a vertex shader, and upload the assembly.
*/
@ -1921,7 +1948,8 @@ iris_compile_vs(struct iris_screen *screen,
brw_prog_data->base.base.use_alt_mode = nir->info.use_legacy_math_rules;
brw_nir_analyze_ubo_ranges(screen->brw, nir, brw_prog_data->base.base.ubo_ranges);
struct brw_ubo_range ubo_ranges[4] = {};
brw_apply_ubo_ranges(screen->brw, nir, ubo_ranges, &brw_prog_data->base.base);
struct brw_vs_prog_key brw_key = iris_to_brw_vs_key(screen, key);
@ -1940,7 +1968,7 @@ iris_compile_vs(struct iris_screen *screen,
program = brw_compile_vs(screen->brw, &params);
error = params.base.error_str;
if (program) {
iris_apply_brw_prog_data(shader, &brw_prog_data->base.base);
iris_apply_brw_prog_data(shader, &brw_prog_data->base.base, ubo_ranges);
iris_debug_recompile_brw(screen, dbg, ish, &brw_key.base);
}
} else {
@ -2163,7 +2191,9 @@ iris_compile_tcs(struct iris_screen *screen,
if (screen->brw) {
struct brw_tcs_prog_data *brw_prog_data =
rzalloc(mem_ctx, struct brw_tcs_prog_data);
brw_nir_analyze_ubo_ranges(screen->brw, nir, brw_prog_data->base.base.ubo_ranges);
struct brw_ubo_range ubo_ranges[4] = {};
brw_apply_ubo_ranges(screen->brw, nir, ubo_ranges, &brw_prog_data->base.base);
struct brw_compile_tcs_params params = {
.base = {
@ -2181,7 +2211,7 @@ iris_compile_tcs(struct iris_screen *screen,
error = params.base.error_str;
if (program) {
iris_apply_brw_prog_data(shader, &brw_prog_data->base.base);
iris_apply_brw_prog_data(shader, &brw_prog_data->base.base, ubo_ranges);
iris_debug_recompile_brw(screen, dbg, ish, &brw_key.base);
}
} else {
@ -2366,7 +2396,8 @@ iris_compile_tes(struct iris_screen *screen,
struct brw_tes_prog_data *brw_prog_data =
rzalloc(mem_ctx, struct brw_tes_prog_data);
brw_nir_analyze_ubo_ranges(screen->brw, nir, brw_prog_data->base.base.ubo_ranges);
struct brw_ubo_range ubo_ranges[4] = {};
brw_apply_ubo_ranges(screen->brw, nir, ubo_ranges, &brw_prog_data->base.base);
struct intel_vue_map input_vue_map;
brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
@ -2392,7 +2423,7 @@ iris_compile_tes(struct iris_screen *screen,
if (program) {
iris_debug_recompile_brw(screen, dbg, ish, &brw_key.base);
iris_apply_brw_prog_data(shader, &brw_prog_data->base.base);
iris_apply_brw_prog_data(shader, &brw_prog_data->base.base, ubo_ranges);
}
} else {
#ifdef INTEL_USE_ELK
@ -2560,7 +2591,8 @@ iris_compile_gs(struct iris_screen *screen,
struct brw_gs_prog_data *brw_prog_data =
rzalloc(mem_ctx, struct brw_gs_prog_data);
brw_nir_analyze_ubo_ranges(screen->brw, nir, brw_prog_data->base.base.ubo_ranges);
struct brw_ubo_range ubo_ranges[4] = {};
brw_apply_ubo_ranges(screen->brw, nir, ubo_ranges, &brw_prog_data->base.base);
brw_compute_vue_map(devinfo,
&brw_prog_data->base.vue_map, nir->info.outputs_written,
@ -2584,7 +2616,7 @@ iris_compile_gs(struct iris_screen *screen,
error = params.base.error_str;
if (program) {
iris_debug_recompile_brw(screen, dbg, ish, &brw_key.base);
iris_apply_brw_prog_data(shader, &brw_prog_data->base.base);
iris_apply_brw_prog_data(shader, &brw_prog_data->base.base, ubo_ranges);
}
} else {
#ifdef INTEL_USE_ELK
@ -2753,16 +2785,8 @@ iris_compile_fs(struct iris_screen *screen,
brw_prog_data->base.use_alt_mode = nir->info.use_legacy_math_rules;
brw_nir_analyze_ubo_ranges(screen->brw, nir, brw_prog_data->base.ubo_ranges);
if (brw_prog_data->base.ubo_ranges[0].length == 0 &&
screen->devinfo->needs_null_push_constant_tbimr_workaround) {
brw_prog_data->base.ubo_ranges[0] = (struct brw_ubo_range) {
.block = IRIS_SURFACE_NULL_PUSH_TBIMR_WA,
.start = 0,
.length = 1,
};
}
struct brw_ubo_range ubo_ranges[4] = {};
brw_apply_ubo_ranges(screen->brw, nir, ubo_ranges, &brw_prog_data->base);
struct brw_wm_prog_key brw_key = iris_to_brw_fs_key(screen, key);
@ -2786,7 +2810,7 @@ iris_compile_fs(struct iris_screen *screen,
error = params.base.error_str;
if (program) {
iris_debug_recompile_brw(screen, dbg, ish, &brw_key.base);
iris_apply_brw_prog_data(shader, &brw_prog_data->base);
iris_apply_brw_prog_data(shader, &brw_prog_data->base, ubo_ranges);
}
} else {
#ifdef INTEL_USE_ELK
@ -3112,7 +3136,7 @@ iris_compile_cs(struct iris_screen *screen,
bool subgroup_id_lowered = false;
NIR_PASS(subgroup_id_lowered, nir, brw_nir_lower_cs_subgroup_id, devinfo, 0);
if (subgroup_id_lowered) {
brw_prog_data->base.nr_params = 1;
brw_prog_data->base.push_sizes[0] = 4;
brw_cs_fill_push_const_info(devinfo, brw_prog_data, 0);
} else {
brw_cs_fill_push_const_info(devinfo, brw_prog_data, -1);
@ -3134,7 +3158,7 @@ iris_compile_cs(struct iris_screen *screen,
error = params.base.error_str;
if (program) {
iris_debug_recompile_brw(screen, dbg, ish, &brw_key.base);
iris_apply_brw_prog_data(shader, &brw_prog_data->base);
iris_apply_brw_prog_data(shader, &brw_prog_data->base, NULL);
}
} else {
#ifdef INTEL_USE_ELK


@ -278,7 +278,7 @@ iris_blorp_upload_shader(struct blorp_batch *blorp_batch, uint32_t stage,
memcpy(prog_data, prog_data_templ, prog_data_size);
if (screen->brw) {
iris_apply_brw_prog_data(shader, prog_data);
iris_apply_brw_prog_data(shader, prog_data, NULL);
} else {
#ifdef INTEL_USE_ELK
assert(screen->elk);
@ -445,9 +445,7 @@ iris_ensure_indirect_generation_shader(struct iris_batch *batch)
struct brw_wm_prog_data *prog_data = ralloc_size(NULL, sizeof(*prog_data));
memset(prog_data, 0, sizeof(*prog_data));
prog_data->base.nr_params = nir->num_uniforms / 4;
brw_nir_analyze_ubo_ranges(screen->brw, nir, prog_data->base.ubo_ranges);
prog_data->base.push_sizes[0] = uniform_size;
struct genisa_stats stats[3];
struct brw_compile_fs_params params = {
@ -463,7 +461,7 @@ iris_ensure_indirect_generation_shader(struct iris_batch *batch)
};
program = brw_compile_fs(screen->brw, &params);
assert(program);
iris_apply_brw_prog_data(shader, &prog_data->base);
iris_apply_brw_prog_data(shader, &prog_data->base, NULL);
} else {
#ifdef INTEL_USE_ELK
union elk_any_prog_key prog_key;


@ -27,7 +27,6 @@ blorp_compile_fs_brw(struct blorp_context *blorp, void *mem_ctx,
const struct brw_compiler *compiler = blorp->compiler->brw;
struct brw_wm_prog_data *wm_prog_data = rzalloc(mem_ctx, struct brw_wm_prog_data);
wm_prog_data->base.nr_params = 0;
struct brw_nir_compiler_opts opts = {
.softfp64 = blorp->get_fp64_nir ? blorp->get_fp64_nir(blorp) : NULL,
@ -124,6 +123,24 @@ lower_base_workgroup_id(nir_builder *b, nir_intrinsic_instr *intrin,
return true;
}
static bool
lower_load_uniform(nir_builder *b, nir_intrinsic_instr *intrin,
UNUSED void *data)
{
if (intrin->intrinsic != nir_intrinsic_load_uniform)
return false;
b->cursor = nir_instr_remove(&intrin->instr);
nir_def_rewrite_uses(&intrin->def,
nir_load_push_data_intel(b,
intrin->def.num_components,
intrin->def.bit_size,
intrin->src[0].ssa,
.base = nir_intrinsic_base(intrin),
.range = nir_intrinsic_range(intrin)));
return true;
}
static struct blorp_program
blorp_compile_cs_brw(struct blorp_context *blorp, void *mem_ctx,
struct nir_shader *nir)
@ -139,21 +156,24 @@ blorp_compile_cs_brw(struct blorp_context *blorp, void *mem_ctx,
NIR_PASS(_, nir, nir_lower_io, nir_var_uniform, type_size_scalar_bytes,
(nir_lower_io_options)0);
NIR_PASS(_, nir, nir_shader_intrinsics_pass, lower_load_uniform,
nir_metadata_control_flow, NULL);
STATIC_ASSERT(offsetof(struct blorp_wm_inputs, subgroup_id) + 4 ==
sizeof(struct blorp_wm_inputs));
nir->num_uniforms = offsetof(struct blorp_wm_inputs, subgroup_id);
unsigned nr_params = nir->num_uniforms / 4;
struct brw_cs_prog_data *cs_prog_data = rzalloc(mem_ctx, struct brw_cs_prog_data);
cs_prog_data->base.nr_params = nr_params;
brw_cs_fill_push_const_info(compiler->devinfo, cs_prog_data, nr_params);
cs_prog_data->base.push_sizes[0] = sizeof(struct blorp_wm_inputs);
brw_cs_fill_push_const_info(compiler->devinfo, cs_prog_data,
offsetof(struct blorp_wm_inputs, subgroup_id) / 4);
NIR_PASS(_, nir, brw_nir_lower_cs_intrinsics, compiler->devinfo,
cs_prog_data);
NIR_PASS(_, nir, brw_nir_lower_cs_subgroup_id, compiler->devinfo,
offsetof(struct blorp_wm_inputs, subgroup_id));
offsetof(struct blorp_wm_inputs, subgroup_id));
NIR_PASS(_, nir, nir_shader_intrinsics_pass, lower_base_workgroup_id,
nir_metadata_control_flow, NULL);
nir_metadata_control_flow, NULL);
struct brw_cs_prog_key cs_key;
memset(&cs_key, 0, sizeof(cs_key));


@ -36,11 +36,11 @@ brw_cs_fill_push_const_info(const struct intel_device_info *devinfo,
if (devinfo->verx10 < 125 && subgroup_id_index >= 0) {
/* Fill all but the last register with cross-thread payload */
cross_thread_dwords = 8 * (subgroup_id_index / 8);
per_thread_dwords = prog_data->nr_params - cross_thread_dwords;
per_thread_dwords = prog_data->push_sizes[0] / 4 - cross_thread_dwords;
assert(per_thread_dwords > 0 && per_thread_dwords <= 8);
} else {
/* Fill all data using cross-thread payload */
cross_thread_dwords = prog_data->nr_params;
cross_thread_dwords = prog_data->push_sizes[0] / 4;
per_thread_dwords = 0u;
}
@ -51,7 +51,7 @@ brw_cs_fill_push_const_info(const struct intel_device_info *devinfo,
cs_prog_data->push.per_thread.size == 0);
assert(cs_prog_data->push.cross_thread.dwords +
cs_prog_data->push.per_thread.dwords ==
prog_data->nr_params);
prog_data->push_sizes[0] / 4);
}
static bool


@ -600,7 +600,6 @@ brw_emit_repclear_shader(brw_shader &s)
brw_send_inst *write = NULL;
assert(s.devinfo->ver < 20);
assert(s.uniforms == 0);
assume(key->nr_color_regions > 0);
brw_reg color_output = retype(brw_vec4_grf(127, 0), BRW_TYPE_UD);
@ -1123,7 +1122,7 @@ gfx9_ps_header_only_workaround(struct brw_wm_prog_data *wm_prog_data)
if (wm_prog_data->num_varying_inputs)
return;
if (wm_prog_data->base.curb_read_length)
if (wm_prog_data->base.push_sizes[0] > 0)
return;
wm_prog_data->urb_setup[VARYING_SLOT_LAYER] = 0;
@ -1296,7 +1295,13 @@ brw_assign_urb_setup(brw_shader &s)
struct brw_wm_prog_data *prog_data = brw_wm_prog_data(s.prog_data);
int urb_start = s.payload().num_regs + prog_data->base.curb_read_length;
uint32_t push_size = 0;
for (uint32_t i = 0; i < 4; i++)
push_size += prog_data->base.push_sizes[i];
const int urb_start =
s.payload().num_regs +
DIV_ROUND_UP(align(push_size, REG_SIZE * reg_unit(s.devinfo)), REG_SIZE);
bool read_attribute_payload = false;
/* Offset all the urb_setup[] index by the actual position of the


@ -562,15 +562,16 @@ enum brw_param_builtin {
(((param) - BRW_PARAM_BUILTIN_CLIP_PLANE_0_X) & 0x3)
struct brw_stage_prog_data {
/* Ranges of memory of nir_intrinsic_load_ubo instructions that were promoted
* to push constants to improve performance.
*/
struct brw_ubo_range ubo_ranges[4];
unsigned nr_params; /**< number of float params/constants */
mesa_shader_stage stage;
/**
* Amount of push data delivered to the shader (in bytes)
*
* The HW can push up to 4 ranges from 4 different virtual addresses.
* Values should be aligned to 32B.
*/
uint16_t push_sizes[4];
/* If robust_ubo_ranges not 0, push_reg_mask_param specifies the param
* index (in 32-bit units) where the 4 UBO range limits will be pushed
* as 8-bit integers. The shader will zero byte i of UBO range j if:
@ -583,7 +584,6 @@ struct brw_stage_prog_data {
uint8_t robust_ubo_ranges;
unsigned push_reg_mask_param;
unsigned curb_read_length;
unsigned total_scratch;
unsigned total_shared;
@ -752,13 +752,13 @@ struct brw_wm_prog_data {
/**
* Push constant location of intel_msaa_flags (dynamic configuration of the
* pixel shader).
* pixel shader) in bytes.
*/
unsigned msaa_flags_param;
/**
* Push constant location of the remapping offset in the instruction heap
* for Wa_18019110168.
* for Wa_18019110168 in bytes.
*/
unsigned per_primitive_remap_param;
@ -1169,7 +1169,7 @@ struct brw_tcs_prog_data
/**
* Push constant location of intel_tess_config (dynamic configuration of
* the tessellation shaders).
* the tessellation shaders) in bytes.
*/
unsigned tess_config_param;
};
@ -1184,7 +1184,7 @@ struct brw_tes_prog_data
/**
* Push constant location of intel_tess_config (dynamic configuration of
* the tessellation shaders).
* the tessellation shaders) in bytes.
*/
unsigned tess_config_param;
};


@ -785,7 +785,7 @@ enum ENUM_PACKED brw_reg_file {
ADDRESS,
VGRF,
ATTR,
UNIFORM, /* prog_data->params[reg] */
UNIFORM, /* pushed constant delivered register */
};
/* Align1 support for 3-src instructions. Bit 35 of the instruction


@ -1992,8 +1992,7 @@ get_nir_def(nir_to_brw_state &ntb, const nir_def &def, bool all_sources_uniform)
is_scalar = get_nir_src(ntb, instr->src[1], 0).is_scalar;
break;
case nir_intrinsic_load_uniform:
case nir_intrinsic_load_push_constant:
case nir_intrinsic_load_push_data_intel:
is_scalar = get_nir_src(ntb, instr->src[0], 0).is_scalar;
break;
@ -2976,7 +2975,10 @@ brw_from_nir_emit_tcs_intrinsic(nir_to_brw_state &ntb,
case nir_intrinsic_load_tess_config_intel:
bld.MOV(retype(dst, BRW_TYPE_UD),
brw_uniform_reg(tcs_prog_data->tess_config_param, BRW_TYPE_UD));
byte_offset(
brw_uniform_reg(
tcs_prog_data->tess_config_param / REG_SIZE, BRW_TYPE_UD),
tcs_prog_data->tess_config_param % REG_SIZE));
break;
default:
@ -3026,7 +3028,10 @@ brw_from_nir_emit_tes_intrinsic(nir_to_brw_state &ntb,
case nir_intrinsic_load_tess_config_intel:
bld.MOV(retype(dest, BRW_TYPE_UD),
brw_uniform_reg(tes_prog_data->tess_config_param, BRW_TYPE_UD));
byte_offset(
brw_uniform_reg(
tes_prog_data->tess_config_param / REG_SIZE, BRW_TYPE_UD),
tes_prog_data->tess_config_param % REG_SIZE));
break;
default:
@ -5393,15 +5398,14 @@ brw_from_nir_emit_intrinsic(nir_to_brw_state &ntb,
break;
}
case nir_intrinsic_load_uniform:
case nir_intrinsic_load_push_constant: {
case nir_intrinsic_load_push_data_intel: {
/* Offsets are in bytes but they should always be aligned to
* the type size
*/
unsigned base_offset = nir_intrinsic_base(instr);
assert(base_offset % 4 == 0 || base_offset % brw_type_size_bytes(dest.type) == 0);
brw_reg src = brw_uniform_reg(base_offset / 4, dest.type);
brw_reg src = brw_uniform_reg(base_offset / REG_SIZE, dest.type);
if (nir_src_is_const(instr->src[0])) {
unsigned load_offset = nir_src_as_uint(instr->src[0]);
@ -5410,7 +5414,7 @@ brw_from_nir_emit_intrinsic(nir_to_brw_state &ntb,
* data take the modulo of the offset with 4 bytes and add it to
* the offset to read from within the source register.
*/
src.offset = load_offset + base_offset % 4;
src.offset = load_offset + base_offset % REG_SIZE;
for (unsigned j = 0; j < instr->num_components; j++) {
xbld.MOV(offset(dest, xbld, j), offset(src, xbld, j));
@ -5458,8 +5462,14 @@ brw_from_nir_emit_intrinsic(nir_to_brw_state &ntb,
break;
}
case nir_intrinsic_load_ubo:
case nir_intrinsic_load_ubo_uniform_block_intel: {
case nir_intrinsic_load_ubo_uniform_block_intel:
s.prog_data->has_ubo_pull = true;
brw_from_nir_emit_memory_access(ntb, bld, xbld, instr);
break;
case nir_intrinsic_load_ubo: {
s.prog_data->has_ubo_pull = true;
brw_reg surface, surface_handle;
bool no_mask_handle = false;
@ -5475,37 +5485,29 @@ brw_from_nir_emit_intrinsic(nir_to_brw_state &ntb,
const unsigned num_components = last_component - first_component + 1;
if (!nir_src_is_const(instr->src[1])) {
s.prog_data->has_ubo_pull = true;
/* load_ubo with non-constant offset. The offset might still be
* uniform on non-LSC platforms when loading fewer than 4 components.
*/
brw_reg base_offset = retype(get_nir_src(ntb, instr->src[1], 0),
BRW_TYPE_UD);
if (nir_intrinsic_has_base(instr)) {
struct brw_reg imm = brw_imm_int(base_offset.type,
nir_intrinsic_base(instr));
base_offset = bld.ADD(base_offset, imm);
}
if (instr->intrinsic == nir_intrinsic_load_ubo) {
/* load_ubo with non-constant offset. The offset might still be
* uniform on non-LSC platforms when loading fewer than 4
* components.
*/
brw_reg base_offset = retype(get_nir_src(ntb, instr->src[1], 0),
BRW_TYPE_UD);
if (nir_intrinsic_has_base(instr)) {
struct brw_reg imm = brw_imm_int(base_offset.type,
nir_intrinsic_base(instr));
base_offset = bld.ADD(base_offset, imm);
}
const unsigned comps_per_load = brw_type_size_bytes(dest.type) == 8 ? 2 : 4;
const unsigned comps_per_load = brw_type_size_bytes(dest.type) == 8 ? 2 : 4;
for (unsigned i = first_component;
i <= last_component;
i += comps_per_load) {
const unsigned remaining = last_component + 1 - i;
xbld.VARYING_PULL_CONSTANT_LOAD(offset(dest, xbld, i),
surface, surface_handle,
base_offset,
i * brw_type_size_bytes(dest.type),
instr->def.bit_size / 8,
MIN2(remaining, comps_per_load));
}
} else {
/* load_ubo_uniform_block_intel with non-constant offset */
brw_from_nir_emit_memory_access(ntb, bld, xbld, instr);
for (unsigned i = first_component;
i <= last_component;
i += comps_per_load) {
const unsigned remaining = last_component + 1 - i;
xbld.VARYING_PULL_CONSTANT_LOAD(offset(dest, xbld, i),
surface, surface_handle,
base_offset,
i * brw_type_size_bytes(dest.type),
instr->def.bit_size / 8,
MIN2(remaining, comps_per_load));
}
} else {
/* Even if we are loading doubles, a pull constant load will load
@ -5520,42 +5522,6 @@ brw_from_nir_emit_intrinsic(nir_to_brw_state &ntb,
const unsigned load_offset =
nir_src_as_uint(instr->src[1]) + first_component * type_size +
(nir_intrinsic_has_base(instr) ? nir_intrinsic_base(instr) : 0);
const unsigned end_offset = load_offset + num_components * type_size;
const unsigned ubo_block =
brw_nir_ubo_surface_index_get_push_block(instr->src[0]);
const unsigned offset_256b = load_offset / 32;
const unsigned end_256b = DIV_ROUND_UP(end_offset, 32);
/* See if we've selected this as a push constant candidate */
brw_reg push_reg;
for (int i = 0; i < 4; i++) {
const struct brw_ubo_range *range = &s.prog_data->ubo_ranges[i];
if (range->block == ubo_block &&
offset_256b >= range->start &&
end_256b <= range->start + range->length) {
push_reg = brw_uniform_reg(UBO_START + i, dest.type);
push_reg.offset = load_offset - 32 * range->start;
break;
}
}
if (push_reg.file != BAD_FILE) {
for (unsigned i = first_component; i <= last_component; i++) {
xbld.MOV(offset(dest, xbld, i),
byte_offset(push_reg,
(i - first_component) * type_size));
}
break;
}
s.prog_data->has_ubo_pull = true;
if (instr->intrinsic == nir_intrinsic_load_ubo_uniform_block_intel) {
brw_from_nir_emit_memory_access(ntb, bld, xbld, instr);
break;
}
const unsigned block_sz = 64; /* Fetch one cacheline at a time. */
const brw_builder ubld = bld.exec_all().group(block_sz / 4, 0);


@ -3144,8 +3144,7 @@ nir_def *
brw_nir_load_global_const(nir_builder *b, nir_intrinsic_instr *load,
nir_def *base_addr, unsigned off)
{
assert(load->intrinsic == nir_intrinsic_load_push_constant ||
load->intrinsic == nir_intrinsic_load_uniform);
assert(load->intrinsic == nir_intrinsic_load_push_data_intel);
unsigned bit_size = load->def.bit_size;
assert(bit_size >= 8 && bit_size % 8 == 0);


@ -400,7 +400,7 @@ lower_cs_subgroup_id_instr(nir_builder *b,
b->cursor = nir_before_instr(&intrin->instr);
nir_def_replace(&intrin->def,
nir_load_uniform(
nir_load_push_data_intel(
b, 1, 32, nir_imm_int(b, 0),
.base = *subgroup_id_offset_ptr,
.range = 4));


@ -135,8 +135,7 @@ lower_rt_intrinsics_impl(nir_function_impl *impl,
nir_instr_remove(instr);
break;
case nir_intrinsic_load_uniform:
case nir_intrinsic_load_push_constant:
case nir_intrinsic_load_push_data_intel:
/* We don't want to lower this in the launch trampoline.
*
* Also if the driver chooses to use an inline push address, we


@ -427,7 +427,6 @@ brw_nir_create_raygen_trampoline(const struct brw_compiler *compiler,
* passed in as push constants in the first register. We deal with the
* raygen BSR address here; the global data we'll deal with later.
*/
b.shader->num_uniforms = 32;
nir_def *raygen_param_bsr_addr =
load_trampoline_param(&b, raygen_bsr_addr, 1, 64);
nir_def *is_indirect =


@ -425,7 +425,6 @@ brw_shader::brw_shader(const brw_shader_params *params)
this->source_depth_to_render_target = false;
this->first_non_payload_grf = 0;
this->uniforms = this->nir->num_uniforms / 4;
this->last_scratch = 0;
memset(&this->shader_stats, 0, sizeof(this->shader_stats));
@ -621,37 +620,22 @@ brw_shader::mark_last_urb_write_with_eot()
return true;
}
static unsigned
round_components_to_whole_registers(const intel_device_info *devinfo,
unsigned c)
{
return DIV_ROUND_UP(c, 8 * reg_unit(devinfo)) * reg_unit(devinfo);
}
void
brw_shader::assign_curb_setup()
{
unsigned uniform_push_length =
round_components_to_whole_registers(devinfo, prog_data->nr_params);
unsigned ubo_push_length = 0;
unsigned ubo_push_start[4];
for (int i = 0; i < 4; i++) {
ubo_push_start[i] = 8 * (ubo_push_length + uniform_push_length);
ubo_push_length += prog_data->ubo_ranges[i].length;
assert(ubo_push_start[i] % (8 * reg_unit(devinfo)) == 0);
assert(ubo_push_length % (1 * reg_unit(devinfo)) == 0);
uint32_t ranges_start[4];
this->push_data_size = 0;
for (uint32_t i = 0; i < 4; i++) {
ranges_start[i] = this->push_data_size / REG_SIZE;
this->push_data_size += align(prog_data->push_sizes[i], REG_SIZE);
}
prog_data->curb_read_length = uniform_push_length + ubo_push_length;
uint64_t used = 0;
const bool pull_constants =
devinfo->verx10 >= 125 &&
(mesa_shader_stage_is_compute(stage) ||
mesa_shader_stage_is_mesh(stage)) &&
uniform_push_length;
this->push_data_size > 0;
if (pull_constants) {
const bool pull_constants_a64 =
@ -685,9 +669,11 @@ brw_shader::assign_curb_setup()
/* On Gfx12-HP we load constants at the start of the program using A32
* stateless messages.
*/
for (unsigned i = 0; i < uniform_push_length;) {
const unsigned n_push_data_regs = reg_unit(devinfo) *
DIV_ROUND_UP(this->push_data_size, reg_unit(devinfo) * REG_SIZE);
for (unsigned i = 0; i < this->push_data_size / REG_SIZE;) {
/* Limit ourselves to LSC HW limit of 8 GRFs (256bytes D32V64). */
unsigned num_regs = MIN2(uniform_push_length - i, 8);
unsigned num_regs = MIN2(this->push_data_size / REG_SIZE - i, 8);
assert(num_regs > 0);
num_regs = 1 << util_logbase2(num_regs);
@ -743,7 +729,7 @@ brw_shader::assign_curb_setup()
send->size_written =
lsc_msg_dest_len(devinfo, LSC_DATA_SIZE_D32, num_regs * 8) * REG_SIZE;
assert((payload().num_regs + i + send->size_written / REG_SIZE) <=
(payload().num_regs + prog_data->curb_read_length));
(payload().num_regs + n_push_data_regs));
send->is_volatile = true;
send->src[SEND_SRC_DESC] =
@ -762,29 +748,13 @@ brw_shader::assign_curb_setup()
foreach_block_and_inst(block, brw_inst, inst, cfg) {
for (unsigned int i = 0; i < inst->sources; i++) {
if (inst->src[i].file == UNIFORM) {
int uniform_nr = inst->src[i].nr + inst->src[i].offset / 4;
int constant_nr;
if (inst->src[i].nr >= UBO_START) {
/* constant_nr is in 32-bit units, the rest are in bytes */
constant_nr = ubo_push_start[inst->src[i].nr - UBO_START] +
inst->src[i].offset / 4;
} else if (uniform_nr >= 0 && uniform_nr < (int) uniforms) {
constant_nr = uniform_nr;
} else {
/* Section 5.11 of the OpenGL 4.1 spec says:
* "Out-of-bounds reads return undefined values, which include
* values from other variables of the active program or zero."
* Just return the first push constant.
*/
constant_nr = 0;
}
assert(inst->src[i].nr < 64);
used |= BITFIELD64_BIT(inst->src[i].nr);
assert(constant_nr / 8 < 64);
used |= BITFIELD64_BIT(constant_nr / 8);
assert(inst->src[i].nr < this->push_data_size);
struct brw_reg brw_reg = brw_vec1_grf(payload().num_regs +
constant_nr / 8,
constant_nr % 8);
inst->src[i].nr, 0);
brw_reg.abs = inst->src[i].abs;
brw_reg.negate = inst->src[i].negate;
@ -795,7 +765,7 @@ brw_shader::assign_curb_setup()
assert(inst->src[i].stride == 0 || inst->exec_size == 2);
inst->src[i] = byte_offset(
retype(brw_reg, inst->src[i].type),
inst->src[i].offset % 4);
inst->src[i].offset);
}
}
}
@ -821,15 +791,16 @@ brw_shader::assign_curb_setup()
ubld.group(16, 0).ADD(horiz_offset(offset_base, 16), offset_base, brw_imm_uw(16));
u_foreach_bit(i, prog_data->robust_ubo_ranges) {
struct brw_ubo_range *ubo_range = &prog_data->ubo_ranges[i];
const unsigned range_length =
DIV_ROUND_UP(prog_data->push_sizes[i], REG_SIZE);
unsigned range_start = ubo_push_start[i] / 8;
uint64_t want_zero = (used >> range_start) & BITFIELD64_MASK(ubo_range->length);
const unsigned range_start = ranges_start[i];
uint64_t want_zero = (used >> range_start) & BITFIELD64_MASK(range_length);
if (!want_zero)
continue;
const unsigned grf_start = payload().num_regs + range_start;
const unsigned grf_end = grf_start + ubo_range->length;
const unsigned grf_end = grf_start + range_length;
const unsigned max_grf_mask = max_grf_writes * 4;
unsigned grf = grf_start;
@ -896,7 +867,10 @@ brw_shader::assign_curb_setup()
}
/* This may be updated in assign_urb_setup or assign_vs_urb_setup. */
this->first_non_payload_grf = payload().num_regs + prog_data->curb_read_length;
this->first_non_payload_grf = payload().num_regs +
DIV_ROUND_UP(align(this->push_data_size,
REG_SIZE * reg_unit(devinfo)),
REG_SIZE);
this->debug_optimizer(this->nir, "assign_curb_setup", 90, 0);
}
@ -932,7 +906,9 @@ brw_shader::convert_attr_sources_to_hw_regs(brw_inst *inst)
if (inst->src[i].file == ATTR) {
assert(inst->src[i].nr == 0);
int grf = payload().num_regs +
prog_data->curb_read_length +
DIV_ROUND_UP(
align(this->push_data_size, REG_SIZE * reg_unit(devinfo)),
REG_SIZE) +
inst->src[i].offset / REG_SIZE;
/* As explained at brw_lower_vgrf_to_fixed_grf, From the Haswell PRM:


@ -156,8 +156,11 @@ public:
brw_analysis<brw_def_analysis, brw_shader> def_analysis;
brw_analysis<brw_ip_ranges, brw_shader> ip_ranges_analysis;
/** Number of uniform variable components visited. */
unsigned uniforms;
/** Amount of push constant data delivered to the shader
*
* Aligned to native GRF registers
*/
unsigned push_data_size;
/** Byte-offset for the next available spot in the scratch space buffer. */
unsigned last_scratch;
@ -285,13 +288,19 @@ sample_mask_flag_subreg(const brw_shader &s)
inline brw_reg
brw_dynamic_msaa_flags(const struct brw_wm_prog_data *wm_prog_data)
{
return brw_uniform_reg(wm_prog_data->msaa_flags_param, BRW_TYPE_UD);
return byte_offset(
brw_uniform_reg(
wm_prog_data->msaa_flags_param / REG_SIZE, BRW_TYPE_UD),
wm_prog_data->msaa_flags_param % REG_SIZE);
}
inline brw_reg
brw_dynamic_per_primitive_remap(const struct brw_wm_prog_data *wm_prog_data)
{
return brw_uniform_reg(wm_prog_data->per_primitive_remap_param, BRW_TYPE_UD);
return byte_offset(
brw_uniform_reg(
wm_prog_data->per_primitive_remap_param / REG_SIZE, BRW_TYPE_UD),
wm_prog_data->per_primitive_remap_param % REG_SIZE);
}
enum intel_barycentric_mode brw_barycentric_mode(const struct brw_wm_prog_key *key,


@ -152,14 +152,10 @@ compile_shader(struct anv_device *device,
};
NIR_PASS(_, nir, nir_opt_load_store_vectorize, &options);
nir->num_uniforms = uniform_size;
prog_data.base.push_sizes[0] = uniform_size;
void *temp_ctx = ralloc_context(NULL);
prog_data.base.nr_params = nir->num_uniforms / 4;
brw_nir_analyze_ubo_ranges(compiler, nir, prog_data.base.ubo_ranges);
const unsigned *program;
if (stage == MESA_SHADER_FRAGMENT) {
struct genisa_stats stats[3];


@ -39,22 +39,23 @@ struct vk_pipeline_robustness_state;
(sizeof(((struct anv_push_constants *)0)->field))
#define anv_load_driver_uniform(b, components, field) \
nir_load_push_constant(b, components, \
anv_drv_const_size(field) * 8, \
nir_imm_int(b, 0), \
.base = anv_drv_const_offset(field), \
.range = components * anv_drv_const_size(field))
/* Use load_uniform for indexed values since load_push_constant requires that
* the offset source is dynamically uniform in the subgroup which we cannot
* guarantee.
nir_load_push_data_intel(b, components, \
anv_drv_const_size(field) * 8, \
nir_imm_int(b, 0), \
.base = anv_drv_const_offset(field), \
.range = components * anv_drv_const_size(field))
/* Use ACCESS_NON_UNIFORM for indexed values since load_push_constant requires
* that the offset source is dynamically uniform in the subgroup which we
* cannot guarantee.
*/
#define anv_load_driver_uniform_indexed(b, components, field, idx) \
nir_load_uniform(b, components, \
anv_drv_const_size(field[0]) * 8, \
nir_imul_imm(b, idx, \
anv_drv_const_size(field[0])), \
.base = anv_drv_const_offset(field), \
.range = anv_drv_const_size(field))
nir_load_push_data_intel(b, components, \
anv_drv_const_size(field[0]) * 8, \
nir_imul_imm(b, idx, \
anv_drv_const_size(field[0])), \
.base = anv_drv_const_offset(field), \
.range = anv_drv_const_size(field), \
.access = ACCESS_NON_UNIFORM)
/* This map represents a mapping where the key is the NIR
* nir_intrinsic_resource_intel::block index. It allows mapping bindless UBOs


@ -26,6 +26,94 @@
#include "compiler/brw/brw_nir.h"
#include "util/mesa-sha1.h"
struct lower_to_push_data_intel_state {
const struct anv_pipeline_bind_map *bind_map;
const struct anv_pipeline_push_map *push_map;
};
static bool
lower_to_push_data_intel(nir_builder *b,
nir_intrinsic_instr *intrin,
void *data)
{
const struct lower_to_push_data_intel_state *state = data;
/* With bindless shaders we load uniforms with SEND messages. All the push
* constants are located after the RT_DISPATCH_GLOBALS. We just need to add
* the offset to the address right after RT_DISPATCH_GLOBALS (see
* brw_nir_lower_rt_intrinsics.c).
*/
const unsigned base_offset =
brw_shader_stage_is_bindless(b->shader->info.stage) ?
0 : state->bind_map->push_ranges[0].start * 32;
switch (intrin->intrinsic) {
case nir_intrinsic_load_push_data_intel: {
nir_intrinsic_set_base(intrin, nir_intrinsic_base(intrin) - base_offset);
return true;
}
case nir_intrinsic_load_push_constant: {
b->cursor = nir_before_instr(&intrin->instr);
nir_def *data = nir_load_push_data_intel(
b,
intrin->def.num_components,
intrin->def.bit_size,
intrin->src[0].ssa,
.base = nir_intrinsic_base(intrin) - base_offset,
.range = nir_intrinsic_range(intrin));
nir_def_replace(&intrin->def, data);
return true;
}
case nir_intrinsic_load_ubo: {
if (!brw_nir_ubo_surface_index_is_pushable(intrin->src[0]) ||
!nir_src_is_const(intrin->src[1]))
return false;
const int block = brw_nir_ubo_surface_index_get_push_block(intrin->src[0]);
const unsigned byte_offset = nir_src_as_uint(intrin->src[1]);
const unsigned num_components =
nir_def_last_component_read(&intrin->def) + 1;
const int bytes = num_components * (intrin->def.bit_size / 8);
const struct anv_pipeline_binding *binding =
&state->push_map->block_to_descriptor[block];
uint32_t range_offset = 0;
const struct anv_push_range *push_range = NULL;
for (uint32_t i = 0; i < 4; i++) {
if (state->bind_map->push_ranges[i].set == binding->set &&
state->bind_map->push_ranges[i].index == binding->index &&
byte_offset >= state->bind_map->push_ranges[i].start * 32 &&
(byte_offset + bytes) <= (state->bind_map->push_ranges[i].start +
state->bind_map->push_ranges[i].length) * 32) {
push_range = &state->bind_map->push_ranges[i];
break;
} else {
range_offset += state->bind_map->push_ranges[i].length * 32;
}
}
if (push_range == NULL)
return false;
b->cursor = nir_before_instr(&intrin->instr);
nir_def *data = nir_load_push_data_intel(
b,
nir_def_last_component_read(&intrin->def) + 1,
intrin->def.bit_size,
nir_imm_int(b, 0),
.base = range_offset + byte_offset - push_range->start * 32,
.range = nir_intrinsic_range(intrin));
nir_def_replace(&intrin->def, data);
return true;
}
default:
return false;
}
}
bool
anv_nir_compute_push_layout(nir_shader *nir,
const struct anv_physical_device *pdevice,
@ -57,8 +145,8 @@ anv_nir_compute_push_layout(nir_shader *nir,
has_const_ubo = true;
break;
case nir_intrinsic_load_uniform:
case nir_intrinsic_load_push_constant: {
case nir_intrinsic_load_push_constant:
case nir_intrinsic_load_push_data_intel: {
unsigned base = nir_intrinsic_base(intrin);
unsigned range = nir_intrinsic_range(intrin);
push_start = MIN2(push_start, base);
@ -80,8 +168,6 @@ anv_nir_compute_push_layout(nir_shader *nir,
}
}
const bool has_push_intrinsic = push_start <= push_end;
const bool push_ubo_ranges =
has_const_ubo && nir->info.stage != MESA_SHADER_COMPUTE &&
!brw_shader_stage_requires_bindless_resources(nir->info.stage);
@ -174,8 +260,8 @@ anv_nir_compute_push_layout(nir_shader *nir,
/* For scalar, push data size needs to be aligned to a DWORD. */
const unsigned alignment = 4;
nir->num_uniforms = align(push_end - push_start, alignment);
prog_data->nr_params = nir->num_uniforms / 4;
const unsigned push_size = align(push_end - push_start, alignment);
prog_data->push_sizes[0] = push_size;
/* Fill the compute push constant layout (cross/per thread constants) for
* platforms pre Gfx12.5.
@ -195,39 +281,6 @@ anv_nir_compute_push_layout(nir_shader *nir,
.length = align(push_end - push_start, devinfo->grf_size) / 32,
};
if (has_push_intrinsic) {
nir_foreach_function_impl(impl, nir) {
nir_foreach_block(block, impl) {
nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
switch (intrin->intrinsic) {
case nir_intrinsic_load_uniform:
case nir_intrinsic_load_push_constant: {
/* With bindless shaders we load uniforms with SEND
* messages. All the push constants are located after the
* RT_DISPATCH_GLOBALS. We just need to add the offset to
* the address right after RT_DISPATCH_GLOBALS (see
* brw_nir_lower_rt_intrinsics.c).
*/
unsigned base_offset =
brw_shader_stage_is_bindless(nir->info.stage) ? 0 : push_start;
nir_intrinsic_set_base(intrin,
nir_intrinsic_base(intrin) -
base_offset);
break;
}
default:
break;
}
}
}
}
}
/* When platforms support Mesh and the fragment shader is not fully linked
* to the previous shader, payload format can change if the preceding
* shader is mesh or not, this is an issue in particular for PrimitiveID
@ -263,15 +316,17 @@ anv_nir_compute_push_layout(nir_shader *nir,
map->push_ranges[n_push_ranges++] = push_constant_range;
if (push_ubo_ranges) {
brw_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);
struct brw_ubo_range ubo_ranges[4] = {};
brw_nir_analyze_ubo_ranges(compiler, nir, ubo_ranges);
const unsigned max_push_regs = 64;
unsigned total_push_regs = push_constant_range.length;
for (unsigned i = 0; i < 4; i++) {
if (total_push_regs + prog_data->ubo_ranges[i].length > max_push_regs)
prog_data->ubo_ranges[i].length = max_push_regs - total_push_regs;
total_push_regs += prog_data->ubo_ranges[i].length;
if (total_push_regs + ubo_ranges[i].length > max_push_regs)
ubo_ranges[i].length = max_push_regs - total_push_regs;
total_push_regs += ubo_ranges[i].length;
}
assert(total_push_regs <= max_push_regs);
@ -286,7 +341,7 @@ anv_nir_compute_push_layout(nir_shader *nir,
const unsigned max_push_buffers = needs_padding_per_primitive ? 3 : 4;
for (unsigned i = 0; i < 4; i++) {
struct brw_ubo_range *ubo_range = &prog_data->ubo_ranges[i];
struct brw_ubo_range *ubo_range = &ubo_ranges[i];
if (ubo_range->length == 0)
continue;
@ -310,7 +365,7 @@ anv_nir_compute_push_layout(nir_shader *nir,
/* We only bother to shader-zero pushed client UBOs */
if (binding->set < MAX_SETS &&
(robust_flags & BRW_ROBUSTNESS_UBO)) {
prog_data->robust_ubo_ranges |= (uint8_t) (1 << i);
prog_data->robust_ubo_ranges |= (uint8_t) (1 << (n_push_ranges - 1));
}
}
}
@ -330,8 +385,7 @@ anv_nir_compute_push_layout(nir_shader *nir,
.start = 0,
.length = 1,
};
assert(prog_data->nr_params == 0);
prog_data->nr_params = 32 / 4;
prog_data->push_sizes[0] = 32;
}
if (needs_padding_per_primitive) {
@ -345,21 +399,36 @@ anv_nir_compute_push_layout(nir_shader *nir,
assert(n_push_ranges <= 4);
if (nir->info.stage == MESA_SHADER_TESS_CTRL && needs_dyn_tess_config) {
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
bool progress = nir_shader_intrinsics_pass(
nir, lower_to_push_data_intel,
nir_metadata_control_flow,
&(struct lower_to_push_data_intel_state) {
.bind_map = map,
.push_map = push_map,
});
const uint32_t tess_config_offset = anv_drv_const_offset(gfx.tess_config);
assert(tess_config_offset >= push_start);
tcs_prog_data->tess_config_param = (tess_config_offset - push_start) / 4;
}
if (nir->info.stage == MESA_SHADER_TESS_EVAL && push_info->separate_tessellation) {
struct brw_tes_prog_data *tes_prog_data = brw_tes_prog_data(prog_data);
switch (nir->info.stage) {
case MESA_SHADER_TESS_CTRL:
if (needs_dyn_tess_config) {
struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
const uint32_t tess_config_offset = anv_drv_const_offset(gfx.tess_config);
assert(tess_config_offset >= push_start);
tes_prog_data->tess_config_param = (tess_config_offset - push_start) / 4;
}
if (nir->info.stage == MESA_SHADER_FRAGMENT) {
const uint32_t tess_config_offset = anv_drv_const_offset(gfx.tess_config);
assert(tess_config_offset >= push_start);
tcs_prog_data->tess_config_param = tess_config_offset - push_start;
}
break;
case MESA_SHADER_TESS_EVAL:
if (push_info->separate_tessellation) {
struct brw_tes_prog_data *tes_prog_data = brw_tes_prog_data(prog_data);
const uint32_t tess_config_offset = anv_drv_const_offset(gfx.tess_config);
assert(tess_config_offset >= push_start);
tes_prog_data->tess_config_param = tess_config_offset - push_start;
}
break;
case MESA_SHADER_FRAGMENT: {
struct brw_wm_prog_data *wm_prog_data =
container_of(prog_data, struct brw_wm_prog_data, base);
@ -367,17 +436,26 @@ anv_nir_compute_push_layout(nir_shader *nir,
const uint32_t fs_msaa_flags_offset =
anv_drv_const_offset(gfx.fs_msaa_flags);
assert(fs_msaa_flags_offset >= push_start);
wm_prog_data->msaa_flags_param =
(fs_msaa_flags_offset - push_start) / 4;
wm_prog_data->msaa_flags_param = fs_msaa_flags_offset - push_start;
}
if (needs_wa_18019110168) {
const uint32_t fs_per_prim_remap_offset =
anv_drv_const_offset(gfx.fs_per_prim_remap_offset);
assert(fs_per_prim_remap_offset >= push_start);
wm_prog_data->per_primitive_remap_param =
(fs_per_prim_remap_offset - push_start) / 4;
fs_per_prim_remap_offset - push_start;
}
break;
}
default:
break;
}
for (uint32_t i = 0; i < 4; i++) {
if (map->push_ranges[i].set == ANV_DESCRIPTOR_SET_PER_PRIM_PADDING)
continue;
prog_data->push_sizes[i] = map->push_ranges[i].length * 32;
}
#if 0
@ -397,7 +475,7 @@ anv_nir_compute_push_layout(nir_shader *nir,
_mesa_sha1_compute(map->push_ranges,
sizeof(map->push_ranges),
map->push_sha1);
return false;
return progress;
}
void
@ -406,10 +484,9 @@ anv_nir_validate_push_layout(const struct anv_physical_device *pdevice,
struct anv_pipeline_bind_map *map)
{
#ifndef NDEBUG
unsigned prog_data_push_size = align(prog_data->nr_params, pdevice->info.grf_size / 4) / 8;
unsigned prog_data_push_size = 0;
for (unsigned i = 0; i < 4; i++)
prog_data_push_size += prog_data->ubo_ranges[i].length;
prog_data_push_size += DIV_ROUND_UP(prog_data->push_sizes[i], 32);
unsigned bind_map_push_size = 0;
for (unsigned i = 0; i < 4; i++) {


@ -444,52 +444,34 @@ cmd_buffer_flush_gfx_push_constants(struct anv_cmd_buffer *cmd_buffer,
continue;
const struct anv_shader *shader = gfx->shaders[stage];
if (shader->prog_data->robust_ubo_ranges) {
const struct anv_pipeline_bind_map *bind_map = &shader->bind_map;
struct anv_push_constants *push = &gfx->base.push_constants;
const struct anv_pipeline_bind_map *bind_map = &shader->bind_map;
struct anv_push_constants *push = &gfx->base.push_constants;
u_foreach_bit(r, shader->prog_data->robust_ubo_ranges) {
const struct anv_push_range *range = &bind_map->push_ranges[r];
unsigned ubo_range_index = 0;
for (unsigned i = 0; i < 4; i++) {
const struct anv_push_range *range = &bind_map->push_ranges[i];
if (range->length == 0)
break;
assert(range->length != 0);
assert(range->set < MAX_SETS);
/* Skip any push ranges that were not promoted from UBOs */
if (range->set >= MAX_SETS) {
/* The indexing in prog_data->robust_ubo_ranges is based off
* prog_data->ubo_ranges which does not include the
* prog_data->nr_params (Vulkan push constants).
*/
if (range->set != ANV_DESCRIPTOR_SET_PUSH_CONSTANTS)
ubo_range_index++;
continue;
}
unsigned bound_size =
get_push_range_bound_size(cmd_buffer, shader, range);
assert(shader->prog_data->robust_ubo_ranges & (1 << ubo_range_index));
uint8_t range_mask = 0;
unsigned bound_size =
get_push_range_bound_size(cmd_buffer, shader, range);
uint8_t range_mask = 0;
/* Determine the bound length of the range in 16-byte units */
if (bound_size > range->start * 32) {
bound_size = MIN2(
DIV_ROUND_UP(bound_size - range->start * 32, 16),
2 * range->length);
/* Determine the bound length of the range in 16-byte units */
if (bound_size > range->start * 32) {
bound_size = MIN2(
DIV_ROUND_UP(bound_size - range->start * 32, 16),
2 * range->length);
range_mask = (uint8_t) bound_size;
assert(bound_size < 256);
}
}
/* Update the pushed bound length constant if it changed */
if (range_mask != push->gfx.push_reg_mask[stage][ubo_range_index]) {
push->gfx.push_reg_mask[stage][ubo_range_index] = range_mask;
cmd_buffer->state.push_constants_dirty |=
mesa_to_vk_shader_stage(stage);
gfx->base.push_constants_data_dirty = true;
}
ubo_range_index++;
/* Update the pushed bound length constant if it changed */
if (range_mask != push->gfx.push_reg_mask[stage][r]) {
push->gfx.push_reg_mask[stage][r] = range_mask;
cmd_buffer->state.push_constants_dirty |=
mesa_to_vk_shader_stage(stage);
gfx->base.push_constants_data_dirty = true;
}
}
}


@ -26,10 +26,10 @@
#include "genxml/gen_macros.h"
#define load_param(b, bit_size, struct_name, field_name) \
nir_load_uniform(b, 1, bit_size, nir_imm_int(b, 0), \
.base = offsetof(struct_name, field_name), \
.range = bit_size / 8)
#define load_param(b, bit_size, struct_name, field_name) \
nir_load_push_data_intel(b, 1, bit_size, nir_imm_int(b, 0), \
.base = offsetof(struct_name, field_name), \
.range = bit_size / 8)
static nir_def *
load_fragment_index(nir_builder *b)


@ -1064,9 +1064,7 @@ emit_ps_shader(struct anv_batch *batch,
ps.SamplerCount = GFX_VER == 11 ? 0 : get_sampler_count(shader);
ps.BindingTableEntryCount = shader->bind_map.surface_count;
#if GFX_VER < 20
ps.PushConstantEnable =
wm_prog_data->base.nr_params > 0 ||
wm_prog_data->base.ubo_ranges[0].length;
ps.PushConstantEnable = wm_prog_data->base.push_sizes[0] > 0;
#endif
ps.MaximumNumberofThreadsPerPSD = devinfo->max_threads_per_psd - 1;


@ -205,8 +205,7 @@ genX(emit_simpler_shader_init_fragment)(struct anv_simple_shader *state)
ps.BindingTableEntryCount = GFX_VER == 9 ? 1 : 0;
#if GFX_VER < 20
ps.PushConstantEnable = prog_data->base.nr_params > 0 ||
prog_data->base.ubo_ranges[0].length;
ps.PushConstantEnable = prog_data->base.push_sizes[0] > 0;
#endif
ps.DispatchGRFStartRegisterForConstantSetupData0 =