anv: Bounds-check pushed UBOs when robustBufferAccess = true

We also have to add nir_intrinsic_load_push_constant to the list of
intrinsics which use push constants in brw_nir_analyze_ubo_ranges
because we're moving the loop where we rewrite the intrinsics to after
we've analyzed UBO loads.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Tested-by: Marge Bot <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3777>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3777>
Authored by Jason Ekstrand on 2020-02-07 07:13:12 -06:00, committed by Marge Bot
parent faea84e254
commit e03f965280
5 changed files with 267 additions and 41 deletions
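
In brief, the mechanism works like this: the driver stores the bound size of each pushed UBO range in push_ubo_sizes[i] in the push constants, and the lowered shader compares each loaded component's last byte (aligned up to 32B) against that size, substituting zero when the access is out of bounds. The plain-C sketch below only models that per-component check; the helper names align_32B and pushed_ubo_component are invented for the example, while the real pass emits the equivalent logic with the NIR builder (see the diff below).

    #include <stdbool.h>
    #include <stdint.h>

    /* Align x up to a 32-byte boundary (the pass aligns to
     * ANV_UBO_BOUNDS_CHECK_ALIGNMENT, i.e. 32B, so repeated checks CSE well). */
    static uint32_t
    align_32B(uint32_t x)
    {
       return (x + 31u) & ~31u;
    }

    /* Model of one lowered component load: keep the loaded value only if the
     * component's aligned last byte lies below the pushed bound size, otherwise
     * return zero, mirroring the select the pass emits. */
    static uint32_t
    pushed_ubo_component(uint32_t loaded_value, uint32_t offset,
                         uint32_t comp_size_B, unsigned c, uint32_t bound_size_B)
    {
       const uint32_t comp_last_byte =
          align_32B(offset + (c + 1) * comp_size_B) - 1;
       const bool in_bounds = comp_last_byte < bound_size_B;
       return in_bounds ? loaded_value : 0;
    }

Out-of-bounds components therefore read back as zero, which is one of the values robustBufferAccess permits for out-of-bounds UBO reads.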


@@ -57,6 +57,7 @@ void anv_nir_apply_pipeline_layout(const struct anv_physical_device *pdevice,
                                    struct anv_pipeline_bind_map *map);
 
 void anv_nir_compute_push_layout(const struct anv_physical_device *pdevice,
+                                 bool robust_buffer_access,
                                  nir_shader *nir,
                                  struct brw_stage_prog_data *prog_data,
                                  struct anv_pipeline_bind_map *map,


@@ -22,18 +22,22 @@
  */
 
 #include "anv_nir.h"
+#include "nir_builder.h"
 #include "compiler/brw_nir.h"
 #include "util/mesa-sha1.h"
 
 void
 anv_nir_compute_push_layout(const struct anv_physical_device *pdevice,
+                            bool robust_buffer_access,
                             nir_shader *nir,
                             struct brw_stage_prog_data *prog_data,
                             struct anv_pipeline_bind_map *map,
                             void *mem_ctx)
 {
+   const struct brw_compiler *compiler = pdevice->compiler;
    memset(map->push_ranges, 0, sizeof(map->push_ranges));
 
+   bool has_const_ubo = false;
    unsigned push_start = UINT_MAX, push_end = 0;
    nir_foreach_function(function, nir) {
       if (!function->impl)
@@ -45,19 +49,46 @@ anv_nir_compute_push_layout(const struct anv_physical_device *pdevice,
                continue;
 
             nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-            if (intrin->intrinsic != nir_intrinsic_load_push_constant)
-               continue;
+            switch (intrin->intrinsic) {
+            case nir_intrinsic_load_ubo:
+               if (nir_src_is_const(intrin->src[0]) &&
+                   nir_src_is_const(intrin->src[1]))
+                  has_const_ubo = true;
+               break;
 
-            unsigned base = nir_intrinsic_base(intrin);
-            unsigned range = nir_intrinsic_range(intrin);
-            push_start = MIN2(push_start, base);
-            push_end = MAX2(push_end, base + range);
+            case nir_intrinsic_load_push_constant: {
+               unsigned base = nir_intrinsic_base(intrin);
+               unsigned range = nir_intrinsic_range(intrin);
+               push_start = MIN2(push_start, base);
+               push_end = MAX2(push_end, base + range);
+               break;
+            }
+
+            default:
+               break;
+            }
          }
       }
    }
 
    const bool has_push_intrinsic = push_start <= push_end;
 
+   const bool push_ubo_ranges =
+      (pdevice->info.gen >= 8 || pdevice->info.is_haswell) &&
+      has_const_ubo && nir->info.stage != MESA_SHADER_COMPUTE;
+
+   if (push_ubo_ranges && robust_buffer_access) {
+      /* We can't on-the-fly adjust our push ranges because doing so would
+       * mess up the layout in the shader.  When robustBufferAccess is
+       * enabled, we have to manually bounds check our pushed UBO accesses.
+       */
+      const uint32_t ubo_size_start =
+         offsetof(struct anv_push_constants, push_ubo_sizes);
+      const uint32_t ubo_size_end = ubo_size_start + (4 * sizeof(uint32_t));
+
+      push_start = MIN2(push_start, ubo_size_start);
+      push_end = MAX2(push_end, ubo_size_end);
+   }
+
    if (nir->info.stage == MESA_SHADER_COMPUTE) {
       /* For compute shaders, we always have to have the subgroup ID.  The
        * back-end compiler will "helpfully" add it for us in the last push
@@ -76,34 +107,10 @@ anv_nir_compute_push_layout(const struct anv_physical_device *pdevice,
    push_start = MIN2(push_start, push_end);
    push_start = align_down_u32(push_start, 32);
 
-   if (has_push_intrinsic) {
-      nir_foreach_function(function, nir) {
-         if (!function->impl)
-            continue;
-
-         nir_foreach_block(block, function->impl) {
-            nir_foreach_instr(instr, block) {
-               if (instr->type != nir_instr_type_intrinsic)
-                  continue;
-
-               nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-               if (intrin->intrinsic != nir_intrinsic_load_push_constant)
-                  continue;
-
-               intrin->intrinsic = nir_intrinsic_load_uniform;
-               nir_intrinsic_set_base(intrin,
-                                      nir_intrinsic_base(intrin) -
-                                      push_start);
-            }
-         }
-      }
-   }
-
    /* For vec4 our push data size needs to be aligned to a vec4 and for
     * scalar, it needs to be aligned to a DWORD.
     */
-   const unsigned align =
-      pdevice->compiler->scalar_stage[nir->info.stage] ? 4 : 16;
+   const unsigned align = compiler->scalar_stage[nir->info.stage] ? 4 : 16;
    nir->num_uniforms = ALIGN(push_end - push_start, align);
    prog_data->nr_params = nir->num_uniforms / 4;
    prog_data->param = rzalloc_array(mem_ctx, uint32_t, prog_data->nr_params);
@@ -114,10 +121,11 @@ anv_nir_compute_push_layout(const struct anv_physical_device *pdevice,
       .length = DIV_ROUND_UP(push_end - push_start, 32),
    };
 
-   if ((pdevice->info.gen >= 8 || pdevice->info.is_haswell) &&
-       nir->info.stage != MESA_SHADER_COMPUTE) {
-      brw_nir_analyze_ubo_ranges(pdevice->compiler, nir, NULL,
-                                 prog_data->ubo_ranges);
+   /* Mapping from brw_ubo_range to anv_push_range */
+   int push_range_idx_map[4] = { -1, -1, -1, -1 };
+
+   if (push_ubo_ranges) {
+      brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
 
       /* We can push at most 64 registers worth of data.  The back-end
        * compiler would do this fixup for us but we'd like to calculate
@@ -137,13 +145,19 @@ anv_nir_compute_push_layout(const struct anv_physical_device *pdevice,
         map->push_ranges[n++] = push_constant_range;
 
      for (int i = 0; i < 4; i++) {
-        const struct brw_ubo_range *ubo_range = &prog_data->ubo_ranges[i];
+        struct brw_ubo_range *ubo_range = &prog_data->ubo_ranges[i];
         if (ubo_range->length == 0)
            continue;
 
+        if (n >= 4 || (n == 3 && compiler->constant_buffer_0_is_relative)) {
+           memset(ubo_range, 0, sizeof(*ubo_range));
+           continue;
+        }
+
         const struct anv_pipeline_binding *binding =
            &map->surface_to_descriptor[ubo_range->block];
 
+        push_range_idx_map[i] = n;
         map->push_ranges[n++] = (struct anv_push_range) {
            .set = binding->set,
            .index = binding->index,
@@ -164,6 +178,133 @@ anv_nir_compute_push_layout(const struct anv_physical_device *pdevice,
       map->push_ranges[0] = push_constant_range;
    }
 
+   if (has_push_intrinsic || (push_ubo_ranges && robust_buffer_access)) {
+      nir_foreach_function(function, nir) {
+         if (!function->impl)
+            continue;
+
+         nir_builder b;
+         nir_builder_init(&b, function->impl);
+
+         nir_foreach_block(block, function->impl) {
+            nir_foreach_instr_safe(instr, block) {
+               if (instr->type != nir_instr_type_intrinsic)
+                  continue;
+
+               nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+               switch (intrin->intrinsic) {
+               case nir_intrinsic_load_ubo: {
+                  if (!robust_buffer_access)
+                     break;
+
+                  if (!nir_src_is_const(intrin->src[0]) ||
+                      !nir_src_is_const(intrin->src[1]))
+                     break;
+
+                  uint32_t index = nir_src_as_uint(intrin->src[0]);
+                  uint64_t offset = nir_src_as_uint(intrin->src[1]);
+                  uint32_t size = intrin->num_components *
+                                  (intrin->dest.ssa.bit_size / 8);
+
+                  int ubo_range_idx = -1;
+                  for (unsigned i = 0; i < 4; i++) {
+                     if (prog_data->ubo_ranges[i].length > 0 &&
+                         prog_data->ubo_ranges[i].block == index) {
+                        ubo_range_idx = i;
+                        break;
+                     }
+                  }
+
+                  if (ubo_range_idx < 0)
+                     break;
+
+                  const struct brw_ubo_range *range =
+                     &prog_data->ubo_ranges[ubo_range_idx];
+                  const uint32_t range_end =
+                     (range->start + range->length) * 32;
+                  if (range_end < offset || offset + size <= range->start)
+                     break;
+
+                  b.cursor = nir_after_instr(&intrin->instr);
+
+                  assert(push_range_idx_map[ubo_range_idx] >= 0);
+                  const uint32_t ubo_size_offset =
+                     offsetof(struct anv_push_constants, push_ubo_sizes) +
+                     push_range_idx_map[ubo_range_idx] * sizeof(uint32_t);
+
+                  nir_intrinsic_instr *load_size =
+                     nir_intrinsic_instr_create(b.shader,
+                                                nir_intrinsic_load_uniform);
+                  load_size->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
+                  nir_intrinsic_set_base(load_size,
+                                         ubo_size_offset - push_start);
+                  nir_intrinsic_set_range(load_size, 4);
+                  nir_intrinsic_set_type(load_size, nir_type_uint32);
+                  load_size->num_components = 1;
+                  nir_ssa_dest_init(&load_size->instr, &load_size->dest,
+                                    1, 32, NULL);
+                  nir_builder_instr_insert(&b, &load_size->instr);
+
+                  /* Do the size checks per-component.  Thanks to scalar block
+                   * layout, we could end up with a single vector straddling a
+                   * 32B boundary.
+                   *
+                   * We align up to 32B so that we can get better CSE.
+                   *
+                   * We check
+                   *
+                   *    offset + size - 1 < push_ubo_sizes[i]
+                   *
+                   * rather than
+                   *
+                   *    offset + size <= push_ubo_sizes[i]
+                   *
+                   * because it properly returns OOB for the case where
+                   * offset + size == 0.
+                   */
+                  nir_const_value last_byte_const[NIR_MAX_VEC_COMPONENTS];
+                  for (unsigned c = 0; c < intrin->dest.ssa.num_components; c++) {
+                     assert(intrin->dest.ssa.bit_size % 8 == 0);
+                     const unsigned comp_size_B = intrin->dest.ssa.bit_size / 8;
+                     const uint32_t comp_last_byte =
+                        align_u32(offset + (c + 1) * comp_size_B,
+                                  ANV_UBO_BOUNDS_CHECK_ALIGNMENT) - 1;
+                     last_byte_const[c] =
+                        nir_const_value_for_uint(comp_last_byte, 32);
+                  }
+
+                  nir_ssa_def *last_byte =
+                     nir_build_imm(&b, intrin->dest.ssa.num_components, 32,
+                                   last_byte_const);
+                  nir_ssa_def *in_bounds =
+                     nir_ult(&b, last_byte, &load_size->dest.ssa);
+
+                  nir_ssa_def *zero =
+                     nir_imm_zero(&b, intrin->dest.ssa.num_components,
+                                  intrin->dest.ssa.bit_size);
+                  nir_ssa_def *value =
+                     nir_bcsel(&b, in_bounds, &intrin->dest.ssa, zero);
+                  nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
+                                                 nir_src_for_ssa(value),
+                                                 value->parent_instr);
+                  break;
+               }
+
+               case nir_intrinsic_load_push_constant:
+                  intrin->intrinsic = nir_intrinsic_load_uniform;
+                  nir_intrinsic_set_base(intrin,
+                                         nir_intrinsic_base(intrin) -
+                                         push_start);
+                  break;
+
+               default:
+                  break;
+               }
+            }
+         }
+      }
+   }
+
    /* Now that we're done computing the push constant portion of the
     * bind map, hash it.  This lets us quickly determine if the actual
     * mapping has changed and not just a no-op pipeline change.
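
To make the off-by-one reasoning in the lowering code above concrete (checking offset + size - 1 < push_ubo_sizes[i] rather than offset + size <= push_ubo_sizes[i]), here is a tiny standalone check; the bound and offset values are invented for the example:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
       /* A pushed-UBO bound of 64 bytes and a constant offset chosen so that
        * offset + size wraps to 0 in 32-bit arithmetic. */
       const uint32_t bound  = 64;
       const uint32_t offset = UINT32_MAX - 3;   /* 0xFFFFFFFC */
       const uint32_t size   = 4;

       /* "offset + size <= bound" wraps to "0 <= 64" and wrongly passes... */
       assert((uint32_t)(offset + size) <= bound);
       /* ...while "offset + size - 1 < bound" stays out of bounds as intended. */
       assert(!((uint32_t)(offset + size - 1) < bound));
       return 0;
    }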


@@ -693,8 +693,8 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
                                nir_lower_non_uniform_texture_access |
                                nir_lower_non_uniform_image_access);
 
-   anv_nir_compute_push_layout(pdevice, nir, prog_data,
-                               &stage->bind_map, mem_ctx);
+   anv_nir_compute_push_layout(pdevice, pipeline->device->robust_buffer_access,
+                               nir, prog_data, &stage->bind_map, mem_ctx);
 
    stage->nir = nir;
 }


@@ -2476,6 +2476,9 @@ struct anv_push_constants {
    /** Dynamic offsets for dynamic UBOs and SSBOs */
    uint32_t dynamic_offsets[MAX_DYNAMIC_BUFFERS];
 
+   /** Pad out to a multiple of 32 bytes */
+   uint32_t push_ubo_sizes[4];
+
    struct {
       /** Base workgroup ID
        *
@@ -2489,9 +2492,6 @@ struct anv_push_constants {
        * uploading the push constants for compute shaders.
        */
       uint32_t subgroup_id;
-
-      /** Pad out to a multiple of 32 bytes */
-      uint32_t pad[4];
    } cs;
 };


@@ -2938,6 +2938,67 @@ get_push_range_address(struct anv_cmd_buffer *cmd_buffer,
    }
 }
 
+/** Returns the size in bytes of the bound buffer relative to range->start
+ *
+ * This may be smaller than range->length * 32.
+ */
+static uint32_t
+get_push_range_bound_size(struct anv_cmd_buffer *cmd_buffer,
+                          gl_shader_stage stage,
+                          const struct anv_push_range *range)
+{
+   assert(stage != MESA_SHADER_COMPUTE);
+   const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
+   switch (range->set) {
+   case ANV_DESCRIPTOR_SET_DESCRIPTORS: {
+      struct anv_descriptor_set *set =
+         gfx_state->base.descriptors[range->index];
+      assert(range->start * 32 < set->desc_mem.alloc_size);
+      assert((range->start + range->length) * 32 < set->desc_mem.alloc_size);
+      return set->desc_mem.alloc_size - range->start * 32;
+   }
+
+   case ANV_DESCRIPTOR_SET_PUSH_CONSTANTS:
+      return range->length * 32;
+
+   default: {
+      assert(range->set < MAX_SETS);
+      struct anv_descriptor_set *set =
+         gfx_state->base.descriptors[range->set];
+      const struct anv_descriptor *desc =
+         &set->descriptors[range->index];
+
+      if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
+         if (range->start * 32 > desc->buffer_view->range)
+            return 0;
+
+         return desc->buffer_view->range - range->start * 32;
+      } else {
+         assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
+
+         /* Compute the offset within the buffer */
+         struct anv_push_constants *push =
+            &cmd_buffer->state.push_constants[stage];
+         uint32_t dynamic_offset =
+            push->dynamic_offsets[range->dynamic_offset_index];
+         uint64_t offset = desc->offset + dynamic_offset;
+
+         /* Clamp to the buffer size */
+         offset = MIN2(offset, desc->buffer->size);
+
+         /* Clamp the range to the buffer size */
+         uint32_t bound_range = MIN2(desc->range, desc->buffer->size - offset);
+
+         /* Align the range for consistency */
+         bound_range = align_u32(bound_range, ANV_UBO_BOUNDS_CHECK_ALIGNMENT);
+
+         if (range->start * 32 > bound_range)
+            return 0;
+
+         return bound_range - range->start * 32;
+      }
+   }
+   }
+}
+
 static void
 cmd_buffer_emit_push_constant(struct anv_cmd_buffer *cmd_buffer,
                               gl_shader_stage stage,
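
As a worked example of the dynamic-UBO arithmetic in get_push_range_bound_size above, the standalone sketch below clamps the descriptor offset and range to the bound buffer, aligns to the 32-byte bounds-check granularity, and subtracts the start of the pushed range. It is a model only, not driver code, and the buffer and descriptor values are made up:

    #include <assert.h>
    #include <stdint.h>

    #define MIN2(a, b) ((a) < (b) ? (a) : (b))

    /* Standalone model of the dynamic-UBO branch: clamp the descriptor offset
     * and range to the bound buffer, align to 32B, then subtract the start of
     * the pushed range (range->start is counted in 32-byte registers). */
    static uint32_t
    model_dynamic_ubo_bound(uint64_t buffer_size, uint64_t desc_offset,
                            uint32_t dynamic_offset, uint64_t desc_range,
                            uint32_t range_start)
    {
       uint64_t offset = desc_offset + dynamic_offset;
       offset = MIN2(offset, buffer_size);                       /* clamp offset */
       uint64_t bound = MIN2(desc_range, buffer_size - offset);  /* clamp range  */
       bound = (bound + 31u) & ~(uint64_t)31u;                   /* align to 32B */
       if ((uint64_t)range_start * 32 > bound)
          return 0;
       return (uint32_t)(bound - range_start * 32);
    }

    int main(void)
    {
       /* A 100-byte buffer with descriptor offset 32, dynamic offset 16 and a
        * 256-byte descriptor range: 52 bytes remain, rounded up to 64.  A push
        * range starting at register 1 (32 bytes in) therefore sees 32 bytes. */
       assert(model_dynamic_ubo_bound(100, 32, 16, 256, 0) == 64);
       assert(model_dynamic_ubo_bound(100, 32, 16, 256, 1) == 32);
       return 0;
    }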
@@ -3099,7 +3160,30 @@ cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer,
       if (anv_pipeline_has_stage(pipeline, stage)) {
          const struct anv_pipeline_bind_map *bind_map =
             &pipeline->shaders[stage]->bind_map;
 
+         struct anv_push_constants *push =
+            &cmd_buffer->state.push_constants[stage];
+
+         if (cmd_buffer->device->robust_buffer_access) {
+            for (unsigned i = 0; i < 4; i++) {
+               const struct anv_push_range *range = &bind_map->push_ranges[i];
+               if (range->length == 0) {
+                  push->push_ubo_sizes[i] = 0;
+               } else {
+                  push->push_ubo_sizes[i] =
+                     get_push_range_bound_size(cmd_buffer, stage, range);
+               }
+               cmd_buffer->state.push_constants_dirty |=
+                  mesa_to_vk_shader_stage(stage);
+            }
+         }
+
+         /* We have to gather buffer addresses as a second step because the
+          * loop above puts data into the push constant area and the call to
+          * get_push_range_address is what locks our push constants and copies
+          * them into the actual GPU buffer.  If we did the two loops at the
+          * same time, we'd risk only having some of the sizes in the push
+          * constant buffer when we did the copy.
+          */
          for (unsigned i = 0; i < 4; i++) {
             const struct anv_push_range *range = &bind_map->push_ranges[i];
             if (range->length == 0)