anv: fix null descriptor handling with A64 messages

global load/store (or A64 messages) need the NIR bounds checking that is
enabled by "robust" lowering, even when the robustBufferAccess feature is disabled.

Many thanks to Christopher Snowhill for pointing out the pushed
constant related issue with the initial version of this patch.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: mesa-stable
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21645>
(cherry picked from commit efcda1c530)
This commit is contained in:
Lionel Landwerlin 2023-03-22 00:22:22 +02:00 committed by Eric Engestrom
parent 0340da7c82
commit 841d63ab08
5 changed files with 28 additions and 24 deletions

View file

@ -1417,7 +1417,7 @@
"description": "anv: fix null descriptor handling with A64 messages",
"nominated": true,
"nomination_type": 0,
"resolution": 0,
"resolution": 1,
"main_sha": null,
"because_sha": null
},

View file

@ -3387,6 +3387,10 @@ VkResult anv_CreateDevice(
goto fail_internal_cache;
}
device->robust_buffer_access =
device->vk.enabled_features.robustBufferAccess ||
device->vk.enabled_features.nullDescriptor;
anv_device_init_blorp(device);
anv_device_init_border_colors(device);
@ -4312,7 +4316,7 @@ anv_get_buffer_memory_requirements(struct anv_device *device,
* This would ensure that no internal padding would be needed for
* 16-bit types.
*/
if (device->vk.enabled_features.robustBufferAccess &&
if (device->robust_buffer_access &&
(usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT ||
usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT))
pMemoryRequirements->memoryRequirements.size = align64(size, 4);

View file

@ -209,9 +209,9 @@ anv_shader_stage_to_nir(struct anv_device *device,
.fragment_shading_rate = pdevice->info.ver >= 11,
},
.ubo_addr_format =
anv_nir_ubo_addr_format(pdevice, device->vk.enabled_features.robustBufferAccess),
anv_nir_ubo_addr_format(pdevice, device->robust_buffer_access),
.ssbo_addr_format =
anv_nir_ssbo_addr_format(pdevice, device->vk.enabled_features.robustBufferAccess),
anv_nir_ssbo_addr_format(pdevice, device->robust_buffer_access),
.phys_ssbo_addr_format = nir_address_format_64bit_global,
.push_const_addr_format = nir_address_format_logical,
@ -649,7 +649,7 @@ anv_pipeline_hash_graphics(struct anv_graphics_pipeline *pipeline,
const struct anv_device *device = pipeline->base.device;
const bool rba = device->vk.enabled_features.robustBufferAccess;
const bool rba = device->robust_buffer_access;
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
for (uint32_t s = 0; s < ANV_GRAPHICS_SHADER_STAGE_COUNT; s++) {
@ -682,7 +682,7 @@ anv_pipeline_hash_compute(struct anv_compute_pipeline *pipeline,
const struct anv_device *device = pipeline->base.device;
const bool rba = device->vk.enabled_features.robustBufferAccess;
const bool rba = device->robust_buffer_access;
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
const bool afs = device->physical->instance->assume_full_subgroups;
@ -707,7 +707,7 @@ anv_pipeline_hash_ray_tracing_shader(struct anv_ray_tracing_pipeline *pipeline,
if (layout != NULL)
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
const bool rba = pipeline->base.device->vk.enabled_features.robustBufferAccess;
const bool rba = pipeline->base.device->robust_buffer_access;
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
_mesa_sha1_update(&ctx, stage->shader_sha1, sizeof(stage->shader_sha1));
@ -729,7 +729,7 @@ anv_pipeline_hash_ray_tracing_combined_shader(struct anv_ray_tracing_pipeline *p
if (layout != NULL)
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
const bool rba = pipeline->base.device->vk.enabled_features.robustBufferAccess;
const bool rba = pipeline->base.device->robust_buffer_access;
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
_mesa_sha1_update(&ctx, intersection->shader_sha1, sizeof(intersection->shader_sha1));
@ -867,15 +867,13 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
/* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
NIR_PASS_V(nir, anv_nir_apply_pipeline_layout,
pdevice, pipeline->device->vk.enabled_features.robustBufferAccess,
pdevice, pipeline->device->robust_buffer_access,
layout, &stage->bind_map);
NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_ubo,
anv_nir_ubo_addr_format(pdevice,
pipeline->device->vk.enabled_features.robustBufferAccess));
anv_nir_ubo_addr_format(pdevice, pipeline->device->robust_buffer_access));
NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_ssbo,
anv_nir_ssbo_addr_format(pdevice,
pipeline->device->vk.enabled_features.robustBufferAccess));
anv_nir_ssbo_addr_format(pdevice, pipeline->device->robust_buffer_access));
/* First run copy-prop to get rid of all of the vec() that address
* calculations often create and then constant-fold so that, when we
@ -916,7 +914,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
}
NIR_PASS_V(nir, anv_nir_compute_push_layout,
pdevice, pipeline->device->vk.enabled_features.robustBufferAccess,
pdevice, pipeline->device->robust_buffer_access,
prog_data, &stage->bind_map, mem_ctx);
if (gl_shader_stage_uses_workgroup(nir->info.stage)) {
@ -1474,40 +1472,40 @@ anv_graphics_pipeline_init_keys(struct anv_graphics_pipeline *pipeline,
switch (stages[s].stage) {
case MESA_SHADER_VERTEX:
populate_vs_prog_key(device,
pipeline->base.device->vk.enabled_features.robustBufferAccess,
pipeline->base.device->robust_buffer_access,
&stages[s].key.vs);
break;
case MESA_SHADER_TESS_CTRL:
populate_tcs_prog_key(device,
pipeline->base.device->vk.enabled_features.robustBufferAccess,
pipeline->base.device->robust_buffer_access,
state->ts->patch_control_points,
&stages[s].key.tcs);
break;
case MESA_SHADER_TESS_EVAL:
populate_tes_prog_key(device,
pipeline->base.device->vk.enabled_features.robustBufferAccess,
pipeline->base.device->robust_buffer_access,
&stages[s].key.tes);
break;
case MESA_SHADER_GEOMETRY:
populate_gs_prog_key(device,
pipeline->base.device->vk.enabled_features.robustBufferAccess,
pipeline->base.device->robust_buffer_access,
&stages[s].key.gs);
break;
case MESA_SHADER_FRAGMENT: {
populate_wm_prog_key(pipeline,
pipeline->base.device->vk.enabled_features.robustBufferAccess,
pipeline->base.device->robust_buffer_access,
state->dynamic, state->ms, state->fsr, state->rp,
&stages[s].key.wm);
break;
}
case MESA_SHADER_TASK:
populate_task_prog_key(device,
pipeline->base.device->vk.enabled_features.robustBufferAccess,
pipeline->base.device->robust_buffer_access,
&stages[s].key.task);
break;
case MESA_SHADER_MESH:
populate_mesh_prog_key(device,
pipeline->base.device->vk.enabled_features.robustBufferAccess,
pipeline->base.device->robust_buffer_access,
&stages[s].key.mesh);
break;
default:
@ -2012,7 +2010,7 @@ anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
struct anv_shader_bin *bin = NULL;
populate_cs_prog_key(device, device->vk.enabled_features.robustBufferAccess, &stage.key.cs);
populate_cs_prog_key(device, device->robust_buffer_access, &stage.key.cs);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
@ -2610,7 +2608,7 @@ anv_pipeline_init_ray_tracing_stages(struct anv_ray_tracing_pipeline *pipeline,
};
populate_bs_prog_key(pipeline->base.device,
pipeline->base.device->vk.enabled_features.robustBufferAccess,
pipeline->base.device->robust_buffer_access,
ray_flags,
&stages[i].key.bs);

View file

@ -1206,6 +1206,8 @@ struct anv_device {
*/
uint32_t empty_vs_input[2];
bool robust_buffer_access;
/** Shadow ray query BO
*
* The ray_query_bo only holds the current ray being traced. When using

View file

@ -2759,7 +2759,7 @@ cmd_buffer_flush_gfx_push_constants(struct anv_cmd_buffer *cmd_buffer,
#endif
/* Compute robust pushed register access mask for each stage. */
if (cmd_buffer->device->vk.enabled_features.robustBufferAccess) {
if (cmd_buffer->device->robust_buffer_access) {
anv_foreach_stage(stage, dirty_stages) {
if (!anv_pipeline_has_stage(pipeline, stage))
continue;