hasvk: use Vulkan runtime's robust buffer access

Reviewed-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21338>
Constantine Shablya authored 2023-02-15 18:35:37 +02:00, committed by Marge Bot
parent 5053527806
commit bd848ac92d
4 changed files with 16 additions and 41 deletions
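
For context: the common Vulkan runtime gathers the features enabled at vkCreateDevice time (both pEnabledFeatures and the VkPhysicalDeviceFeatures2 pNext chain) into vk_device::enabled_features while the device is initialized, which is why the driver-local robust_buffer_access flag and the hand-rolled feature walk below can be dropped. A minimal sketch of the pattern the driver ends up with; the surrounding variable names are illustrative, not the actual hasvk code:

   /* Sketch only: the runtime records the enabled features during
    * vk_device_init(); the driver just reads them back wherever it
    * previously kept its own copy. */
   VkResult result = vk_device_init(&device->vk, &physical_device->vk,
                                    &dispatch_table, pCreateInfo, pAllocator);
   if (result != VK_SUCCESS)
      return result;

   if (device->vk.enabled_features.robustBufferAccess) {
      /* e.g. pick bounds-checked UBO/SSBO address formats, pad buffer
       * sizes, hash the flag into pipeline keys, ... */
   }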


@@ -2685,28 +2685,6 @@ VkResult anv_CreateDevice(
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
-   /* Check enabled features */
-   bool robust_buffer_access = false;
-   if (pCreateInfo->pEnabledFeatures) {
-      if (pCreateInfo->pEnabledFeatures->robustBufferAccess)
-         robust_buffer_access = true;
-   }
-   vk_foreach_struct_const(ext, pCreateInfo->pNext) {
-      switch (ext->sType) {
-      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
-         const VkPhysicalDeviceFeatures2 *features = (const void *)ext;
-         if (features->features.robustBufferAccess)
-            robust_buffer_access = true;
-         break;
-      }
-      default:
-         /* Don't warn */
-         break;
-      }
-   }
    /* Check requested queues and fail if we are requested to create any
     * queues with flags we don't support.
     */
@@ -2845,8 +2823,6 @@ VkResult anv_CreateDevice(
     */
    device->can_chain_batches = device->info->ver >= 8;
-   device->robust_buffer_access = robust_buffer_access;
    if (pthread_mutex_init(&device->mutex, NULL) != 0) {
       result = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
      goto fail_vmas;
@@ -3908,7 +3884,7 @@ anv_get_buffer_memory_requirements(struct anv_device *device,
     * This would ensure that not internal padding would be needed for
     * 16-bit types.
     */
-   if (device->robust_buffer_access &&
+   if (device->vk.enabled_features.robustBufferAccess &&
        (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT ||
         usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT))
       pMemoryRequirements->memoryRequirements.size = align64(size, 4);
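
A quick illustration of what the rounding above buys (made-up numbers, not from the commit): with robustBufferAccess enabled, an 18-byte uniform or storage buffer reports a 20-byte size, so a bounds-checked 16-bit access to the last element never needs driver-inserted internal padding.

   /* Illustration only: align64() rounds up to a power-of-two alignment. */
   uint64_t size = 18;
   if (device->vk.enabled_features.robustBufferAccess)
      size = align64(size, 4);   /* 18 -> 20 */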


@@ -115,9 +115,9 @@ anv_shader_stage_to_nir(struct anv_device *device,
          .workgroup_memory_explicit_layout = true,
       },
       .ubo_addr_format =
-         anv_nir_ubo_addr_format(pdevice, device->robust_buffer_access),
+         anv_nir_ubo_addr_format(pdevice, device->vk.enabled_features.robustBufferAccess),
       .ssbo_addr_format =
-         anv_nir_ssbo_addr_format(pdevice, device->robust_buffer_access),
+         anv_nir_ssbo_addr_format(pdevice, device->vk.enabled_features.robustBufferAccess),
       .phys_ssbo_addr_format = nir_address_format_64bit_global,
       .push_const_addr_format = nir_address_format_logical,
@@ -454,7 +454,7 @@ anv_pipeline_hash_graphics(struct anv_graphics_pipeline *pipeline,
    if (layout)
       _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
-   const bool rba = pipeline->base.device->robust_buffer_access;
+   const bool rba = pipeline->base.device->vk.enabled_features.robustBufferAccess;
    _mesa_sha1_update(&ctx, &rba, sizeof(rba));
    for (uint32_t s = 0; s < ANV_GRAPHICS_SHADER_STAGE_COUNT; s++) {
@@ -482,7 +482,7 @@ anv_pipeline_hash_compute(struct anv_compute_pipeline *pipeline,
    const struct anv_device *device = pipeline->base.device;
-   const bool rba = device->robust_buffer_access;
+   const bool rba = device->vk.enabled_features.robustBufferAccess;
    _mesa_sha1_update(&ctx, &rba, sizeof(rba));
    const bool afs = device->physical->instance->assume_full_subgroups;
@@ -577,15 +577,15 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
    /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
    NIR_PASS_V(nir, anv_nir_apply_pipeline_layout,
-              pdevice, pipeline->device->robust_buffer_access,
+              pdevice, pipeline->device->vk.enabled_features.robustBufferAccess,
               layout, &stage->bind_map);
    NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_ubo,
             anv_nir_ubo_addr_format(pdevice,
-               pipeline->device->robust_buffer_access));
+               pipeline->device->vk.enabled_features.robustBufferAccess));
    NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_ssbo,
             anv_nir_ssbo_addr_format(pdevice,
-               pipeline->device->robust_buffer_access));
+               pipeline->device->vk.enabled_features.robustBufferAccess));
    /* First run copy-prop to get rid of all of the vec() that address
     * calculations often create and then constant-fold so that, when we
@@ -618,7 +618,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
    }
    NIR_PASS_V(nir, anv_nir_compute_push_layout,
-              pdevice, pipeline->device->robust_buffer_access,
+              pdevice, pipeline->device->vk.enabled_features.robustBufferAccess,
               prog_data, &stage->bind_map, mem_ctx);
    if (gl_shader_stage_uses_workgroup(nir->info.stage)) {
@@ -1097,28 +1097,28 @@ anv_graphics_pipeline_init_keys(struct anv_graphics_pipeline *pipeline,
       switch (stages[s].stage) {
       case MESA_SHADER_VERTEX:
          populate_vs_prog_key(device,
-                              pipeline->base.device->robust_buffer_access,
+                              pipeline->base.device->vk.enabled_features.robustBufferAccess,
                               &stages[s].key.vs);
          break;
       case MESA_SHADER_TESS_CTRL:
          populate_tcs_prog_key(device,
-                               pipeline->base.device->robust_buffer_access,
+                               pipeline->base.device->vk.enabled_features.robustBufferAccess,
                                state->ts->patch_control_points,
                                &stages[s].key.tcs);
          break;
       case MESA_SHADER_TESS_EVAL:
          populate_tes_prog_key(device,
-                               pipeline->base.device->robust_buffer_access,
+                               pipeline->base.device->vk.enabled_features.robustBufferAccess,
                                &stages[s].key.tes);
          break;
       case MESA_SHADER_GEOMETRY:
          populate_gs_prog_key(device,
-                              pipeline->base.device->robust_buffer_access,
+                              pipeline->base.device->vk.enabled_features.robustBufferAccess,
                               &stages[s].key.gs);
          break;
       case MESA_SHADER_FRAGMENT: {
          populate_wm_prog_key(pipeline,
-                              pipeline->base.device->robust_buffer_access,
+                              pipeline->base.device->vk.enabled_features.robustBufferAccess,
                               state->dynamic, state->ms, state->rp,
                               &stages[s].key.wm);
          break;
@@ -1497,7 +1497,7 @@ anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
    struct anv_shader_bin *bin = NULL;
-   populate_cs_prog_key(device, device->robust_buffer_access, &stage.key.cs);
+   populate_cs_prog_key(device, device->vk.enabled_features.robustBufferAccess, &stage.key.cs);
    ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);


@@ -1024,7 +1024,6 @@ struct anv_device {
    uint32_t context_id;
    int fd;
    bool can_chain_batches;
-   bool robust_buffer_access;
    pthread_mutex_t vma_mutex;
    struct util_vma_heap vma_lo;


@@ -2772,7 +2772,7 @@ cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer,
    const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;
    /* Compute robust pushed register access mask for each stage. */
-   if (cmd_buffer->device->robust_buffer_access) {
+   if (cmd_buffer->device->vk.enabled_features.robustBufferAccess) {
       anv_foreach_stage(stage, dirty_stages) {
          if (!anv_pipeline_has_stage(pipeline, stage))
            continue;