pvr, pco: add primitive support for VK_KHR_robustness2.nullDescriptor

Signed-off-by: Simon Perretta <simon.perretta@imgtec.com>
Acked-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/37512>
Authored by Simon Perretta on 2025-09-05 15:13:32 +01:00; committed by Marge Bot.
parent 2a7ebf2ae0
commit a1acd6f8d1
13 changed files with 334 additions and 26 deletions

View file

@ -654,7 +654,7 @@ Khronos extensions that are not part of any Vulkan version:
VK_EXT_provoking_vertex DONE (anv, hasvk, hk, lvp, nvk, panvk, pvr, radv, tu, v3dv, vn)
VK_EXT_queue_family_foreign DONE (anv, hasvk, hk, nvk, lvp, panvk, pvr, radv, tu, v3dv, vn)
VK_EXT_rasterization_order_attachment_access DONE (lvp, tu, vn)
VK_EXT_robustness2 DONE (anv, hasvk, hk, lvp, nvk, panvk/v10+, radv, tu, vn)
VK_EXT_robustness2 DONE (anv, hasvk, hk, lvp, nvk, panvk/v10+, pvr, radv, tu, vn)
VK_EXT_sample_locations DONE (anv, hasvk, hk, nvk, radv, tu/a650+, vn, lavapipe)
VK_EXT_shader_atomic_float DONE (anv, hasvk, lvp, radv, tu, vn)
VK_EXT_shader_atomic_float2 DONE (anv, lvp, radv, vn)

View file

@ -2798,3 +2798,6 @@ intrinsic("load_fs_coeffs_pco", dest_comp=3, flags=[CAN_ELIMINATE, CAN_REORDER],
# load_packed_sample_location_pco(dword_index)
intrinsic("load_packed_sample_location_pco", src_comp=[1], dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER], bit_sizes=[32])
# src[] = { buffer_index/deref }.
intrinsic("is_null_descriptor", src_comp=[-1], dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER], bit_sizes=[1])

View file

@ -1043,6 +1043,8 @@ nir_get_io_index_src_number(const nir_intrinsic_instr *instr)
switch (instr->intrinsic) {
case nir_intrinsic_load_ubo:
case nir_intrinsic_load_ssbo:
case nir_intrinsic_get_ubo_size:
case nir_intrinsic_get_ssbo_size:
case nir_intrinsic_load_input_vertex:
case nir_intrinsic_load_per_vertex_input:
case nir_intrinsic_load_per_vertex_output:

View file

@ -18,6 +18,7 @@ libpowervr_compiler_files = files(
'pco_nir.c',
'pco_nir_compute.c',
'pco_nir_io.c',
'pco_nir_lower_null_descriptors.c',
'pco_nir_pvfio.c',
'pco_nir_sync.c',
'pco_nir_tex.c',

View file

@ -223,6 +223,7 @@ typedef struct _pco_common_data {
} uses;
bool robust_buffer_access;
bool null_descriptor;
bool image_2d_view_of_3d;
bool multiview;
} pco_common_data;

View file

@ -1782,6 +1782,20 @@ bool pco_ra(pco_shader *shader);
bool pco_schedule(pco_shader *shader);
bool pco_shrink_vecs(pco_shader *shader);
typedef enum {
pco_nir_lower_null_descriptor_ubo = (1 << 0),
pco_nir_lower_null_descriptor_ssbo = (1 << 1),
pco_nir_lower_null_descriptor_global = (1 << 2),
pco_nir_lower_null_descriptor_texture = (1 << 3),
pco_nir_lower_null_descriptor_image = (1 << 4),
pco_nir_lower_null_descriptor_all = BITFIELD_MASK(5),
} pco_nir_lower_null_descriptor_options;
bool pco_nir_lower_null_descriptors(
nir_shader *shader,
pco_nir_lower_null_descriptor_options options);
/**
* \brief Returns the PCO bits for a bit size.
*

View file

@ -57,6 +57,7 @@ static const nir_shader_compiler_options nir_options = {
.lower_uadd_carry = true,
.lower_uadd_sat = true,
.lower_usub_borrow = true,
.lower_usub_sat = true,
.lower_mul_2x32_64 = true,
.compact_arrays = true,
.scalarize_ddx = true,
@ -839,6 +840,13 @@ void pco_lower_nir(pco_ctx *ctx, nir_shader *nir, pco_data *data)
if (data->common.robust_buffer_access)
NIR_PASS(_, nir, nir_lower_robust_access, robustness_filter, NULL);
if (data->common.null_descriptor) {
NIR_PASS(_,
nir,
pco_nir_lower_null_descriptors,
pco_nir_lower_null_descriptor_all);
}
NIR_PASS(_, nir, pco_nir_lower_vk, data);
NIR_PASS(_, nir, pco_nir_lower_io);

View file

@ -0,0 +1,145 @@
/*
* Copyright © 2025 Imagination Technologies Ltd.
* Copyright (C) 2020-2021 Collabora, Ltd.
* Copyright © 2020 Valve Corporation
*
* SPDX-License-Identifier: MIT
*/
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "pco_internal.h"
/**
 * \brief Classifies an instruction as a null-descriptor lowering candidate.
 *
 * On success, returns an SSA boolean that is true when the descriptor backing
 * \p instr is null, and stores the instruction's result def (or NULL if it
 * produces no value) in \p def. Returns NULL when \p instr is not a
 * descriptor access, or when its category is masked out by \p options.
 */
static nir_def *get_is_null(nir_builder *b,
                            nir_instr *instr,
                            nir_def **def,
                            pco_nir_lower_null_descriptor_options options)
{
   *def = NULL;

   /* Texture ops: the descriptor comes in via a texture deref source. */
   if (instr->type == nir_instr_type_tex) {
      if (!(options & pco_nir_lower_null_descriptor_texture))
         return NULL;

      nir_tex_instr *tex = nir_instr_as_tex(instr);
      nir_def *tex_deref = nir_get_tex_src(tex, nir_tex_src_texture_deref);
      if (!tex_deref)
         return NULL;

      *def = &tex->def;
      return nir_is_null_descriptor(b, tex_deref);
   }

   if (instr->type != nir_instr_type_intrinsic)
      return NULL;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   /* Image derefs carry the descriptor in src[0]; everything else exposes it
    * through the I/O index source.
    */
   bool is_deref = false;
   pco_nir_lower_null_descriptor_options required;

   switch (intr->intrinsic) {
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_levels:
   case nir_intrinsic_image_deref_samples:
   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_atomic:
   case nir_intrinsic_image_deref_atomic_swap:
      required = pco_nir_lower_null_descriptor_image;
      is_deref = true;
      break;

   case nir_intrinsic_load_global:
   case nir_intrinsic_load_global_2x32:
   case nir_intrinsic_load_global_constant:
   case nir_intrinsic_global_atomic:
   case nir_intrinsic_global_atomic_2x32:
   case nir_intrinsic_global_atomic_swap:
   case nir_intrinsic_global_atomic_swap_2x32:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_global_2x32:
      required = pco_nir_lower_null_descriptor_global;
      break;

   case nir_intrinsic_get_ubo_size:
   case nir_intrinsic_load_ubo:
      required = pco_nir_lower_null_descriptor_ubo;
      break;

   case nir_intrinsic_get_ssbo_size:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_ssbo_atomic:
   case nir_intrinsic_ssbo_atomic_swap:
   case nir_intrinsic_store_ssbo:
      required = pco_nir_lower_null_descriptor_ssbo;
      break;

   default:
      return NULL;
   }

   if (!(options & required))
      return NULL;

   nir_src *index = nir_get_io_index_src(intr);
   assert(index || is_deref);

   if (nir_intrinsic_infos[intr->intrinsic].has_dest)
      *def = &intr->def;

   return nir_is_null_descriptor(b, is_deref ? intr->src[0].ssa : index->ssa);
}
/* Wraps a descriptor-backed access in a null check:
 *
 *    result = access(desc)
 *
 * becomes
 *
 *    if (!is_null_descriptor(desc))
 *       result = access(desc)     <- original instruction, moved
 *    else
 *       result = 0
 *
 * Returns true if the instruction was lowered.
 */
static bool lower(nir_builder *b, nir_instr *instr, void *data)
{
pco_nir_lower_null_descriptor_options *options = data;
b->cursor = nir_before_instr(instr);
nir_def *def;
/* def receives the instruction's result (NULL for result-less stores). */
nir_def *is_null = get_is_null(b, instr, &def, *options);
if (!is_null)
return false;
nir_def *zero = NULL;
nir_if *nif = nir_push_if(b, nir_inot(b, is_null));
/* Move the original instruction into the then-branch; remove must precede
 * re-insert so the builder places it at the current (then-block) cursor.
 */
nir_instr_remove(instr);
nir_builder_instr_insert(b, instr);
if (def) {
/* Null descriptor: the access yields all-zeros. */
nir_push_else(b, nif);
zero = nir_imm_zero(b, def->num_components, def->bit_size);
}
nir_pop_if(b, nif);
if (def) {
nir_def *phi = nir_if_phi(b, def, zero);
/* We can't use nir_def_rewrite_uses_after on phis, so use the global
 * version and fixup the phi manually
 */
nir_def_rewrite_uses(def, phi);
nir_instr *phi_instr = phi->parent_instr;
nir_phi_instr *phi_as_phi = nir_instr_as_phi(phi_instr);
/* The global rewrite also replaced the phi's own then-branch source;
 * point it back at the original def.
 */
nir_phi_src *phi_src =
nir_phi_get_src_from_block(phi_as_phi, instr->block);
nir_src_rewrite(&phi_src->src, def);
}
return true;
}
/**
 * \brief Runs the null-descriptor lowering pass over a shader.
 *
 * \param[in,out] shader   NIR shader to lower.
 * \param[in] options      Mask selecting which descriptor categories
 *                         (UBO/SSBO/global/texture/image) to lower.
 * \return True if any instruction was lowered.
 */
bool pco_nir_lower_null_descriptors(
   nir_shader *shader,
   pco_nir_lower_null_descriptor_options options)
{
   return nir_shader_instructions_pass(shader, lower, nir_metadata_none, &options);
}

View file

@ -220,6 +220,31 @@ lower_image_derefs(nir_builder *b, nir_intrinsic_instr *intr, pco_data *data)
return NIR_LOWER_INSTR_PROGRESS;
}
/**
 * \brief Lowers the deref source of an is_null_descriptor intrinsic to a
 *        packed (descriptor-set/binding, array-element) vec2 index.
 *
 * Returns NIR_LOWER_INSTR_PROGRESS on success, or NULL when the source is
 * not a deref (that case is handled by lower_load_vulkan_descriptor).
 */
static nir_def *lower_is_null_descriptor(nir_builder *b,
                                         nir_intrinsic_instr *intr)
{
   nir_src *deref_src = &intr->src[0];
   nir_deref_instr *deref = nir_src_as_deref(*deref_src);

   /* Will be taken care of by lower_load_vulkan_descriptor. */
   if (deref == NULL)
      return NULL;

   b->cursor = nir_before_instr(&intr->instr);

   nir_variable *var = nir_deref_instr_get_variable(deref);
   assert(var);

   /* Pack set+binding into one dword; the array element rides alongside. */
   const uint32_t packed_desc =
      pco_pack_desc(var->data.descriptor_set, var->data.binding);
   nir_def *elem = array_elem_from_deref(b, deref);

   nir_src_rewrite(deref_src,
                   nir_vec2(b, nir_imm_int(b, packed_desc), elem));

   return NIR_LOWER_INSTR_PROGRESS;
}
/**
* \brief Lowers a Vulkan-related instruction.
*
@ -247,6 +272,9 @@ static nir_def *lower_vk(nir_builder *b, nir_instr *instr, void *cb_data)
case nir_intrinsic_image_deref_size:
return lower_image_derefs(b, intr, data);
case nir_intrinsic_is_null_descriptor:
return lower_is_null_descriptor(b, intr);
default:
break;
}
@ -281,6 +309,7 @@ static bool is_vk(const nir_instr *instr, UNUSED const void *cb_data)
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_load_vulkan_descriptor:
case nir_intrinsic_is_null_descriptor:
case nir_intrinsic_image_deref_load:
case nir_intrinsic_image_deref_store:
case nir_intrinsic_image_deref_atomic:

View file

@ -724,6 +724,7 @@ static unsigned fetch_resource_base_reg(const pco_common_data *common,
unsigned desc_set,
unsigned binding,
unsigned elem,
unsigned *stride,
bool *is_img_smp)
{
const pco_range *range;
@ -755,6 +756,9 @@ static unsigned fetch_resource_base_reg(const pco_common_data *common,
*is_img_smp = binding_data->is_img_smp;
}
if (stride)
*stride = range->stride;
unsigned reg_offset = elem * range->stride;
assert(reg_offset < range->count);
@ -765,13 +769,19 @@ static unsigned fetch_resource_base_reg(const pco_common_data *common,
static unsigned fetch_resource_base_reg_packed(const pco_common_data *common,
uint32_t packed_desc,
unsigned elem,
unsigned *stride,
bool *is_img_smp)
{
unsigned desc_set;
unsigned binding;
pco_unpack_desc(packed_desc, &desc_set, &binding);
return fetch_resource_base_reg(common, desc_set, binding, elem, is_img_smp);
return fetch_resource_base_reg(common,
desc_set,
binding,
elem,
stride,
is_img_smp);
}
/**
@ -1119,7 +1129,7 @@ static pco_instr *trans_load_buffer(trans_ctx *tctx,
uint32_t packed_desc = nir_src_comp_as_uint(intr->src[0], 0);
unsigned elem = nir_src_comp_as_uint(intr->src[0], 1);
unsigned sh_index =
fetch_resource_base_reg_packed(common, packed_desc, elem, NULL);
fetch_resource_base_reg_packed(common, packed_desc, elem, NULL, NULL);
pco_ref base_addr[2];
pco_ref_hwreg_addr_comps(sh_index, PCO_REG_CLASS_SHARED, base_addr);
@ -1175,7 +1185,7 @@ trans_get_buffer_size(trans_ctx *tctx, nir_intrinsic_instr *intr, pco_ref dest)
uint32_t packed_desc = nir_src_comp_as_uint(intr->src[0], 0);
unsigned elem = nir_src_comp_as_uint(intr->src[0], 1);
unsigned sh_index =
fetch_resource_base_reg_packed(common, packed_desc, elem, NULL);
fetch_resource_base_reg_packed(common, packed_desc, elem, NULL, NULL);
pco_ref size_reg = pco_ref_hwreg(sh_index, PCO_REG_CLASS_SHARED);
size_reg = pco_ref_offset(size_reg, 2);
@ -1196,7 +1206,7 @@ static pco_instr *trans_store_buffer(trans_ctx *tctx,
uint32_t packed_desc = nir_src_comp_as_uint(intr->src[1], 0);
unsigned elem = nir_src_comp_as_uint(intr->src[1], 1);
unsigned sh_index =
fetch_resource_base_reg_packed(common, packed_desc, elem, NULL);
fetch_resource_base_reg_packed(common, packed_desc, elem, NULL, NULL);
pco_ref base_addr[2];
pco_ref_hwreg_addr_comps(sh_index, PCO_REG_CLASS_SHARED, base_addr);
@ -1275,7 +1285,7 @@ static pco_instr *trans_atomic_buffer(trans_ctx *tctx,
uint32_t packed_desc = nir_src_comp_as_uint(intr->src[0], 0);
unsigned elem = nir_src_comp_as_uint(intr->src[0], 1);
unsigned sh_index =
fetch_resource_base_reg_packed(common, packed_desc, elem, NULL);
fetch_resource_base_reg_packed(common, packed_desc, elem, NULL, NULL);
pco_ref base_addr[2];
pco_ref_hwreg_addr_comps(sh_index, PCO_REG_CLASS_SHARED, base_addr);
@ -1554,8 +1564,12 @@ static pco_instr *lower_load_tex_smp_state(trans_ctx *tctx,
const pco_common_data *common = &tctx->shader->data.common;
bool is_img_smp;
unsigned sh_index =
fetch_resource_base_reg(common, desc_set, binding, elem, &is_img_smp);
unsigned sh_index = fetch_resource_base_reg(common,
desc_set,
binding,
elem,
NULL,
&is_img_smp);
pco_ref state_words =
pco_ref_hwreg_vec(sh_index, PCO_REG_CLASS_SHARED, chans);
@ -1591,8 +1605,12 @@ static pco_instr *lower_load_tex_smp_meta(trans_ctx *tctx,
const pco_common_data *common = &tctx->shader->data.common;
bool is_img_smp;
unsigned sh_index =
fetch_resource_base_reg(common, desc_set, binding, elem, &is_img_smp);
unsigned sh_index = fetch_resource_base_reg(common,
desc_set,
binding,
elem,
NULL,
&is_img_smp);
pco_ref state_words =
pco_ref_hwreg_vec(sh_index, PCO_REG_CLASS_SHARED, chans);
@ -2003,6 +2021,56 @@ static pco_instr *trans_intr(trans_ctx *tctx, nir_intrinsic_instr *intr)
instr = trans_global_atomic_buffer(tctx, intr, dest, src[0]);
break;
case nir_intrinsic_is_null_descriptor: {
const pco_common_data *common = &tctx->shader->data.common;
uint32_t packed_desc = nir_src_comp_as_uint(intr->src[0], 0);
unsigned elem = nir_src_comp_as_uint(intr->src[0], 1);
unsigned stride;
bool is_img_smp;
unsigned sh_index = fetch_resource_base_reg_packed(common,
packed_desc,
elem,
&stride,
&is_img_smp);
if (is_img_smp)
stride = ROGUE_NUM_TEXSTATE_DWORDS;
pco_ref all_words_zero;
for (unsigned u = 0; u < stride; ++u) {
pco_ref word = pco_ref_hwreg(sh_index + u, PCO_REG_CLASS_SHARED);
pco_ref word_is_zero =
pco_ref_new_ssa(tctx->func, pco_ref_get_bits(dest), 1);
pco_tstz(&tctx->b,
word_is_zero,
pco_ref_null(),
word,
.tst_type_main = PCO_TST_TYPE_MAIN_U32);
if (!u) {
all_words_zero = word_is_zero;
continue;
}
pco_ref _all_words_zero =
pco_ref_new_ssa(tctx->func, pco_ref_get_bits(dest), 1);
pco_logical(&tctx->b,
_all_words_zero,
pco_ref_null(),
all_words_zero,
pco_ref_null(),
word_is_zero,
.logiop = PCO_LOGIOP_AND);
all_words_zero = _all_words_zero;
}
instr = pco_mov(&tctx->b, dest, all_words_zero);
break;
}
case nir_intrinsic_load_scratch:
instr = trans_scratch(tctx, dest, src[0], src[1]);
break;

View file

@ -467,8 +467,8 @@ pvr_AllocateDescriptorSets(VkDevice _device,
for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
VK_FROM_HANDLE(pvr_descriptor_set_layout,
layout,
pAllocateInfo->pSetLayouts[i]);
layout,
pAllocateInfo->pSetLayouts[i]);
struct pvr_descriptor_set *set;
result = pvr_descriptor_set_create(device, pool, layout, &set);
@ -519,10 +519,16 @@ write_buffer(const struct pvr_descriptor_set *set,
const struct pvr_descriptor_set_layout_binding *binding,
uint32_t elem)
{
VK_FROM_HANDLE(pvr_buffer, buffer, buffer_info->buffer);
const unsigned desc_offset = binding->offset + (elem * binding->stride);
void *desc_mapping = (uint8_t *)set->mapping + desc_offset;
if (buffer_info->buffer == VK_NULL_HANDLE) {
memset(desc_mapping, 0, sizeof(struct pvr_buffer_descriptor));
return;
}
VK_FROM_HANDLE(pvr_buffer, buffer, buffer_info->buffer);
const pvr_dev_addr_t buffer_addr =
PVR_DEV_ADDR_OFFSET(buffer->dev_addr, buffer_info->offset);
@ -543,12 +549,18 @@ write_dynamic_buffer(struct pvr_descriptor_set *set,
const struct pvr_descriptor_set_layout_binding *binding,
uint32_t elem)
{
VK_FROM_HANDLE(pvr_buffer, buffer, buffer_info->buffer);
assert(binding->dynamic_buffer_idx != ~0);
const unsigned desc_offset = binding->dynamic_buffer_idx + elem;
struct pvr_buffer_descriptor *desc_mapping =
&set->dynamic_buffers[desc_offset];
if (buffer_info->buffer == VK_NULL_HANDLE) {
memset(desc_mapping, 0, sizeof(*desc_mapping));
return;
}
VK_FROM_HANDLE(pvr_buffer, buffer, buffer_info->buffer);
const pvr_dev_addr_t buffer_addr =
PVR_DEV_ADDR_OFFSET(buffer->dev_addr, buffer_info->offset);
@ -587,20 +599,23 @@ write_image_sampler(const struct pvr_descriptor_set *set,
const struct pvr_descriptor_set_layout_binding *binding,
uint32_t elem)
{
VK_FROM_HANDLE(pvr_sampler, info_sampler, image_info->sampler);
VK_FROM_HANDLE(pvr_image_view, image_view, image_info->imageView);
const unsigned desc_offset = binding->offset + (elem * binding->stride);
void *desc_mapping = (uint8_t *)set->mapping + desc_offset;
struct pvr_combined_image_sampler_descriptor image_sampler_desc = { 0 };
VK_FROM_HANDLE(pvr_sampler, info_sampler, image_info->sampler);
struct pvr_sampler *sampler = binding->immutable_sampler_count
? binding->immutable_samplers[elem]
: info_sampler;
struct pvr_combined_image_sampler_descriptor image_sampler_desc = {
.image = image_view->image_state[PVR_TEXTURE_STATE_SAMPLE],
.sampler = sampler->descriptor,
};
image_sampler_desc.sampler = sampler->descriptor;
if (image_info->imageView != VK_NULL_HANDLE) {
VK_FROM_HANDLE(pvr_image_view, image_view, image_info->imageView);
image_sampler_desc.image =
image_view->image_state[PVR_TEXTURE_STATE_SAMPLE];
}
memcpy(desc_mapping, &image_sampler_desc, sizeof(image_sampler_desc));
}
@ -647,14 +662,19 @@ write_storage_image(const struct pvr_descriptor_set *set,
uint32_t elem,
const struct pvr_device_info *dev_info)
{
const unsigned desc_offset = binding->offset + (elem * binding->stride);
void *desc_mapping = (uint8_t *)set->mapping + desc_offset;
if (image_info->imageView == VK_NULL_HANDLE) {
memset(desc_mapping, 0, sizeof(struct pvr_image_descriptor));
return;
}
VK_FROM_HANDLE(pvr_image_view, image_view, image_info->imageView);
bool is_cube = image_view->vk.view_type == VK_IMAGE_VIEW_TYPE_CUBE ||
image_view->vk.view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
const unsigned desc_offset = binding->offset + (elem * binding->stride);
void *desc_mapping = (uint8_t *)set->mapping + desc_offset;
struct pvr_image_descriptor storage_image_desc =
image_view->image_state[is_cube ? PVR_TEXTURE_STATE_STORAGE
: PVR_TEXTURE_STATE_SAMPLE];
@ -680,11 +700,15 @@ write_buffer_view(const struct pvr_descriptor_set *set,
bool is_texel_buffer,
const struct pvr_device_info *dev_info)
{
VK_FROM_HANDLE(pvr_buffer_view, buffer_view, _buffer_view);
const unsigned desc_offset = binding->offset + (elem * binding->stride);
void *desc_mapping = (uint8_t *)set->mapping + desc_offset;
if (_buffer_view == VK_NULL_HANDLE) {
memset(desc_mapping, 0, sizeof(struct pvr_image_descriptor));
return;
}
VK_FROM_HANDLE(pvr_buffer_view, buffer_view, _buffer_view);
struct pvr_image_descriptor buffer_view_state = buffer_view->image_state;
if (is_texel_buffer &&

View file

@ -195,6 +195,7 @@ static void pvr_physical_device_get_supported_extensions(
.KHR_multiview = true,
.KHR_present_id2 = PVR_USE_WSI_PLATFORM,
.KHR_present_wait2 = PVR_USE_WSI_PLATFORM,
.KHR_robustness2 = true,
.KHR_separate_depth_stencil_layouts = true,
.KHR_shader_draw_parameters = true,
.KHR_shader_expect_assume = false,
@ -223,6 +224,7 @@ static void pvr_physical_device_get_supported_extensions(
.EXT_physical_device_drm = true,
.EXT_private_data = true,
.EXT_provoking_vertex = true,
.EXT_robustness2 = true,
.EXT_queue_family_foreign = true,
.EXT_separate_stencil_usage = true,
.EXT_scalar_block_layout = true,
@ -371,6 +373,11 @@ static void pvr_physical_device_get_supported_features(
/* Vulkan 1.2 / VK_KHR_shader_subgroup_extended_types */
.shaderSubgroupExtendedTypes = true,
/* VK_KHR_robustness2 */
.robustBufferAccess2 = false,
.robustImageAccess2 = false,
.nullDescriptor = true,
/* Vulkan 1.2 / VK_KHR_uniform_buffer_standard_layout */
.uniformBufferStandardLayout = true,
@ -759,6 +766,10 @@ static bool pvr_physical_device_get_properties(
.provokingVertexModePerPipeline = true,
.transformFeedbackPreservesTriangleFanProvokingVertex = false,
/* VK_KHR_robustness2 */
.robustStorageBufferAccessSizeAlignment = PVR_STORAGE_BUFFER_OFFSET_ALIGNMENT,
.robustUniformBufferAccessSizeAlignment = PVR_STORAGE_BUFFER_OFFSET_ALIGNMENT,
/* Vulkan 1.2 / VK_KHR_shader_float_controls */
.denormBehaviorIndependence =
VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY,

View file

@ -2253,6 +2253,8 @@ static void pvr_init_descriptors(pco_data *data,
data->common.robust_buffer_access =
device->vk.enabled_features.robustBufferAccess;
data->common.null_descriptor = device->vk.enabled_features.nullDescriptor;
data->common.image_2d_view_of_3d =
device->vk.enabled_features.image2DViewOf3D;