panvk: implement non-uniform indexing except for input attachments

We need to lower everything on bifrost, and everything except nonuniform
texture offsets on valhall. With texture offsets, the hardware supports
nonuniform offsets passed in a staging register, but does not support
passing nonuniform offsets as src1 in the narrow layout. We need to make
sure we check for this if we implement narrow offsetms later.

Input attachments are skipped in this commit because they need
additional changes.

Signed-off-by: Olivia Lee <olivia.lee@collabora.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Acked-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Reviewed-by: Lars-Ivar Hesselberg Simonsen <lars-ivar.simonsen@arm.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35408>
This commit is contained in:
Olivia Lee 2025-06-09 23:20:09 -07:00 committed by Marge Bot
parent e7a439f73b
commit 8e4c6990a1
3 changed files with 32 additions and 7 deletions

View file

@ -4412,7 +4412,7 @@ bi_emit_tex_valhall(bi_builder *b, nir_tex_instr *instr)
src0 = bi_imm_u32(packed_handle);
/* TODO: narrow offsetms */
/* TODO: narrow offsetms. (only when offsetms is dynamically uniform) */
src1 = bi_zero();
} else {
src0 = sampler;

View file

@ -285,13 +285,13 @@ panvk_per_arch(get_physical_device_features)(
.shaderInputAttachmentArrayDynamicIndexing = true,
.shaderUniformTexelBufferArrayDynamicIndexing = true,
.shaderStorageTexelBufferArrayDynamicIndexing = true,
.shaderUniformBufferArrayNonUniformIndexing = false,
.shaderSampledImageArrayNonUniformIndexing = false,
.shaderStorageBufferArrayNonUniformIndexing = false,
.shaderStorageImageArrayNonUniformIndexing = false,
.shaderUniformBufferArrayNonUniformIndexing = true,
.shaderSampledImageArrayNonUniformIndexing = true,
.shaderStorageBufferArrayNonUniformIndexing = true,
.shaderStorageImageArrayNonUniformIndexing = true,
.shaderInputAttachmentArrayNonUniformIndexing = false,
.shaderUniformTexelBufferArrayNonUniformIndexing = false,
.shaderStorageTexelBufferArrayNonUniformIndexing = false,
.shaderUniformTexelBufferArrayNonUniformIndexing = true,
.shaderStorageTexelBufferArrayNonUniformIndexing = true,
.descriptorBindingUniformBufferUpdateAfterBind = false,
.descriptorBindingSampledImageUpdateAfterBind = false,
.descriptorBindingStorageImageUpdateAfterBind = false,

View file

@ -822,6 +822,31 @@ panvk_lower_nir(struct panvk_device *dev, nir_shader *nir,
NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_global,
nir_address_format_64bit_global);
/* nir_lower_non_uniform_access needs to run after lowering UBO and SSBO
* IO. This means we run it after nir_lower_descriptors, which reads the
* array indices, but it's okay because lower_descriptors treats all
* dynamic indices the same. */
enum nir_lower_non_uniform_access_type lower_non_uniform_access_types =
nir_lower_non_uniform_ubo_access |
nir_lower_non_uniform_ssbo_access |
nir_lower_non_uniform_texture_access |
nir_lower_non_uniform_image_access |
nir_lower_non_uniform_get_ssbo_size;
#if PAN_ARCH <= 7
lower_non_uniform_access_types |=
nir_lower_non_uniform_texture_offset_access;
#endif
/* In practice, most shaders do not have non-uniform-qualified accesses
* thus a cheaper and likely to fail check is run first. */
if (nir_has_non_uniform_access(nir, lower_non_uniform_access_types)) {
NIR_PASS(_, nir, nir_opt_non_uniform_access);
struct nir_lower_non_uniform_access_options opts = {
.types = lower_non_uniform_access_types,
};
NIR_PASS(_, nir, nir_lower_non_uniform_access, &opts);
}
#if PAN_ARCH >= 9
NIR_PASS(_, nir, nir_shader_intrinsics_pass, valhall_lower_get_ssbo_size,
nir_metadata_control_flow, NULL);