panfrost: Handle pre-Valhall images and texel buffers in lower_res_indices

There's no point in having these as separate passes that live in the
compiler.  We already have lower_res_indices(), which is panfrost's
equivalent to panvk's descriptor lowering.  We can just do it there.

Reviewed-by: Ryan Mckeever <ryan.mckeever@collabora.com>
Reviewed-by: Lars-Ivar Hesselberg Simonsen <lars-ivar.simonsen@arm.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/41320>
This commit is contained in:
Faith Ekstrand 2026-05-01 14:13:34 -04:00 committed by Marge Bot
parent d0bdd18212
commit dd2d30656d
6 changed files with 125 additions and 140 deletions

View file

@ -454,8 +454,7 @@ bool panfrost_nir_remove_fragcolor_stores(nir_shader *s, unsigned nr_cbufs);
bool panfrost_nir_lower_sysvals(nir_shader *s, unsigned arch,
struct panfrost_sysvals *sysvals);
bool panfrost_nir_lower_res_indices(nir_shader *shader,
struct pan_compile_inputs *inputs);
bool panfrost_nir_lower_res_indices(nir_shader *shader, uint64_t gpu_id);
bool panfrost_nir_lower_pls(nir_shader *shader,
struct panfrost_screen *screen);

View file

@ -9,6 +9,118 @@
#include "pan_shader.h"
#include "pan_nir.h"
/* Return the attribute-index bias applied to image accesses.
 *
 * On Bifrost and Midgard, vertex shaders receive images through the
 * vertex attribute descriptor array, placed after the real vertex
 * attributes, so image indices must be shifted past every attribute the
 * shader reads.  Other stages need no bias.
 */
static unsigned
mid_image_offset(nir_shader *nir)
{
   return nir->info.stage == MESA_SHADER_VERTEX
             ? util_bitcount64(nir->info.inputs_read)
             : 0;
}
/* Return the attribute-index bias applied to texel buffer fetches.
 *
 * Bifrost (arch v6/v7) accesses texel buffers through attributes, placed
 * right after the image attributes.  Midgard (v5 and older) needs no
 * bias.  Only valid for pre-Valhall GPUs.
 */
static unsigned
mid_texel_buffer_offset(nir_shader *nir, uint64_t gpu_id)
{
   assert(pan_arch(gpu_id) <= 7);

   if (pan_arch(gpu_id) < 6)
      return 0;

   return mid_image_offset(nir) + BITSET_LAST_BIT(nir->info.images_used);
}
/* Rewrite a texel-buffer fetch so its texture index points at the
 * attribute descriptor used for the buffer.  Returns true on progress.
 */
static bool
mid_lower_tex(nir_builder *b, nir_tex_instr *tex, uint64_t gpu_id)
{
   /* Only actual fetches from buffer textures are remapped.  Queries keep
    * their original texture index because those go through the sysvals
    * table and it's simpler if they stay unchanged.
    *
    * TODO: Fix this once we have better query code.
    */
   if (tex->sampler_dim != GLSL_SAMPLER_DIM_BUF || tex->op != nir_texop_txf)
      return false;

   const unsigned bias = mid_texel_buffer_offset(b->shader, gpu_id);
   if (bias == 0)
      return false;

   tex->texture_index += bias;
   return true;
}
/* Add the per-stage image bias to an image intrinsic's handle (src[0]).
 * Returns true on progress.
 */
static bool
mid_lower_image_intrin(nir_builder *b, nir_intrinsic_instr *intrin)
{
   const unsigned bias = mid_image_offset(b->shader);
   if (bias == 0)
      return false;

   b->cursor = nir_before_instr(&intrin->instr);

   nir_src *handle = &intrin->src[0];
   nir_src_rewrite(handle, nir_iadd_imm(b, handle->ssa, bias));
   return true;
}
/* Dispatch image intrinsics to the handle-rewriting helper. */
static bool
mid_lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin)
{
   /* image_size/image_samples (and everything else) are deliberately left
    * alone.  We don't adjust the index for queries because those go
    * through the sysvals table and it's simpler if they stay the original
    * texture index.
    *
    * TODO: Fix this once we have better query code.
    */
   if (intrin->intrinsic == nir_intrinsic_image_load ||
       intrin->intrinsic == nir_intrinsic_image_store ||
       intrin->intrinsic == nir_intrinsic_image_atomic ||
       intrin->intrinsic == nir_intrinsic_image_atomic_swap)
      return mid_lower_image_intrin(b, intrin);

   return false;
}
/* Per-instruction callback for the pre-Valhall lowering pass; *data is
 * the uint64_t GPU id.
 */
static bool
mid_lower_instr(nir_builder *b, nir_instr *instr, void *data)
{
   const uint64_t gpu_id = *(uint64_t *)data;

   if (instr->type == nir_instr_type_tex)
      return mid_lower_tex(b, nir_instr_as_tex(instr), gpu_id);

   if (instr->type == nir_instr_type_intrinsic)
      return mid_lower_intrinsic(b, nir_instr_as_intrinsic(instr));

   return false;
}
/* Pre-Valhall (Midgard/Bifrost) resource-index lowering: biases image and
 * texel-buffer indices into the attribute descriptor array.
 */
static bool
mid_lower_res_indices(nir_shader *shader, uint64_t gpu_id)
{
   uint64_t id = gpu_id;

   return nir_shader_instructions_pass(shader, mid_lower_instr,
                                       nir_metadata_control_flow, &id);
}
static bool
va_lower_tex(nir_builder *b, nir_tex_instr *tex)
{
@ -140,17 +252,18 @@ va_lower_instr(nir_builder *b, nir_instr *instr, void *data)
}
}
bool
panfrost_nir_lower_res_indices(nir_shader *shader,
struct pan_compile_inputs *inputs)
static bool
va_lower_res_indices(nir_shader *shader)
{
/**
* Starting with Valhall, we are required to encode table indices by the
* compiler ABI.
*/
if (pan_arch(inputs->gpu_id) < 9)
return false;
return nir_shader_instructions_pass(shader, va_lower_instr,
nir_metadata_control_flow, NULL);
}
/* Lower resource indices for the target GPU: Valhall (v9+) encodes table
 * indices per the compiler ABI, while older GPUs route images and texel
 * buffers through vertex attributes.
 */
bool
panfrost_nir_lower_res_indices(nir_shader *shader, uint64_t gpu_id)
{
   if (pan_arch(gpu_id) < 9)
      return mid_lower_res_indices(shader, gpu_id);

   return va_lower_res_indices(shader);
}

View file

@ -184,7 +184,7 @@ panfrost_shader_compile(struct panfrost_screen *screen, const nir_shader *ir,
}
/* Lower resource indices */
NIR_PASS(_, s, panfrost_nir_lower_res_indices, &inputs);
NIR_PASS(_, s, panfrost_nir_lower_res_indices, inputs.gpu_id);
pan_postprocess_nir(s, &inputs, &out->info);
@ -554,22 +554,6 @@ panfrost_create_shader_state(struct pipe_context *pctx,
so->noperspective_varyings =
pan_nir_collect_noperspective_varyings_fs(nir);
unsigned attrib_offset = 0;
if (nir->info.stage == MESA_SHADER_VERTEX && dev->arch <= 7) {
/* Vertex shaders get passed images through the vertex attribute
* descriptor array. We need to add an offset to all image intrinsics so
* they point to the right attribute.
*/
attrib_offset += util_bitcount64(nir->info.inputs_read);
NIR_PASS(_, nir, pan_nir_lower_image_index, attrib_offset);
}
if (dev->arch >= 6 && dev->arch <= 7) {
/* Bifrost needs to use attributes to access texel buffers. We place these
* after images, which are also accessed using attributes. */
attrib_offset += BITSET_LAST_BIT(nir->info.images_used);
NIR_PASS(_, nir, pan_nir_lower_texel_buffer_fetch_index, attrib_offset);
}
/* If this shader uses transform feedback, compile the transform
* feedback program. This is a special shader variant.
*/

View file

@ -12,12 +12,10 @@ libpanfrost_compiler_files = files(
'pan_nir_lower_fs_outputs.c',
'pan_nir_lower_helper_invocation.c',
'pan_nir_lower_image.c',
'pan_nir_lower_image_index.c',
'pan_nir_lower_image_ms.c',
'pan_nir_lower_noperspective.c',
'pan_nir_lower_sample_position.c',
'pan_nir_lower_tex.c',
'pan_nir_lower_texel_buffer_index.c',
'pan_nir_lower_vertex_id.c',
'pan_nir_lower_vs_outputs.c',
'pan_nir_lower_xfb.c',

View file

@ -1,55 +0,0 @@
/*
* Copyright (C) 2024 Collabora, Ltd.
* SPDX-License-Identifier: MIT
*/
#include "compiler/nir/nir_builder.h"
#include "pan_nir.h"
/* Vertex shaders get passed image attribute descriptors through the
* vertex attribute descriptor array. This forces us to apply an offset
* to all image accesses to get the actual attribute index.
*
* The gallium driver emits the vertex attributes on each draw, and puts
* image attributes right after the vertex attributes, which implies passing
* vs_img_attrib_offset = util_bitcount64(nir->info.inputs_read).
*
* The Vulkan driver, on the other hand, uses
* VkVertexInputAttributeDescription to build a table of attributes passed
* to the shader. While there's no reason for the app to define more
* attributes than it actually uses in the vertex shader, it doesn't seem
* to be disallowed either. Not to mention that vkCmdSetVertexInputEXT()
* allows one to dynamically change the vertex input configuration, and
* possibly pass more attributes than referenced by the vertex shader bound to
* the command buffer at draw time. Of course, we could carry this information
* at the pipeline level, and re-emit the attribute array, but emitting only
* when the vertex input configuration is flagged dirty is simpler.
* In order for this to work, we use a fixed image attribute offset.
*/
/* Add the attribute offset (passed via *data) to the image handle of
 * load/store image intrinsics.  Returns true on progress.
 */
static bool
lower_image_intr(struct nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_image_load:
   case nir_intrinsic_image_store:
      break;
   default:
      return false;
   }

   const unsigned img_attr_offset = *(unsigned *)data;

   b->cursor = nir_before_instr(&intr->instr);
   nir_src_rewrite(&intr->src[0],
                   nir_iadd_imm(b, intr->src[0].ssa, img_attr_offset));
   return true;
}
/* Bias all image indices in a vertex shader by vs_img_attrib_offset so
 * they address the image descriptors placed after the vertex attributes.
 * Other stages are untouched.
 */
bool
pan_nir_lower_image_index(nir_shader *shader, unsigned vs_img_attrib_offset)
{
   /* Only vertex shaders receive images via the attribute array. */
   if (shader->info.stage != MESA_SHADER_VERTEX)
      return false;

   unsigned offset = vs_img_attrib_offset;

   return nir_shader_intrinsics_pass(shader, lower_image_intr,
                                     nir_metadata_control_flow, &offset);
}

View file

@ -1,54 +0,0 @@
/*
* Copyright (C) 2025 Arm, Ltd.
*
* Derived from pan_nir_lower_image_index.c which is:
* Copyright (C) 2024 Collabora, Ltd.
*
* SPDX-License-Identifier: MIT
*/
#include "compiler/nir/nir_builder.h"
#include "pan_nir.h"
/* Rewrite a texel-buffer fetch (txf on a BUF sampler) so its texture
 * index points at the attribute descriptor, by adding the attribute
 * offset passed via *data.
 *
 * The texture index may be supplied statically in tex->texture_index or
 * dynamically through a nir_tex_src_texture_offset source; the offset is
 * applied to whichever form is present.  Returns true on progress.
 */
static bool
lower_texel_buffer_fetch_intr(struct nir_builder *b, nir_tex_instr *tex,
                              void *data)
{
   if (tex->op != nir_texop_txf || tex->sampler_dim != GLSL_SAMPLER_DIM_BUF)
      return false;

   /* Look for a dynamic index source. */
   nir_def *index = NULL;
   unsigned index_src;
   for (index_src = 0; index_src < tex->num_srcs; ++index_src) {
      if (tex->src[index_src].src_type == nir_tex_src_texture_offset) {
         /* This should always be 0 as lower_index_to_offset is expected to
          * be set */
         assert(tex->texture_index == 0);
         index = tex->src[index_src].src.ssa;
         /* Stop scanning so index_src still refers to this source when we
          * rewrite it below.  The previous switch-based loop only broke
          * out of the switch, leaving index_src == num_srcs and indexing
          * tex->src out of bounds. */
         break;
      }
   }

   unsigned attr_offset = *(unsigned *)data;

   if (!index) {
      /* Static index: just bias texture_index. */
      tex->texture_index += attr_offset;
   } else {
      /* Dynamic index: add the offset in the IR and rewrite the source. */
      b->cursor = nir_before_instr(&tex->instr);
      index = nir_iadd_imm(b, index, attr_offset);
      nir_src_rewrite(&tex->src[index_src].src, index);
   }

   return true;
}
/* Bias every texel-buffer fetch index by attrib_offset so it addresses
 * the attribute descriptor backing the buffer.
 */
bool
pan_nir_lower_texel_buffer_fetch_index(nir_shader *shader,
                                       unsigned attrib_offset)
{
   unsigned offset = attrib_offset;

   return nir_shader_tex_pass(shader, lower_texel_buffer_fetch_intr,
                              nir_metadata_control_flow, &offset);
}