pan/nir: Lower image queries in NIR on Valhall+

This new pass, pan_nir_lower_image(), will eventually subsume all image
lowering.  For now, though, it only lowers image_size and image_samples,
and only on Valhall.

Reviewed-by: Lorenzo Rossi <lorenzo.rossi@collabora.com>
Reviewed-by: Lars-Ivar Hesselberg Simonsen <lars-ivar.simonsen@arm.com>
Reviewed-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/41352>
This commit is contained in:
Faith Ekstrand 2026-04-30 16:24:39 -04:00 committed by Marge Bot
parent 48827ffb21
commit 8e6adcad7d
4 changed files with 78 additions and 0 deletions

View file

@ -942,6 +942,7 @@ bifrost_postprocess_nir(nir_shader *nir,
}
NIR_PASS(_, nir, pan_nir_lower_tex, gpu_id);
NIR_PASS(_, nir, pan_nir_lower_image, gpu_id);
/* Our OpenCL compiler (src/panfrost/clc/pan_compile.c) has a very weird and
* suboptimal optimization pipeline that results in a lot of unoptimized

View file

@ -11,6 +11,7 @@ libpanfrost_compiler_files = files(
'pan_nir_lower_fs_inputs.c',
'pan_nir_lower_fs_outputs.c',
'pan_nir_lower_helper_invocation.c',
'pan_nir_lower_image.c',
'pan_nir_lower_image_index.c',
'pan_nir_lower_image_ms.c',
'pan_nir_lower_noperspective.c',

View file

@ -244,6 +244,7 @@ PRAGMA_DIAGNOSTIC_POP
static_assert(sizeof(struct pan_va_tex_flags) == 4, "Must fit in uint32_t");
bool pan_nir_lower_tex(nir_shader *nir, uint64_t gpu_id);
bool pan_nir_lower_image(nir_shader *nir, uint64_t gpu_id);
nir_alu_type
pan_unpacked_type_for_format(const struct util_format_description *desc);

View file

@ -0,0 +1,75 @@
/*
* Copyright (C) 2026 Collabora, Ltd.
* SPDX-License-Identifier: MIT
*/
#include "pan_nir.h"
#include "panfrost/model/pan_model.h"
static bool
lower_image_size(nir_builder *b, nir_intrinsic_instr *intr, uint64_t gpu_id)
{
const enum glsl_sampler_dim dim = nir_intrinsic_image_dim(intr);
const bool is_array = nir_intrinsic_image_array(intr);
b->cursor = nir_before_instr(&intr->instr);
nir_def *handle = intr->src[0].ssa;
nir_def *res;
if (pan_arch(gpu_id) >= 9) {
if (dim == GLSL_SAMPLER_DIM_BUF)
res = pan_nir_load_va_buf_size_el(b, handle);
else
res = pan_nir_load_va_tex_size(b, handle, dim, is_array);
} else {
/* Not handled yet */
return false;
}
nir_def_replace(&intr->def, res);
return true;
}
static bool
lower_image_samples(nir_builder *b, nir_intrinsic_instr *intr, uint64_t gpu_id)
{
assert(nir_intrinsic_image_dim(intr) == GLSL_SAMPLER_DIM_MS);
b->cursor = nir_before_instr(&intr->instr);
nir_def *handle = intr->src[0].ssa;
nir_def *res;
if (pan_arch(gpu_id) >= 9) {
res = pan_nir_load_va_tex_samples(b, handle);
} else {
/* Not handled yet */
return false;
}
nir_def_replace(&intr->def, res);
return true;
}
static bool
lower_image_intr(nir_builder *b, nir_intrinsic_instr *intr, void *cb_data)
{
uint64_t gpu_id = *(uint64_t *)cb_data;
switch (intr->intrinsic) {
case nir_intrinsic_image_size:
return lower_image_size(b, intr, gpu_id);
case nir_intrinsic_image_samples:
return lower_image_samples(b, intr, gpu_id);
default:
return false;
}
}
bool
pan_nir_lower_image(nir_shader *nir, uint64_t gpu_id)
{
return nir_shader_intrinsics_pass(nir, lower_image_intr,
nir_metadata_none, &gpu_id);
}