asahi: delete layer id code

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/28483>
Author: Alyssa Rosenzweig, 2024-03-15 16:28:03 -04:00 (committed by Marge Bot)
parent 6a63fffeaa
commit 70395c1ac1
12 changed files with 28 additions and 126 deletions

View file

@@ -1303,6 +1303,9 @@ agx_emit_intrinsic(agx_builder *b, nir_intrinsic_instr *instr)
    case nir_intrinsic_load_local_invocation_index:
       return agx_get_sr_to(b, dst, AGX_SR_THREAD_INDEX_IN_THREADGROUP);

+   case nir_intrinsic_load_layer_id:
+      return agx_get_sr_to(b, dst, AGX_SR_THREADGROUP_POSITION_IN_GRID_Z);
+
    case nir_intrinsic_barrier: {
       assert(!b->shader->is_preamble && "invalid");
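The change in a nutshell: the layer ID becomes the generic load_layer_id NIR intrinsic, and the case added above lowers it to a single system-register read. A minimal sketch of the producer side, assuming a nir_builder positioned in any AGX fragment, background, or end-of-tile pass; emit_layer_id is a hypothetical name, not part of this commit:

#include "nir_builder.h"

/* Hypothetical illustration: passes no longer thread a layer value through
 * helper parameters; they materialize it wherever it is needed, and
 * agx_emit_intrinsic maps it onto AGX_SR_THREADGROUP_POSITION_IN_GRID_Z. */
static nir_def *
emit_layer_id(nir_builder *b)
{
   return nir_load_layer_id(b);
}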

View file

@@ -124,7 +124,11 @@ interpolate_channel(nir_builder *b, nir_intrinsic_instr *load, unsigned channel)
    if (load->intrinsic == nir_intrinsic_load_input) {
       assert(load->def.bit_size == 32);
-      return interpolate_flat(b, coefficients);
+
+      if (nir_intrinsic_io_semantics(load).location == VARYING_SLOT_LAYER)
+         return nir_load_layer_id(b);
+      else
+         return interpolate_flat(b, coefficients);
    } else {
       nir_intrinsic_instr *bary = nir_src_as_intrinsic(load->src[0]);

View file

@@ -37,7 +37,7 @@ agx_compile_meta_shader(struct agx_meta_cache *cache, nir_shader *shader,
    agx_preprocess_nir(shader, cache->dev->libagx);
    if (tib) {
       unsigned bindless_base = 0;
-      agx_nir_lower_tilebuffer(shader, tib, NULL, &bindless_base, NULL, true);
+      agx_nir_lower_tilebuffer(shader, tib, NULL, &bindless_base, NULL);
       agx_nir_lower_monolithic_msaa(
          shader, &(struct agx_msaa_state){.nr_samples = tib->nr_samples});
       agx_nir_lower_multisampled_image_store(shader);
@@ -69,7 +69,7 @@ build_background_op(nir_builder *b, enum agx_meta_op op, unsigned rt,
    if (layered) {
       coord = nir_vec3(b, nir_channel(b, coord, 0), nir_channel(b, coord, 1),
-                       agx_internal_layer_id(b));
+                       nir_load_layer_id(b));
    }

    nir_tex_instr *tex = nir_tex_instr_create(b->shader, 2);
@@ -167,7 +167,7 @@ agx_build_end_of_tile_shader(struct agx_meta_cache *cache,
    nir_def *layer = nir_undef(&b, 1, 16);
    if (key->tib.layered)
-      layer = nir_u2u16(&b, agx_internal_layer_id(&b));
+      layer = nir_u2u16(&b, nir_load_layer_id(&b));

    nir_block_image_store_agx(
       &b, nir_imm_int(&b, rt), nir_imm_intN_t(&b, offset_B, 16), layer,

View file

@@ -24,7 +24,6 @@ struct ctx {
    bool *translucent;
    unsigned bindless_base;
    bool any_memory_stores;
-   bool layer_id_sr;
    uint8_t outputs_written;
 };
@@ -153,21 +152,15 @@ dim_for_rt(nir_builder *b, unsigned nr_samples, nir_def **sample)
 }

 static nir_def *
-image_coords(nir_builder *b, nir_def *layer_id)
+image_coords(nir_builder *b)
 {
-   nir_def *xy = nir_u2u32(b, nir_load_pixel_coord(b));
-   nir_def *vec = nir_pad_vector(b, xy, 4);
-
-   if (layer_id)
-      vec = nir_vector_insert_imm(b, vec, layer_id, 2);
-
-   return vec;
+   nir_def *xy__ = nir_pad_vec4(b, nir_u2u32(b, nir_load_pixel_coord(b)));
+   return nir_vector_insert_imm(b, xy__, nir_load_layer_id(b), 2);
 }

 static void
 store_memory(nir_builder *b, unsigned bindless_base, unsigned nr_samples,
-             nir_def *layer_id, enum pipe_format format, unsigned rt,
-             nir_def *value)
+             enum pipe_format format, unsigned rt, nir_def *value)
 {
    /* Force bindless for multisampled image writes since they will be lowered
     * with a descriptor crawl later.
@@ -179,7 +172,7 @@ store_memory(nir_builder *b, unsigned bindless_base, unsigned nr_samples,
    nir_def *sample;
    enum glsl_sampler_dim dim = dim_for_rt(b, nr_samples, &sample);
-   nir_def *coords = image_coords(b, layer_id);
+   nir_def *coords = image_coords(b);

    nir_begin_invocation_interlock(b);
@@ -208,7 +201,7 @@ store_memory(nir_builder *b, unsigned bindless_base, unsigned nr_samples,
 static nir_def *
 load_memory(nir_builder *b, unsigned bindless_base, unsigned nr_samples,
-            nir_def *layer_id, uint8_t comps, uint8_t bit_size, unsigned rt,
+            uint8_t comps, uint8_t bit_size, unsigned rt,
             enum pipe_format format)
 {
    bool bindless = false;
@@ -218,7 +211,7 @@ load_memory(nir_builder *b, unsigned bindless_base, unsigned nr_samples,
    nir_def *sample;
    enum glsl_sampler_dim dim = dim_for_rt(b, nr_samples, &sample);
-   nir_def *coords = image_coords(b, layer_id);
+   nir_def *coords = image_coords(b);

    /* Ensure pixels below this one have written out their results */
    nir_begin_invocation_interlock(b);
@@ -234,29 +227,6 @@ load_memory(nir_builder *b, unsigned bindless_base, unsigned nr_samples,
    }
 }

-nir_def *
-agx_internal_layer_id(nir_builder *b)
-{
-   /* In the background and end-of-tile programs, the layer ID is available as
-    * sr2, the Z component of the workgroup index.
-    */
-   return nir_channel(b, nir_load_workgroup_id(b), 2);
-}
-
-static nir_def *
-tib_layer_id(nir_builder *b, struct ctx *ctx)
-{
-   if (ctx->layer_id_sr) {
-      return agx_internal_layer_id(b);
-   } else {
-      /* Otherwise, the layer ID is loaded as a flat varying. */
-      b->shader->info.inputs_read |= VARYING_BIT_LAYER;
-
-      return nir_load_input(b, 1, 32, nir_imm_int(b, 0),
-                            .io_semantics.location = VARYING_SLOT_LAYER);
-   }
-}
-
 static nir_def *
 tib_impl(nir_builder *b, nir_instr *instr, void *data)
 {
@@ -311,8 +281,8 @@ tib_impl(nir_builder *b, nir_instr *instr, void *data)
       value = nir_trim_vector(b, intr->src[0].ssa, comps);

       if (tib->spilled[rt]) {
-         store_memory(b, ctx->bindless_base, tib->nr_samples,
-                      tib_layer_id(b, ctx), logical_format, rt, value);
+         store_memory(b, ctx->bindless_base, tib->nr_samples, logical_format,
+                      rt, value);
          ctx->any_memory_stores = true;
       } else {
          store_tilebuffer(b, tib, format, logical_format, rt, value,
@@ -332,8 +302,7 @@ tib_impl(nir_builder *b, nir_instr *instr, void *data)
         *(ctx->translucent) = true;

       return load_memory(b, ctx->bindless_base, tib->nr_samples,
-                         tib_layer_id(b, ctx), intr->num_components,
-                         bit_size, rt, logical_format);
+                         intr->num_components, bit_size, rt, logical_format);
    } else {
       return load_tilebuffer(b, tib, intr->num_components, bit_size, rt,
                              format, logical_format);
@@ -344,7 +313,7 @@ tib_impl(nir_builder *b, nir_instr *instr, void *data)
 bool
 agx_nir_lower_tilebuffer(nir_shader *shader, struct agx_tilebuffer_layout *tib,
                          uint8_t *colormasks, unsigned *bindless_base,
-                         bool *translucent, bool layer_id_sr)
+                         bool *translucent)
 {
    assert(shader->info.stage == MESA_SHADER_FRAGMENT);
@@ -352,7 +321,6 @@ agx_nir_lower_tilebuffer(nir_shader *shader, struct agx_tilebuffer_layout *tib,
       .tib = tib,
       .colormasks = colormasks,
       .translucent = translucent,
-      .layer_id_sr = layer_id_sr,
    };

    /* Allocate 1 texture + 1 PBE descriptor for each spilled descriptor */
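The rewritten image_coords is dense, so here is an expanded sketch of the same construction (same nir_builder helpers as the hunk above; spilled_rt_coords is a hypothetical name, and the layered-image reading of component 2 is inferred from the surrounding code):

#include "nir_builder.h"

/* Spilled render targets are addressed like layered images: the coordinate
 * vector is <x, y, layer, undef>. */
static nir_def *
spilled_rt_coords(nir_builder *b)
{
   nir_def *xy = nir_u2u32(b, nir_load_pixel_coord(b)); /* 32-bit <x, y> */
   nir_def *vec = nir_pad_vec4(b, xy);                  /* pad out to vec4 */

   /* The layer goes in component 2, and with this commit it always comes
    * from the load_layer_id intrinsic rather than a plumbed-in parameter. */
   return nir_vector_insert_imm(b, vec, nir_load_layer_id(b), 2);
}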

View file

@@ -64,7 +64,8 @@ lower(nir_builder *b, nir_intrinsic_instr *intr, void *data)
    nir_def *index = nir_iadd(b, nir_iadd_imm(b, base, component),
                              nir_imul_imm(b, nir_u2u16(b, offset), 4));

-   nir_intrinsic_instr *new_store = nir_store_uvs_agx(b, value, index);
+   if (sem.location != VARYING_SLOT_LAYER)
+      nir_store_uvs_agx(b, value, index);

    /* Insert clip distance sysval writes, and gather layer/viewport writes so we
     * can accumulate their system value. These are still lowered like normal to
@@ -73,11 +74,11 @@ lower(nir_builder *b, nir_intrinsic_instr *intr, void *data)
    if (sem.location == VARYING_SLOT_LAYER) {
       assert(ctx->layer == NULL && "only written once");
       ctx->layer = value;
-      ctx->after_layer_viewport = nir_after_instr(&new_store->instr);
+      ctx->after_layer_viewport = nir_after_instr(index->parent_instr);
    } else if (sem.location == VARYING_SLOT_VIEWPORT) {
       assert(ctx->viewport == NULL && "only written once");
       ctx->viewport = value;
-      ctx->after_layer_viewport = nir_after_instr(&new_store->instr);
+      ctx->after_layer_viewport = nir_after_instr(index->parent_instr);
    } else if (sem.location == VARYING_SLOT_CLIP_DIST0) {
       unsigned clip_base = ctx->layout->group_offs[UVS_CLIP_DIST];
       nir_def *index = nir_iadd_imm(b, nir_imul_imm(b, nir_u2u16(b, offset), 4),
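A subtlety in the hunk above: VARYING_SLOT_LAYER no longer emits a UVS store, so new_store does not exist on that path. The insertion point for the accumulated layer/viewport system value is therefore re-anchored at the instruction defining index (index->parent_instr), which exists whether or not the store was emitted.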

View file

@@ -1,51 +0,0 @@
-/*
- * Copyright 2023 Alyssa Rosenzweig
- * SPDX-License-Identifier: MIT
- */
-
-#include "compiler/shader_enums.h"
-#include "agx_tilebuffer.h"
-#include "nir.h"
-#include "nir_builder.h"
-#include "nir_builder_opcodes.h"
-#include "nir_intrinsics_indices.h"
-
-/*
- * If a fragment shader reads the layer ID but the vertex shader does not write
- * the layer ID, the fragment shader is supposed to read zero. However, in our
- * hardware, if the vertex shader does not write the layer ID, the value read by
- * the fragment shader is UNDEFINED. To reconcile, the driver passes in whether
- * the layer ID value is written, and this pass predicates layer ID on that
- * system value. This handles both cases without shader variants, at the cost of
- * a single instruction.
- */
-
-static bool
-lower(nir_builder *b, nir_intrinsic_instr *intr, void *_)
-{
-   if (intr->intrinsic != nir_intrinsic_load_input)
-      return false;
-
-   if (nir_intrinsic_io_semantics(intr).location != VARYING_SLOT_LAYER)
-      return false;
-
-   b->cursor = nir_after_instr(&intr->instr);
-   nir_def *written = nir_load_layer_id_written_agx(b);
-
-   /* Zero extend the mask since layer IDs are 16-bits, so upper bits of the
-    * layer ID are necessarily zero.
-    */
-   nir_def *mask = nir_u2uN(b, written, intr->def.bit_size);
-   nir_def *repl = nir_iand(b, &intr->def, mask);
-
-   nir_def_rewrite_uses_after(&intr->def, repl, repl->parent_instr);
-   return true;
-}
-
-bool
-agx_nir_predicate_layer_id(nir_shader *shader)
-{
-   assert(shader->info.stage == MESA_SHADER_FRAGMENT);
-   assert(shader->info.inputs_read & VARYING_BIT_LAYER);
-
-   return nir_shader_intrinsics_pass(
-      shader, lower, nir_metadata_block_index | nir_metadata_dominance, NULL);
-}
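With the layer ID read from a system register instead of a flat varying, a fragment-shader read is presumably well-defined even when no earlier stage writes VARYING_SLOT_LAYER, leaving the zero-masking implemented by this pass, its layer_id_written_agx system value, and the uniform that fed it (all removed in the hunks below) with no remaining users.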

View file

@@ -90,9 +90,7 @@ agx_build_tilebuffer_layout(enum pipe_format *formats, uint8_t nr_cbufs,
 bool agx_nir_lower_tilebuffer(struct nir_shader *shader,
                               struct agx_tilebuffer_layout *tib,
                               uint8_t *colormasks, unsigned *bindless_base,
-                              bool *translucent, bool layer_id_sr);
-
-struct nir_def *agx_internal_layer_id(struct nir_builder *b);
+                              bool *translucent);

 struct agx_msaa_state {
    uint8_t nr_samples;
@@ -111,8 +109,6 @@ bool agx_nir_lower_alpha_to_coverage(struct nir_shader *shader,
 bool agx_nir_lower_alpha_to_one(struct nir_shader *shader);

-bool agx_nir_predicate_layer_id(struct nir_shader *shader);
-
 void agx_usc_tilebuffer(struct agx_usc_builder *b,
                         struct agx_tilebuffer_layout *tib);

View file

@@ -22,7 +22,6 @@ libasahi_lib_files = files(
   'agx_nir_lower_tilebuffer.c',
   'agx_nir_lower_uvs.c',
   'agx_nir_lower_vbo.c',
-  'agx_nir_predicate_layer_id.c',
   'agx_ppp.h',
   'agx_scratch.c',
   'pool.c',

View file

@@ -1820,11 +1820,6 @@ store("uvs_agx", [1], [], [CAN_REORDER])
 intrinsic("load_uvs_index_agx", dest_comp = 1, bit_sizes=[16],
           indices=[IO_SEMANTICS], flags=[CAN_ELIMINATE, CAN_REORDER])

-# In a fragment shader, boolean system value that is true if the last vertex
-# stage writes the layer ID. If false, layer IDs are defined to read back zero.
-# This system value facilitates that. 16-bit 0/~0 bool allows easy masking.
-system_value("layer_id_written_agx", 1, bit_sizes=[16])
-
 # Load/store a pixel in local memory. This operation is formatted, with
 # conversion between the specified format and the implied register format of the
 # source/destination (for store/loads respectively). This mostly matters for

View file

@@ -168,8 +168,6 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
    case nir_intrinsic_get_ssbo_size:
       return load_sysval_indirect(b, 1, 32, stage_table(b), &s->ssbo_size,
                                   intr->src[0].ssa);
-   case nir_intrinsic_load_layer_id_written_agx:
-      return load_sysval_root(b, 1, 16, &u->layer_id_written);
    case nir_intrinsic_load_input_assembly_buffer_agx:
       return load_sysval_root(b, 1, 64, &u->input_assembly);
    case nir_intrinsic_load_geometry_param_buffer_agx:

View file

@@ -1900,7 +1900,7 @@ agx_compile_variant(struct agx_device *dev, struct pipe_context *pctx,
                        (2 * BITSET_LAST_BIT(nir->info.images_used));
       unsigned rt_spill = rt_spill_base;
       NIR_PASS(_, nir, agx_nir_lower_tilebuffer, &tib, colormasks, &rt_spill,
-               &force_translucent, false);
+               &force_translucent);

       NIR_PASS(_, nir, agx_nir_lower_sample_intrinsics);
       NIR_PASS(_, nir, agx_nir_lower_monolithic_msaa,
@@ -1908,9 +1908,6 @@ agx_compile_variant(struct agx_device *dev, struct pipe_context *pctx,
                   .nr_samples = tib.nr_samples,
                   .api_sample_mask = key->api_sample_mask,
                });
-
-      if (nir->info.inputs_read & VARYING_BIT_LAYER)
-         NIR_PASS(_, nir, agx_nir_predicate_layer_id);
    }

    NIR_PASS(_, nir, agx_nir_lower_multisampled_image_store);
@@ -4961,11 +4958,6 @@ agx_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
    agx_update_descriptors(batch, ctx->gs);
    agx_update_descriptors(batch, ctx->fs);

-   struct agx_compiled_shader *prerast = ctx->gs ? ctx->gs->gs_copy : ctx->vs;
-
-   batch->uniforms.layer_id_written =
-      (prerast && prerast->info.writes_layer_viewport) ? ~0 : 0;
-
    if (IS_DIRTY(VS) || IS_DIRTY(FS) || ctx->gs || IS_DIRTY(VERTEX) ||
        IS_DIRTY(BLEND_COLOR) || IS_DIRTY(QUERY) || IS_DIRTY(POLY_STIPPLE) ||
        IS_DIRTY(RS) || IS_DIRTY(PRIM) || ctx->in_tess) {

View file

@@ -156,9 +156,6 @@ struct PACKED agx_draw_uniforms {
    /* glSampleMask */
    uint16_t sample_mask;

-   /* Nonzero if the last vertex stage writes the layer ID, zero otherwise */
-   uint16_t layer_id_written;
-
    /* Nonzero for indexed draws, zero otherwise */
    uint16_t is_indexed_draw;