asahi: Lower lod_bias_agx to uniform registers

Track the LOD bias of samplers and upload them at draw time to uniform
registers. This could be optimized in the future.

Vulkan will probably want to pull from a descriptor set instead.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21276>
This commit is contained in:
Alyssa Rosenzweig 2023-02-12 20:20:28 -05:00 committed by Marge Bot
parent 8058d31a25
commit 23f271833f
4 changed files with 65 additions and 39 deletions

View file

@@ -33,61 +33,75 @@ struct state {
static bool
pass(struct nir_builder *b, nir_instr *instr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
b->cursor = nir_before_instr(instr);
struct state *state = data;
/* For offsetof with dynamic array elements */
struct agx_draw_uniforms *u = NULL;
void *ptr = NULL;
nir_dest *dest;
switch (intr->intrinsic) {
case nir_intrinsic_load_vbo_base_agx:
ptr = &u->vs.vbo_base[nir_src_as_uint(intr->src[0])];
break;
case nir_intrinsic_load_ubo_base_agx:
ptr = &u->ubo_base[nir_src_as_uint(intr->src[0])];
break;
case nir_intrinsic_load_texture_base_agx:
ptr = &u->texture_base;
break;
case nir_intrinsic_load_blend_const_color_r_float:
ptr = &u->fs.blend_constant[0];
break;
case nir_intrinsic_load_blend_const_color_g_float:
ptr = &u->fs.blend_constant[1];
break;
case nir_intrinsic_load_blend_const_color_b_float:
ptr = &u->fs.blend_constant[2];
break;
case nir_intrinsic_load_blend_const_color_a_float:
ptr = &u->fs.blend_constant[3];
break;
case nir_intrinsic_load_ssbo_address:
ptr = &u->ssbo_base[nir_src_as_uint(intr->src[0])];
break;
case nir_intrinsic_get_ssbo_size:
ptr = &u->ssbo_size[nir_src_as_uint(intr->src[0])];
break;
default:
if (instr->type == nir_instr_type_intrinsic) {
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
dest = &intr->dest;
switch (intr->intrinsic) {
case nir_intrinsic_load_vbo_base_agx:
ptr = &u->vs.vbo_base[nir_src_as_uint(intr->src[0])];
break;
case nir_intrinsic_load_ubo_base_agx:
ptr = &u->ubo_base[nir_src_as_uint(intr->src[0])];
break;
case nir_intrinsic_load_texture_base_agx:
ptr = &u->texture_base;
break;
case nir_intrinsic_load_blend_const_color_r_float:
ptr = &u->fs.blend_constant[0];
break;
case nir_intrinsic_load_blend_const_color_g_float:
ptr = &u->fs.blend_constant[1];
break;
case nir_intrinsic_load_blend_const_color_b_float:
ptr = &u->fs.blend_constant[2];
break;
case nir_intrinsic_load_blend_const_color_a_float:
ptr = &u->fs.blend_constant[3];
break;
case nir_intrinsic_load_ssbo_address:
ptr = &u->ssbo_base[nir_src_as_uint(intr->src[0])];
break;
case nir_intrinsic_get_ssbo_size:
ptr = &u->ssbo_size[nir_src_as_uint(intr->src[0])];
break;
default:
return false;
}
} else if (instr->type == nir_instr_type_tex) {
nir_tex_instr *tex = nir_instr_as_tex(instr);
dest = &tex->dest;
if (tex->op == nir_texop_lod_bias_agx) {
/* TODO: Dynamic indexing samplers? */
ptr = &u->lod_bias[tex->sampler_index];
} else {
return false;
}
} else {
return false;
}
assert(nir_dest_bit_size(intr->dest) >= 16 && "no 8-bit sysvals");
assert(nir_dest_bit_size(*dest) >= 16 && "no 8-bit sysvals");
unsigned dim = nir_dest_num_components(intr->dest);
unsigned element_size = nir_dest_bit_size(intr->dest) / 16;
unsigned dim = nir_dest_num_components(*dest);
unsigned element_size = nir_dest_bit_size(*dest) / 16;
unsigned length = dim * element_size;
unsigned offset = (uintptr_t)ptr;
assert((offset % 2) == 0 && "all entries are aligned by ABI");
nir_ssa_def *value =
nir_load_preamble(b, dim, nir_dest_bit_size(intr->dest), .base = offset);
nir_ssa_def_rewrite_uses(&intr->dest.ssa, value);
nir_load_preamble(b, dim, nir_dest_bit_size(*dest), .base = offset);
nir_ssa_def_rewrite_uses(&dest->ssa, value);
BITSET_SET_RANGE(state->pushed, (offset / 2), (offset / 2) + length - 1);

View file

@@ -509,7 +509,9 @@ agx_create_sampler_state(struct pipe_context *pctx,
struct agx_sampler_state *so = CALLOC_STRUCT(agx_sampler_state);
so->base = *state;
assert(state->lod_bias == 0 && "todo: lod bias");
/* We report a max texture LOD bias of 16, so clamp appropriately */
float lod_bias = CLAMP(state->lod_bias, -16.0, 16.0);
so->lod_bias_as_fp16 = _mesa_float_to_half(lod_bias);
agx_pack(&so->desc, SAMPLER, cfg) {
cfg.minimum_lod = state->min_lod;

View file

@@ -79,6 +79,9 @@ struct PACKED agx_draw_uniforms {
uint64_t ssbo_base[PIPE_MAX_SHADER_BUFFERS];
uint32_t ssbo_size[PIPE_MAX_SHADER_BUFFERS];
/* LOD bias as float16 */
uint16_t lod_bias[PIPE_MAX_SAMPLERS];
union {
struct {
/* Vertex buffer object bases, if present */
@@ -362,6 +365,9 @@ struct agx_sampler_state {
/* Packed custom border colour, or zero if none is required */
struct agx_border_packed border;
/* LOD bias packed as fp16, the form we'll pass to the shader */
uint16_t lod_bias_as_fp16;
};
struct agx_sampler_view {

View file

@@ -79,6 +79,10 @@ agx_upload_uniforms(struct agx_batch *batch, uint64_t textures,
struct agx_draw_uniforms uniforms = {.texture_base = textures};
u_foreach_bit(s, st->valid_samplers) {
uniforms.lod_bias[s] = st->samplers[s]->lod_bias_as_fp16;
}
u_foreach_bit(cb, st->cb_mask) {
uniforms.ubo_base[cb] = agx_const_buffer_ptr(batch, &st->cb[cb]);
}