asahi: port to vtn_bindgen2

this gets rid of all our linking gunk, which is a nice cleanup.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/33099>
This commit is contained in:
Alyssa Rosenzweig 2025-01-30 09:40:50 -05:00 committed by Marge Bot
parent 0dd788298f
commit 3b1d8796fb
13 changed files with 69 additions and 177 deletions

View file

@@ -286,7 +286,17 @@ main(int argc, char **argv)
nir_shader *s = nir_precompiled_build_variant(
libfunc, v, &agx_nir_options, &opt, load_kernel_input);
agx_link_libagx(s, nir);
nir_link_shader_functions(s, nir);
NIR_PASS(_, s, nir_inline_functions);
nir_remove_non_entrypoints(s);
NIR_PASS(_, s, nir_opt_deref);
NIR_PASS(_, s, nir_lower_vars_to_ssa);
NIR_PASS(_, s, nir_remove_dead_derefs);
NIR_PASS(_, s, nir_remove_dead_variables,
nir_var_function_temp | nir_var_shader_temp, NULL);
NIR_PASS(_, s, nir_lower_vars_to_explicit_types,
nir_var_shader_temp | nir_var_function_temp,
glsl_get_cl_type_size_align);
NIR_PASS(_, s, nir_lower_vars_to_explicit_types, nir_var_mem_shared,
glsl_get_cl_type_size_align);
@@ -301,7 +311,14 @@ main(int argc, char **argv)
NIR_PASS(progress, s, nir_opt_loop);
} while (progress);
agx_preprocess_nir(s, NULL);
agx_preprocess_nir(s);
NIR_PASS(_, s, nir_opt_deref);
NIR_PASS(_, s, nir_lower_vars_to_ssa);
NIR_PASS(_, s, nir_lower_explicit_io,
nir_var_shader_temp | nir_var_function_temp |
nir_var_mem_shared | nir_var_mem_global,
nir_address_format_62bit_generic);
bool has_atomic = false;
nir_shader_intrinsics_pass(s, gather_atomic_info, nir_metadata_all,
@@ -319,7 +336,6 @@ main(int argc, char **argv)
struct agx_shader_part compiled;
bool is_helper = !strcmp(libfunc->name, "libagx_helper");
struct agx_shader_key key = {
.libagx = nir,
.promote_constants = !is_helper,
.reserved_preamble = layout.size_B / 2,
.is_helper = is_helper,
@@ -357,11 +373,6 @@ main(int argc, char **argv)
nir_precomp_print_binary_map(fp_c, nir, "libagx", *target, remap_variant);
}
/* Remove the NIR functions we compiled to binaries to save memory */
nir_remove_entrypoints(nir);
nir_precomp_print_nir(fp_c, fp_h, nir, "libagx", "nir");
glsl_type_singleton_decref();
fclose(fp_c);
fclose(fp_h);

View file

@@ -3620,22 +3620,6 @@ agx_compile_function_nir(nir_shader *nir, nir_function_impl *impl,
return offset;
}
void
agx_link_libagx(nir_shader *nir, const nir_shader *libagx)
{
nir_link_shader_functions(nir, libagx);
NIR_PASS(_, nir, nir_inline_functions);
nir_remove_non_entrypoints(nir);
NIR_PASS(_, nir, nir_opt_deref);
NIR_PASS(_, nir, nir_lower_vars_to_ssa);
NIR_PASS(_, nir, nir_remove_dead_derefs);
NIR_PASS(_, nir, nir_remove_dead_variables,
nir_var_function_temp | nir_var_shader_temp, NULL);
NIR_PASS(_, nir, nir_lower_vars_to_explicit_types,
nir_var_shader_temp | nir_var_function_temp,
glsl_get_cl_type_size_align);
}
/*
* The hardware frcp instruction is sometimes off by 1 ULP. For correctly
* rounded frcp, a refinement step is required. This routine has been
@@ -3687,7 +3671,7 @@ agx_nir_lower_fdiv(nir_builder *b, nir_alu_instr *alu, void *_)
/* Preprocess NIR independent of shader state */
void
agx_preprocess_nir(nir_shader *nir, const nir_shader *libagx)
agx_preprocess_nir(nir_shader *nir)
{
NIR_PASS(_, nir, nir_lower_vars_to_ssa);
@@ -3706,8 +3690,6 @@ agx_preprocess_nir(nir_shader *nir, const nir_shader *libagx)
/* Clean up deref gunk after lowering I/O */
NIR_PASS(_, nir, nir_opt_dce);
agx_link_libagx(nir, libagx);
/* Runs before we lower away idiv, to work at all. But runs after lowering
* textures, since the cube map array lowering generates division by 6.
*/
@@ -3779,22 +3761,9 @@ agx_compile_shader_nir(nir_shader *nir, struct agx_shader_key *key,
NIR_PASS(_, nir, nir_lower_printf_buffer, LIBAGX_PRINTF_BUFFER_ADDRESS,
LIBAGX_PRINTF_BUFFER_SIZE - 8);
bool needs_libagx = true /* TODO: Optimize */;
NIR_PASS(_, nir, nir_lower_frag_coord_to_pixel_coord);
NIR_PASS(_, nir, nir_lower_vars_to_ssa);
if (needs_libagx) {
agx_link_libagx(nir, key->libagx);
NIR_PASS(_, nir, nir_opt_deref);
NIR_PASS(_, nir, nir_lower_vars_to_ssa);
NIR_PASS(_, nir, nir_lower_explicit_io,
nir_var_shader_temp | nir_var_function_temp |
nir_var_mem_shared | nir_var_mem_global,
nir_address_format_62bit_generic);
}
/* Late sysval lowering creates large loads. Load lowering creates unpacks */
nir_lower_mem_access_bit_sizes_options lower_mem_access_options = {
.modes = nir_var_mem_ssbo | nir_var_mem_constant |

View file

@@ -258,9 +258,6 @@ struct agx_shader_key {
/* Number of reserved preamble slots at the start */
unsigned reserved_preamble;
/* Library routines to link against */
const nir_shader *libagx;
/* Whether scratch memory is available in the given shader stage */
bool has_scratch;
@@ -294,8 +291,7 @@ struct agx_shader_key {
struct agx_interp_info agx_gather_interp_info(nir_shader *nir);
uint64_t agx_gather_texcoords(nir_shader *nir);
void agx_link_libagx(nir_shader *nir, const nir_shader *libagx);
void agx_preprocess_nir(nir_shader *nir, const nir_shader *libagx);
void agx_preprocess_nir(nir_shader *nir);
bool agx_nir_lower_discard_zs_emit(nir_shader *s);
bool agx_nir_lower_sample_mask(nir_shader *s);
bool agx_nir_lower_interpolation(nir_shader *s);

View file

@@ -38,7 +38,7 @@ agx_compile_bg_eot_shader(struct agx_bg_eot_cache *cache, nir_shader *shader,
struct agx_tilebuffer_layout *tib)
{
agx_nir_lower_texture(shader);
agx_preprocess_nir(shader, cache->dev->libagx);
agx_preprocess_nir(shader);
if (tib) {
unsigned bindless_base = 0;
agx_nir_lower_tilebuffer(shader, tib, NULL, &bindless_base, NULL, NULL);
@@ -50,8 +50,6 @@ agx_compile_bg_eot_shader(struct agx_bg_eot_cache *cache, nir_shader *shader,
nir_metadata_control_flow, NULL);
}
key->libagx = cache->dev->libagx;
struct agx_bg_eot_shader *res = rzalloc(cache->ht, struct agx_bg_eot_shader);
struct agx_shader_part bin;
agx_compile_shader_nir(shader, key, NULL, &bin);

View file

@@ -662,9 +662,6 @@ agx_open_device(void *memctx, struct agx_device *dev)
agx_get_global_ids(dev);
glsl_type_singleton_init_or_ref();
struct blob_reader blob;
blob_reader_init(&blob, (void *)libagx_0_nir, sizeof(libagx_0_nir));
dev->libagx = nir_deserialize(memctx, &agx_nir_options, &blob);
if (agx_gather_device_key(dev).needs_g13x_coherency == U_TRISTATE_YES) {
dev->libagx_programs = libagx_g13x;
@@ -695,9 +692,6 @@ agx_open_device(void *memctx, struct agx_device *dev)
}
u_printf_init(&dev->printf, bo, agx_bo_map(bo));
u_printf_singleton_init_or_ref();
u_printf_singleton_add(dev->libagx->printf_info,
dev->libagx->printf_info_count);
return true;
}
@@ -706,7 +700,6 @@ agx_close_device(struct agx_device *dev)
{
agx_bo_unreference(dev, dev->printf.bo);
u_printf_destroy(&dev->printf);
ralloc_free((void *)dev->libagx);
agx_bo_cache_evict_all(dev);
util_sparse_array_finish(&dev->bo_map);
agxdecode_destroy_context(dev->agxdecode);
@@ -714,7 +707,6 @@ agx_close_device(struct agx_device *dev)
util_vma_heap_finish(&dev->main_heap);
util_vma_heap_finish(&dev->usc_heap);
glsl_type_singleton_decref();
u_printf_singleton_decref();
close(dev->fd);
}

View file

@@ -96,9 +96,6 @@ typedef struct {
struct agx_device {
uint32_t debug;
/* NIR library of AGX helpers/shaders. Immutable once created. */
const struct nir_shader *libagx;
/* Precompiled libagx binary table */
const uint32_t **libagx_programs;

View file

@@ -375,7 +375,7 @@ lower_id(nir_builder *b, nir_intrinsic_instr *intr, void *data)
* counts are statically known.
*/
static nir_shader *
agx_nir_create_geometry_count_shader(nir_shader *gs, const nir_shader *libagx,
agx_nir_create_geometry_count_shader(nir_shader *gs,
struct lower_gs_state *state)
{
/* Don't muck up the original shader */
@ -394,7 +394,7 @@ agx_nir_create_geometry_count_shader(nir_shader *gs, const nir_shader *libagx,
NIR_PASS(_, shader, nir_shader_intrinsics_pass, lower_id,
nir_metadata_control_flow, NULL);
agx_preprocess_nir(shader, libagx);
agx_preprocess_nir(shader);
return shader;
}
@@ -587,8 +587,7 @@ strip_side_effect_from_main(nir_builder *b, nir_intrinsic_instr *intr,
* shades each rasterized output vertex in parallel.
*/
static nir_shader *
agx_nir_create_gs_rast_shader(const nir_shader *gs, const nir_shader *libagx,
bool *side_effects_for_rast)
agx_nir_create_gs_rast_shader(const nir_shader *gs, bool *side_effects_for_rast)
{
/* Don't muck up the original shader */
nir_shader *shader = nir_shader_clone(NULL, gs);
@ -698,7 +697,7 @@ agx_nir_create_gs_rast_shader(const nir_shader *gs, const nir_shader *libagx,
nir_opt_idiv_const(shader, 16);
agx_preprocess_nir(shader, libagx);
agx_preprocess_nir(shader);
return shader;
}
@@ -977,10 +976,9 @@ collect_components(nir_builder *b, nir_intrinsic_instr *intr, void *data)
* transform feedback offsets and counters as applicable.
*/
static nir_shader *
agx_nir_create_pre_gs(struct lower_gs_state *state, const nir_shader *libagx,
bool indexed, bool restart, struct nir_xfb_info *xfb,
unsigned vertices_per_prim, uint8_t streams,
unsigned invocations)
agx_nir_create_pre_gs(struct lower_gs_state *state, bool indexed, bool restart,
struct nir_xfb_info *xfb, unsigned vertices_per_prim,
uint8_t streams, unsigned invocations)
{
nir_builder b_ = nir_builder_init_simple_shader(
MESA_SHADER_COMPUTE, &agx_nir_options, "Pre-GS patch up");
@@ -1124,7 +1122,7 @@ agx_nir_create_pre_gs(struct lower_gs_state *state, const nir_shader *libagx,
nir_load_stat_query_address_agx(b, .base = PIPE_STAT_QUERY_C_INVOCATIONS),
emitted_prims);
agx_preprocess_nir(b->shader, libagx);
agx_preprocess_nir(b->shader);
return b->shader;
}
@@ -1199,28 +1197,8 @@ agx_nir_lower_gs_instancing(nir_shader *gs)
nir_metadata_control_flow, index);
}
static void
link_libagx(nir_shader *nir, const nir_shader *libagx)
{
nir_link_shader_functions(nir, libagx);
NIR_PASS(_, nir, nir_inline_functions);
nir_remove_non_entrypoints(nir);
NIR_PASS(_, nir, nir_lower_indirect_derefs, nir_var_function_temp, 64);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_lower_vars_to_explicit_types,
nir_var_shader_temp | nir_var_function_temp | nir_var_mem_shared,
glsl_get_cl_type_size_align);
NIR_PASS(_, nir, nir_opt_deref);
NIR_PASS(_, nir, nir_lower_vars_to_ssa);
NIR_PASS(_, nir, nir_lower_explicit_io,
nir_var_shader_temp | nir_var_function_temp | nir_var_mem_shared |
nir_var_mem_global,
nir_address_format_62bit_generic);
}
bool
agx_nir_lower_gs(nir_shader *gs, const nir_shader *libagx,
bool rasterizer_discard, nir_shader **gs_count,
agx_nir_lower_gs(nir_shader *gs, bool rasterizer_discard, nir_shader **gs_count,
nir_shader **gs_copy, nir_shader **pre_gs,
enum mesa_prim *out_mode, unsigned *out_count_words)
{
@@ -1312,13 +1290,11 @@ agx_nir_lower_gs(nir_shader *gs, const nir_shader *libagx,
}
bool side_effects_for_rast = false;
*gs_copy = agx_nir_create_gs_rast_shader(gs, libagx, &side_effects_for_rast);
*gs_copy = agx_nir_create_gs_rast_shader(gs, &side_effects_for_rast);
NIR_PASS(_, gs, nir_shader_intrinsics_pass, lower_id,
nir_metadata_control_flow, NULL);
link_libagx(gs, libagx);
NIR_PASS(_, gs, nir_lower_idiv,
&(const nir_lower_idiv_options){.allow_fp16 = true});
@@ -1327,7 +1303,7 @@ agx_nir_lower_gs(nir_shader *gs, const nir_shader *libagx,
/* If there is any unknown count, we need a geometry count shader */
if (gs_state.count_stride_el > 0)
*gs_count = agx_nir_create_geometry_count_shader(gs, libagx, &gs_state);
*gs_count = agx_nir_create_geometry_count_shader(gs, &gs_state);
else
*gs_count = NULL;
@@ -1410,7 +1386,7 @@ agx_nir_lower_gs(nir_shader *gs, const nir_shader *libagx,
/* Create auxiliary programs */
*pre_gs = agx_nir_create_pre_gs(
&gs_state, libagx, true, gs->info.gs.output_primitive != MESA_PRIM_POINTS,
&gs_state, true, gs->info.gs.output_primitive != MESA_PRIM_POINTS,
gs->xfb_info, verts_in_output_prim(gs), gs->info.gs.active_stream_mask,
gs->info.gs.invocations);
@@ -1474,18 +1450,9 @@ lower_vs_before_gs(nir_builder *b, nir_intrinsic_instr *intr, void *data)
}
bool
agx_nir_lower_vs_before_gs(struct nir_shader *vs,
const struct nir_shader *libagx)
agx_nir_lower_vs_before_gs(struct nir_shader *vs)
{
bool progress = false;
/* Lower vertex stores to memory stores */
progress |= nir_shader_intrinsics_pass(vs, lower_vs_before_gs,
nir_metadata_control_flow, NULL);
/* Link libagx, used in lower_vs_before_gs */
if (progress)
link_libagx(vs, libagx);
return progress;
return nir_shader_intrinsics_pass(vs, lower_vs_before_gs,
nir_metadata_control_flow, NULL);
}

View file

@@ -28,18 +28,16 @@ nir_def *agx_nir_load_vertex_id(struct nir_builder *b, nir_def *id,
bool agx_nir_lower_sw_vs(struct nir_shader *s, unsigned index_size_B);
bool agx_nir_lower_vs_before_gs(struct nir_shader *vs,
const struct nir_shader *libagx);
bool agx_nir_lower_vs_before_gs(struct nir_shader *vs);
bool agx_nir_lower_gs(struct nir_shader *gs, const struct nir_shader *libagx,
bool rasterizer_discard, struct nir_shader **gs_count,
struct nir_shader **gs_copy, struct nir_shader **pre_gs,
enum mesa_prim *out_mode, unsigned *out_count_words);
bool agx_nir_lower_gs(struct nir_shader *gs, bool rasterizer_discard,
struct nir_shader **gs_count, struct nir_shader **gs_copy,
struct nir_shader **pre_gs, enum mesa_prim *out_mode,
unsigned *out_count_words);
bool agx_nir_lower_tcs(struct nir_shader *tcs, const struct nir_shader *libagx);
bool agx_nir_lower_tcs(struct nir_shader *tcs);
bool agx_nir_lower_tes(struct nir_shader *tes, const struct nir_shader *libagx,
bool to_hw_vs);
bool agx_nir_lower_tes(struct nir_shader *tes, bool to_hw_vs);
uint64_t agx_tcs_per_vertex_outputs(const struct nir_shader *nir);

View file

@@ -186,31 +186,11 @@ lower_tcs(nir_builder *b, nir_intrinsic_instr *intr, void *data)
return true;
}
static void
link_libagx(nir_shader *nir, const nir_shader *libagx)
{
nir_link_shader_functions(nir, libagx);
NIR_PASS(_, nir, nir_inline_functions);
nir_remove_non_entrypoints(nir);
NIR_PASS(_, nir, nir_lower_indirect_derefs, nir_var_function_temp, 64);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_lower_vars_to_explicit_types, nir_var_function_temp,
glsl_get_cl_type_size_align);
NIR_PASS(_, nir, nir_opt_deref);
NIR_PASS(_, nir, nir_lower_vars_to_ssa);
NIR_PASS(_, nir, nir_lower_explicit_io,
nir_var_shader_temp | nir_var_function_temp | nir_var_mem_shared |
nir_var_mem_global,
nir_address_format_62bit_generic);
}
bool
agx_nir_lower_tcs(nir_shader *tcs, const struct nir_shader *libagx)
agx_nir_lower_tcs(nir_shader *tcs)
{
nir_shader_intrinsics_pass(tcs, lower_tcs, nir_metadata_control_flow, NULL);
link_libagx(tcs, libagx);
return true;
return nir_shader_intrinsics_pass(tcs, lower_tcs, nir_metadata_control_flow,
NULL);
}
static nir_def *
@@ -270,7 +250,7 @@ lower_tes_indexing(nir_builder *b, nir_intrinsic_instr *intr, void *data)
}
bool
agx_nir_lower_tes(nir_shader *tes, const nir_shader *libagx, bool to_hw_vs)
agx_nir_lower_tes(nir_shader *tes, bool to_hw_vs)
{
nir_lower_tess_coord_z(
tes, tes->info.tess._primitive_mode == TESS_PRIMITIVE_TRIANGLES);
@@ -308,7 +288,6 @@ agx_nir_lower_tes(nir_shader *tes, const nir_shader *libagx, bool to_hw_vs)
nir_metadata_control_flow, NULL);
}
link_libagx(tes, libagx);
nir_lower_idiv(tes, &(nir_lower_idiv_options){.allow_fp16 = true});
nir_metadata_preserve(nir_shader_get_entrypoint(tes), nir_metadata_none);
return true;

View file

@@ -33,15 +33,15 @@ libagx_spv = custom_target(
depfile : 'libagx_shaders.h.d',
)
libagx_h = custom_target(
'libagx.h',
libagx = custom_target(
'libagx',
input : libagx_spv,
output : 'libagx.h',
command : [prog_vtn_bindgen, libagx_spv, '@OUTPUT@'],
depends : [prog_vtn_bindgen],
output : ['libagx.cpp', 'libagx.h'],
command : [prog_vtn_bindgen2, libagx_spv, '@OUTPUT0@', '@OUTPUT1@'],
depends : [prog_vtn_bindgen2],
)
idep_libagx = declare_dependency(
sources : [libagx_h],
sources : [libagx],
include_directories : include_directories('.'),
)

View file

@@ -1664,11 +1664,10 @@ hk_get_prolog_epilog_locked(struct hk_device *dev, struct hk_internal_key *key,
builder(&b, key->key);
if (preprocess_nir)
agx_preprocess_nir(b.shader, dev->dev.libagx);
agx_preprocess_nir(b.shader);
struct agx_shader_key backend_key = {
.dev = agx_gather_device_key(&dev->dev),
.libagx = dev->dev.libagx,
.secondary = true,
.no_stop = !stop,
};

View file

@@ -168,7 +168,7 @@ hk_preprocess_nir_internal(struct vk_physical_device *vk_pdev, nir_shader *nir)
NIR_PASS(_, nir, nir_split_struct_vars, nir_var_function_temp);
/* Optimize but allow copies because we haven't lowered them yet */
agx_preprocess_nir(nir, NULL);
agx_preprocess_nir(nir);
NIR_PASS(_, nir, nir_lower_load_const_to_scalar);
NIR_PASS(_, nir, nir_lower_var_copies);
@@ -731,7 +731,7 @@ hk_lower_nir(struct hk_device *dev, nir_shader *nir,
NIR_PASS(_, nir, agx_nir_lower_texture);
NIR_PASS(_, nir, agx_nir_lower_multisampled_image_store);
agx_preprocess_nir(nir, dev->dev.libagx);
agx_preprocess_nir(nir);
NIR_PASS(_, nir, nir_opt_conditional_discard);
NIR_PASS(_, nir, nir_opt_if,
nir_opt_if_optimize_phi_true_false | nir_opt_if_avoid_64bit_phis);
@@ -849,7 +849,7 @@ hk_compile_nir(struct hk_device *dev, const VkAllocationCallbacks *pAllocator,
shader->info.tess.tcs_output_stride = agx_tcs_output_stride(nir);
} else {
/* This destroys info so it needs to happen after the gather */
NIR_PASS(_, nir, agx_nir_lower_tes, dev->dev.libagx, hw);
NIR_PASS(_, nir, agx_nir_lower_tes, hw);
}
}
@@ -876,7 +876,6 @@ hk_compile_nir(struct hk_device *dev, const VkAllocationCallbacks *pAllocator,
struct agx_shader_key backend_key = {
.dev = agx_gather_device_key(&dev->dev),
.reserved_preamble = 128 /* TODO */,
.libagx = dev->dev.libagx,
.no_stop = nir->info.stage == MESA_SHADER_FRAGMENT,
.has_scratch = !nir->info.internal,
.promote_constants = true,
@@ -1059,7 +1058,7 @@ hk_compile_shader(struct hk_device *dev, struct vk_shader_compile_info *info,
NIR_PASS(_, nir, agx_nir_lower_sample_intrinsics, false);
} else if (sw_stage == MESA_SHADER_TESS_CTRL) {
NIR_PASS_V(nir, agx_nir_lower_tcs, dev->dev.libagx);
NIR_PASS_V(nir, agx_nir_lower_tcs);
}
/* Compile all variants up front */
@@ -1076,9 +1075,8 @@ hk_compile_shader(struct hk_device *dev, struct vk_shader_compile_info *info,
enum mesa_prim out_prim = MESA_PRIM_MAX;
nir_shader *count = NULL, *rast = NULL, *pre_gs = NULL;
NIR_PASS(_, clone, agx_nir_lower_gs, dev->dev.libagx, rast_disc,
&count, &rast, &pre_gs, &out_prim,
&count_variant->info.gs.count_words);
NIR_PASS(_, clone, agx_nir_lower_gs, rast_disc, &count, &rast, &pre_gs,
&out_prim, &count_variant->info.gs.count_words);
if (!rast_disc) {
struct hk_shader *shader = &obj->variants[HK_GS_VARIANT_RAST];
@@ -1181,7 +1179,7 @@ hk_compile_shader(struct hk_device *dev, struct vk_shader_compile_info *info,
if (hw) {
hk_lower_hw_vs(clone, shader);
} else {
NIR_PASS(_, clone, agx_nir_lower_vs_before_gs, dev->dev.libagx);
NIR_PASS(_, clone, agx_nir_lower_vs_before_gs);
}
/* hk_compile_nir takes ownership of the clone */

View file

@@ -1530,7 +1530,6 @@ agx_compile_nir(struct agx_device *dev, nir_shader *nir,
struct agx_shader_key key = {
.dev = agx_gather_device_key(dev),
.libagx = dev->libagx,
.has_scratch = !secondary,
.promote_constants = true,
.no_stop = !terminal,
@@ -1610,7 +1609,7 @@ agx_compile_variant(struct agx_device *dev, struct pipe_context *pctx,
struct asahi_vs_shader_key *key = &key_->vs;
if (nir->info.vs.tes_agx) {
NIR_PASS(_, nir, agx_nir_lower_tes, dev->libagx, key->hw);
NIR_PASS(_, nir, agx_nir_lower_tes, key->hw);
} else {
NIR_PASS(_, nir, agx_nir_lower_vs_input_to_prolog,
attrib_components_read);
@@ -1626,7 +1625,7 @@ agx_compile_variant(struct agx_device *dev, struct pipe_context *pctx,
NIR_PASS(_, nir, agx_nir_lower_cull_distance_vs);
NIR_PASS(_, nir, agx_nir_lower_uvs, &uvs);
} else {
NIR_PASS(_, nir, agx_nir_lower_vs_before_gs, dev->libagx);
NIR_PASS(_, nir, agx_nir_lower_vs_before_gs);
/* Turn into a compute shader now that we're free of vertexisms */
nir->info.stage = MESA_SHADER_COMPUTE;
@@ -1635,12 +1634,12 @@ agx_compile_variant(struct agx_device *dev, struct pipe_context *pctx,
outputs = nir->info.outputs_written;
}
} else if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
NIR_PASS_V(nir, agx_nir_lower_tcs, dev->libagx);
NIR_PASS_V(nir, agx_nir_lower_tcs);
} else if (nir->info.stage == MESA_SHADER_GEOMETRY) {
struct asahi_gs_shader_key *key = &key_->gs;
NIR_PASS(_, nir, agx_nir_lower_gs, dev->libagx, key->rasterizer_discard,
&gs_count, &gs_copy, &pre_gs, &gs_out_prim, &gs_out_count_words);
NIR_PASS(_, nir, agx_nir_lower_gs, key->rasterizer_discard, &gs_count,
&gs_copy, &pre_gs, &gs_out_prim, &gs_out_count_words);
} else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
struct asahi_fs_shader_key *key = &key_->fs;
@@ -1899,7 +1898,7 @@ agx_shader_initialize(struct agx_device *dev, struct agx_uncompiled_shader *so,
NIR_PASS(_, nir, agx_nir_lower_texture);
NIR_PASS(_, nir, nir_lower_ssbo, NULL);
agx_preprocess_nir(nir, dev->libagx);
agx_preprocess_nir(nir);
if (nir->info.stage == MESA_SHADER_FRAGMENT &&
(nir->info.inputs_read & VARYING_BITS_TEX_ANY)) {
@@ -2653,18 +2652,7 @@ agx_build_meta_shader_internal(struct agx_context *ctx,
struct agx_device *dev = agx_device(ctx->base.screen);
if (!prolog) {
/* We need to link libagx and assign shared before preprocessing, matching
* what the driver would otherwise produce.
*/
agx_link_libagx(b.shader, dev->libagx);
NIR_PASS(_, b.shader, nir_lower_vars_to_explicit_types,
nir_var_mem_shared, glsl_get_cl_type_size_align);
NIR_PASS(_, b.shader, nir_lower_explicit_io, nir_var_mem_shared,
nir_address_format_62bit_generic);
agx_preprocess_nir(b.shader, NULL);
agx_preprocess_nir(b.shader);
NIR_PASS(_, b.shader, agx_nir_lower_texture);
NIR_PASS(_, b.shader, agx_nir_lower_multisampled_image_store);
}