radv: remove radv_use_llvm_for_stage()

This was useful a few years ago for investigating ACO vs LLVM issues, but
now it seems unnecessary.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/40375>
This commit is contained in:
Samuel Pitoiset 2026-03-12 17:55:42 +01:00 committed by Marge Bot
parent a7cc55c33b
commit f6b4acdf45
7 changed files with 10 additions and 20 deletions

View file

@ -272,16 +272,6 @@ vk_queue_to_radv(const struct radv_physical_device *pdev, int queue_family_index
return pdev->vk_queue_to_radv[queue_family_index];
}
/**
 * Helper used for debugging compiler issues by enabling/disabling LLVM for a
 * specific shader stage (developers only).
 *
 * NOTE(review): the 'stage' parameter is marked UNUSED and is never read;
 * the result depends solely on pdev->use_llvm, so every call site could use
 * pdev->use_llvm directly.
 */
static inline bool
radv_use_llvm_for_stage(const struct radv_physical_device *pdev, UNUSED mesa_shader_stage stage)
{
return pdev->use_llvm;
}
bool radv_host_image_copy_enabled(const struct radv_physical_device *pdev);
bool radv_enable_rt(const struct radv_physical_device *pdev);

View file

@ -250,7 +250,7 @@ radv_postprocess_nir(struct radv_device *device, const struct radv_graphics_stat
{
const struct radv_physical_device *pdev = radv_device_physical(device);
enum amd_gfx_level gfx_level = pdev->info.gfx_level;
const bool use_llvm = radv_use_llvm_for_stage(pdev, stage->stage);
const bool use_llvm = pdev->use_llvm;
bool progress;
/* Wave and workgroup size should already be filled. */
@ -1019,7 +1019,7 @@ radv_GetPipelineExecutableInternalRepresentationsKHR(
/* backend IR */
if (p < end) {
p->isText = true;
if (radv_use_llvm_for_stage(pdev, stage)) {
if (pdev->use_llvm) {
VK_COPY_STR(p->name, "LLVM IR");
VK_COPY_STR(p->description, "The LLVM IR after some optimizations");
} else {

View file

@ -2404,7 +2404,7 @@ radv_create_gs_copy_shader(struct radv_device *device, struct vk_pipeline_cache
.hw_stage = AC_HW_VERTEX_SHADER,
.wave_size = 64,
.workgroup_size = 64,
.use_llvm = radv_use_llvm_for_stage(pdev, MESA_SHADER_VERTEX)});
.use_llvm = pdev->use_llvm});
NIR_PASS(_, nir, radv_nir_lower_abi, pdev->info.gfx_level, &gs_copy_stage, gfx_state, pdev->info.address32_hi);
NIR_PASS(_, nir, ac_nir_lower_global_access);

View file

@ -429,7 +429,7 @@ radv_rt_nir_to_asm(struct radv_device *device, struct vk_pipeline_cache *cache,
.stack_alignment = 16,
.localized_loads = true,
.vectorizer_callback = ac_nir_mem_vectorize_callback,
.vectorizer_data = &(struct ac_nir_config){pdev->info.gfx_level, !radv_use_llvm_for_stage(pdev, stage->stage)},
.vectorizer_data = &(struct ac_nir_config){pdev->info.gfx_level, !pdev->use_llvm},
};
nir_lower_shader_calls(stage->nir, &opts, &resume_shaders, &num_resume_shaders, mem_ctx);
}

View file

@ -732,7 +732,7 @@ radv_shader_spirv_to_nir(struct radv_device *device, const struct radv_shader_st
NIR_PASS(_, nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
bool gfx7minus = pdev->info.gfx_level <= GFX7;
bool use_llvm = radv_use_llvm_for_stage(pdev, nir->info.stage);
bool use_llvm = pdev->use_llvm;
NIR_PASS(_, nir, nir_lower_subgroups,
&(struct nir_lower_subgroups_options){
@ -3321,10 +3321,10 @@ shader_compile(struct radv_device *device, struct nir_shader *const *shaders, in
struct radv_shader_binary *binary = NULL;
#if AMD_LLVM_AVAILABLE
if (radv_use_llvm_for_stage(pdev, stage) || options->dump_shader || options->record_ir)
if (pdev->use_llvm || options->dump_shader || options->record_ir)
ac_init_llvm_once();
if (radv_use_llvm_for_stage(pdev, stage)) {
if (pdev->use_llvm) {
llvm_compile_shader(options, info, shader_count, shaders, &binary, args);
#else
if (false) {

View file

@ -315,8 +315,8 @@ radv_init_shader_args(const struct radv_device *device, mesa_shader_stage stage,
const struct radv_physical_device *pdev = radv_device_physical(device);
memset(args, 0, sizeof(*args));
args->explicit_scratch_args = !radv_use_llvm_for_stage(pdev, stage);
args->remap_spi_ps_input = !radv_use_llvm_for_stage(pdev, stage);
args->explicit_scratch_args = !pdev->use_llvm;
args->remap_spi_ps_input = !pdev->use_llvm;
for (int i = 0; i < MAX_SETS; i++)
args->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;

View file

@ -1423,7 +1423,7 @@ radv_link_shaders_info(struct radv_device *device, struct radv_shader_stage *sta
ac_compute_lshs_workgroup_size(pdev->info.gfx_level, MESA_SHADER_VERTEX, tcs_stage->info.num_tess_patches,
gfx_state->ts.patch_control_points, tcs_stage->info.tcs.tcs_vertices_out);
if (!radv_use_llvm_for_stage(pdev, MESA_SHADER_VERTEX)) {
if (!pdev->use_llvm) {
/* When the number of TCS input and output vertices are the same (typically 3):
* - There is an equal amount of LS and HS invocations
* - In case of merged LSHS shaders, the LS and HS halves of the shader always process