all: rename gl_shader_stage_is_compute to mesa_shader_stage_is_compute

Acked-by: Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
Acked-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Acked-by: Yonggang Luo <luoyonggang@gmail.com>
Acked-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36569>
Author: Qiang Yu
Date: 2025-08-05 16:43:06 +08:00
parent e2b747a85b
commit 7a91473192
27 changed files with 43 additions and 43 deletions

@@ -1486,7 +1486,7 @@ static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_te
      break;
   case nir_texop_tex:
      if (ctx->stage != MESA_SHADER_FRAGMENT &&
-          (!gl_shader_stage_is_compute(ctx->stage) ||
+          (!mesa_shader_stage_is_compute(ctx->stage) ||
          ctx->info->derivative_group == DERIVATIVE_GROUP_NONE)) {
         assert(!args->lod);
         args->level_zero = true;
@@ -1522,7 +1522,7 @@ static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_te
      args->attributes = AC_ATTR_INVARIANT_LOAD;

   bool cs_derivs =
-      gl_shader_stage_is_compute(ctx->stage) && ctx->info->derivative_group != DERIVATIVE_GROUP_NONE;
+      mesa_shader_stage_is_compute(ctx->stage) && ctx->info->derivative_group != DERIVATIVE_GROUP_NONE;
   if (ctx->stage == MESA_SHADER_FRAGMENT || cs_derivs) {
      /* Prevent texture instructions with implicit derivatives from being
       * sinked into branches. */
@@ -2766,7 +2766,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
      }
      break;
   case nir_intrinsic_load_subgroup_id:
-      assert(gl_shader_stage_is_compute(ctx->stage) && ctx->ac.gfx_level >= GFX12);
+      assert(mesa_shader_stage_is_compute(ctx->stage) && ctx->ac.gfx_level >= GFX12);
      result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.wave.id", ctx->ac.i32, NULL, 0, 0);
      break;
   case nir_intrinsic_first_invocation:

@@ -1545,7 +1545,7 @@ agx_emit_intrinsic(agx_builder *b, nir_intrinsic_instr *instr)
      /* Nothing to do for subgroup barriers */
      if (nir_intrinsic_execution_scope(instr) >= SCOPE_WORKGROUP) {
-         assert(gl_shader_stage_is_compute(b->shader->nir->info.stage));
+         assert(mesa_shader_stage_is_compute(b->shader->nir->info.stage));
         agx_threadgroup_barrier(b);
      }

@@ -1342,7 +1342,7 @@ agx_ra(agx_context *ctx)
   /* Compute shaders need to have their entire workgroup together, so our
    * register usage is bounded by the workgroup size.
    */
-   if (gl_shader_stage_is_compute(ctx->stage)) {
+   if (mesa_shader_stage_is_compute(ctx->stage)) {
      unsigned threads_per_workgroup;

      /* If we don't know the workgroup size, worst case it. TODO: Optimize

@@ -1054,7 +1054,7 @@ type_size_vec4(const struct glsl_type *type, bool bindless)
void
nir_lower_io_passes(nir_shader *nir, bool renumber_vs_inputs)
{
-   if (gl_shader_stage_is_compute(nir->info.stage))
+   if (mesa_shader_stage_is_compute(nir->info.stage))
      return;

   bool lower_indirect_inputs =

@@ -84,7 +84,7 @@ typedef enum mesa_shader_stage {
} mesa_shader_stage;

static inline bool
-gl_shader_stage_is_compute(mesa_shader_stage stage)
+mesa_shader_stage_is_compute(mesa_shader_stage stage)
{
   return stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL;
}

@@ -5597,7 +5597,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
   ir = so->ir = ctx->ir;

-   if (gl_shader_stage_is_compute(so->type)) {
+   if (mesa_shader_stage_is_compute(so->type)) {
      so->local_size[0] = ctx->s->info.workgroup_size[0];
      so->local_size[1] = ctx->s->info.workgroup_size[1];
      so->local_size[2] = ctx->s->info.workgroup_size[2];
@@ -6042,7 +6042,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
         !so->writes_stencilref;
   }

-   if (gl_shader_stage_is_compute(so->type)) {
+   if (mesa_shader_stage_is_compute(so->type)) {
      so->cs.local_invocation_id =
         ir3_find_sysval_regid(so, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
      so->cs.work_group_id =

@@ -2672,7 +2672,7 @@ ir3_ra(struct ir3_shader_variant *v)
   limit_pressure.shared = RA_SHARED_SIZE;
   limit_pressure.shared_half = RA_SHARED_HALF_SIZE;

-   if (gl_shader_stage_is_compute(v->type) && v->has_barrier) {
+   if (mesa_shader_stage_is_compute(v->type) && v->has_barrier) {
      calc_limit_pressure_for_cs_with_barrier(v, &limit_pressure);
   }

@@ -2434,7 +2434,7 @@ ntt_emit_load_sysval(struct ntt_compile *c, nir_intrinsic_instr *instr)
static void
ntt_emit_barrier(struct ntt_compile *c, nir_intrinsic_instr *intr)
{
-   bool compute = gl_shader_stage_is_compute(c->s->info.stage);
+   bool compute = mesa_shader_stage_is_compute(c->s->info.stage);

   if (nir_intrinsic_memory_scope(intr) != SCOPE_NONE) {
      nir_variable_mode modes = nir_intrinsic_memory_modes(intr);

@@ -320,7 +320,7 @@ void nir_tgsi_scan_shader(const struct nir_shader *nir,
      }
   }

-   if (gl_shader_stage_is_compute(nir->info.stage) ||
+   if (mesa_shader_stage_is_compute(nir->info.stage) ||
       gl_shader_stage_is_mesh(nir->info.stage)) {
      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.workgroup_size[0];
      info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.workgroup_size[1];

@@ -670,7 +670,7 @@ ir3_emit_cs_consts(const struct ir3_shader_variant *v,
                   struct fd_ringbuffer *ring, struct fd_context *ctx,
                   const struct pipe_grid_info *info) assert_dt
{
-   assert(gl_shader_stage_is_compute(v->type));
+   assert(mesa_shader_stage_is_compute(v->type));

   emit_common_consts(v, ring, ctx, MESA_SHADER_COMPUTE);

@@ -136,7 +136,7 @@ panfrost_shader_compile(struct panfrost_screen *screen, const nir_shader *ir,
    * Compute CSOs call this function during create time, so preprocessing
    * happens at CSO create time regardless.
    */
-   if (gl_shader_stage_is_compute(s->info.stage))
+   if (mesa_shader_stage_is_compute(s->info.stage))
      pan_shader_preprocess(s, panfrost_device_gpu_id(dev));

   struct pan_compile_inputs inputs = {

@@ -1295,7 +1295,7 @@ static void si_assign_param_offsets(nir_shader *nir, struct si_shader *shader,
bool si_should_clear_lds(struct si_screen *sscreen, const struct nir_shader *shader)
{
-   return gl_shader_stage_is_compute(shader->info.stage) &&
+   return mesa_shader_stage_is_compute(shader->info.stage) &&
          shader->info.shared_size > 0 && sscreen->options.clear_lds;
}
@@ -2019,7 +2019,7 @@ bool si_compile_shader(struct si_screen *sscreen, struct ac_llvm_compiler *compi
   }

   /* Validate SGPR and VGPR usage for compute to detect compiler bugs. */
-   if (gl_shader_stage_is_compute(nir->info.stage)) {
+   if (mesa_shader_stage_is_compute(nir->info.stage)) {
      unsigned max_vgprs =
         sscreen->info.num_physical_wave64_vgprs_per_simd * (shader->wave_size == 32 ? 2 : 1);
      unsigned max_sgprs = sscreen->info.num_physical_sgprs_per_simd;

@@ -337,7 +337,7 @@ static void si_lower_nir(struct si_screen *sscreen, struct nir_shader *nir)
      NIR_PASS(_, nir, nir_lower_gs_intrinsics, flags);
   }

-   if (gl_shader_stage_is_compute(nir->info.stage)) {
+   if (mesa_shader_stage_is_compute(nir->info.stage)) {
      nir_lower_compute_system_values_options options = {0};

      /* gl_LocalInvocationIndex must be derived from gl_LocalInvocationID.xyz to make it correct

@@ -675,7 +675,7 @@ create_shared_block(struct ntv_context *ctx, unsigned bit_size)
   SpvId type = spirv_builder_type_uint(&ctx->builder, bit_size);
   SpvId array;

-   assert(gl_shader_stage_is_compute(ctx->nir->info.stage));
+   assert(mesa_shader_stage_is_compute(ctx->nir->info.stage));
   if (ctx->nir->info.cs.has_variable_shared_mem) {
      assert(ctx->shared_mem_size);
      SpvId const_shared_size = emit_uint_const(ctx, 32, ctx->nir->info.shared_size);
@@ -4715,7 +4715,7 @@ nir_to_spirv(struct nir_shader *s, const struct zink_shader_info *sinfo, const s
   spirv_builder_emit_source(&ctx.builder, SpvSourceLanguageUnknown, 0);

   SpvAddressingModel model = SpvAddressingModelLogical;
-   if (gl_shader_stage_is_compute(s->info.stage)) {
+   if (mesa_shader_stage_is_compute(s->info.stage)) {
      if (s->info.cs.ptr_size == 32)
         model = SpvAddressingModelPhysical32;
      else if (s->info.cs.ptr_size == 64) {

@@ -3598,7 +3598,7 @@ static bool
lower_zs_swizzle_tex(nir_shader *nir, const void *swizzle, bool shadow_only)
{
   /* We don't use nir_lower_tex to do our swizzling, because of this base_sampler_id. */
-   unsigned base_sampler_id = gl_shader_stage_is_compute(nir->info.stage) ? 0 : PIPE_MAX_SAMPLERS * nir->info.stage;
+   unsigned base_sampler_id = mesa_shader_stage_is_compute(nir->info.stage) ? 0 : PIPE_MAX_SAMPLERS * nir->info.stage;
   struct lower_zs_swizzle_state state = {shadow_only, base_sampler_id, swizzle};
   return nir_shader_instructions_pass(nir, lower_zs_swizzle_tex_instr,
                                       nir_metadata_control_flow,
@@ -4520,7 +4520,7 @@ zink_binding(mesa_shader_stage stage, VkDescriptorType type, int index, bool com
   } else {
      unsigned base = stage;
      /* clamp compute bindings for better driver efficiency */
-      if (gl_shader_stage_is_compute(stage))
+      if (mesa_shader_stage_is_compute(stage))
         base = 0;
      switch (type) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
@@ -6223,7 +6223,7 @@ zink_shader_init(struct zink_screen *screen, struct zink_shader *zs)
   if (nir->info.stage == MESA_SHADER_FRAGMENT)
      zs->flat_flags = zink_flat_flags(nir);

-   if (!gl_shader_stage_is_compute(nir->info.stage) && nir->info.separate_shader)
+   if (!mesa_shader_stage_is_compute(nir->info.stage) && nir->info.separate_shader)
      NIR_PASS(_, nir, fixup_io_locations);

   NIR_PASS(_, nir, lower_basevertex);

@@ -61,7 +61,7 @@ cs_fill_push_const_info(const struct intel_device_info *devinfo,
static bool
run_cs(brw_shader &s, bool allow_spilling)
{
-   assert(gl_shader_stage_is_compute(s.stage));
+   assert(mesa_shader_stage_is_compute(s.stage));

   s.payload_ = new brw_cs_thread_payload(s);

@@ -153,7 +153,7 @@ brw_from_nir_setup_uniforms(brw_shader &s)
   s.uniforms = s.nir->num_uniforms / 4;

-   if (gl_shader_stage_is_compute(s.stage) && devinfo->verx10 < 125) {
+   if (mesa_shader_stage_is_compute(s.stage) && devinfo->verx10 < 125) {
      /* Add uniforms for builtins after regular NIR uniforms. */
      assert(s.uniforms == s.prog_data->nr_params);
@@ -173,7 +173,7 @@ emit_work_group_id_setup(nir_to_brw_state &ntb)
   brw_shader &s = ntb.s;
   const brw_builder &bld = ntb.bld.scalar_group();

-   assert(gl_shader_stage_is_compute(s.stage));
+   assert(mesa_shader_stage_is_compute(s.stage));

   brw_reg id = bld.vgrf(BRW_TYPE_UD, 3);
@@ -257,7 +257,7 @@ emit_system_values_block(nir_to_brw_state &ntb, nir_block *block)
   case nir_intrinsic_load_workgroup_id:
      if (gl_shader_stage_is_mesh(s.stage))
         UNREACHABLE("should be lowered by nir_lower_compute_system_values().");
-      assert(gl_shader_stage_is_compute(s.stage));
+      assert(mesa_shader_stage_is_compute(s.stage));
      reg = &ntb.system_values[SYSTEM_VALUE_WORKGROUP_ID];
      if (reg->file == BAD_FILE)
         *reg = emit_work_group_id_setup(ntb);
@@ -3040,7 +3040,7 @@ emit_barrier(nir_to_brw_state &ntb)
   if (devinfo->verx10 >= 125) {
      setup_barrier_message_payload_gfx125(bld, payload);
   } else {
-      assert(gl_shader_stage_is_compute(s.stage));
+      assert(mesa_shader_stage_is_compute(s.stage));

      brw_reg barrier_id_mask =
         brw_imm_ud(devinfo->ver == 9 ? 0x8f000000u : 0x7f000000u);

@@ -659,7 +659,7 @@ brw_shader::assign_curb_setup()
   uint64_t used = 0;

   const bool pull_constants =
      devinfo->verx10 >= 125 &&
-      (gl_shader_stage_is_compute(stage) ||
+      (mesa_shader_stage_is_compute(stage) ||
       gl_shader_stage_is_mesh(stage)) &&
      uniform_push_length;
@@ -667,7 +667,7 @@ brw_shader::assign_curb_setup()
   const bool pull_constants_a64 =
      (gl_shader_stage_is_rt(stage) &&
       brw_bs_prog_data(prog_data)->uses_inline_push_addr) ||
-      ((gl_shader_stage_is_compute(stage) ||
+      ((mesa_shader_stage_is_compute(stage) ||
        gl_shader_stage_is_mesh(stage)) &&
       brw_cs_prog_data(prog_data)->uses_inline_push_addr);

   assert(devinfo->has_lsc);

@@ -404,7 +404,7 @@ brw_cs_thread_payload::load_subgroup_id(const brw_builder &bld,
      bld.AND(dest, subgroup_id_, brw_imm_ud(INTEL_MASK(7, 0)));
   } else {
      assert(devinfo->verx10 < 125);
-      assert(gl_shader_stage_is_compute(bld.shader->stage));
+      assert(mesa_shader_stage_is_compute(bld.shader->stage));
      int index = brw_get_subgroup_id_param_index(devinfo,
                                                  bld.shader->prog_data);
      bld.MOV(dest, brw_uniform_reg(index, BRW_TYPE_UD));

@@ -5990,7 +5990,7 @@ elk_fs_visitor::allocate_registers(bool allow_spilling)
   prog_data->total_scratch = MAX2(elk_get_scratch_size(last_scratch),
                                   prog_data->total_scratch);

-   if (gl_shader_stage_is_compute(stage)) {
+   if (mesa_shader_stage_is_compute(stage)) {
      if (devinfo->platform == INTEL_PLATFORM_HSW) {
         /* According to the MEDIA_VFE_STATE's "Per Thread Scratch Space"
          * field documentation, Haswell supports a minimum of 2kB of
@@ -6328,7 +6328,7 @@ elk_fs_visitor::run_fs(bool allow_spilling, bool do_rep_send)
bool
elk_fs_visitor::run_cs(bool allow_spilling)
{
-   assert(gl_shader_stage_is_compute(stage));
+   assert(mesa_shader_stage_is_compute(stage));
   assert(devinfo->ver >= 7);

   const fs_builder bld = fs_builder(this).at_end();

@@ -143,7 +143,7 @@ fs_nir_setup_uniforms(elk_fs_visitor &s)
   s.uniforms = s.nir->num_uniforms / 4;

-   if (gl_shader_stage_is_compute(s.stage)) {
+   if (mesa_shader_stage_is_compute(s.stage)) {
      /* Add uniforms for builtins after regular NIR uniforms. */
      assert(s.uniforms == s.prog_data->nr_params);
@@ -163,7 +163,7 @@ emit_work_group_id_setup(nir_to_elk_state &ntb)
   elk_fs_visitor &s = ntb.s;
   const fs_builder &bld = ntb.bld;

-   assert(gl_shader_stage_is_compute(s.stage));
+   assert(mesa_shader_stage_is_compute(s.stage));

   elk_fs_reg id = bld.vgrf(ELK_REGISTER_TYPE_UD, 3);
@@ -240,7 +240,7 @@ emit_system_values_block(nir_to_elk_state &ntb, nir_block *block)
      break;

   case nir_intrinsic_load_workgroup_id:
-      assert(gl_shader_stage_is_compute(s.stage));
+      assert(mesa_shader_stage_is_compute(s.stage));
      reg = &ntb.system_values[SYSTEM_VALUE_WORKGROUP_ID];
      if (reg->file == BAD_FILE)
         *reg = emit_work_group_id_setup(ntb);
@@ -2670,7 +2670,7 @@ emit_barrier(nir_to_elk_state &ntb)
   /* Clear the message payload */
   bld.exec_all().group(8, 0).MOV(payload, elk_imm_ud(0u));

-   assert(gl_shader_stage_is_compute(s.stage));
+   assert(mesa_shader_stage_is_compute(s.stage));

   uint32_t barrier_id_mask;
   switch (devinfo->ver) {

@@ -397,7 +397,7 @@ elk_cs_thread_payload::load_subgroup_id(const fs_builder &bld,
   auto devinfo = bld.shader->devinfo;
   dest = retype(dest, ELK_REGISTER_TYPE_UD);

-   assert(gl_shader_stage_is_compute(bld.shader->stage));
+   assert(mesa_shader_stage_is_compute(bld.shader->stage));
   int index = elk_get_subgroup_id_param_index(devinfo,
                                               bld.shader->stage_prog_data);
   bld.MOV(dest, elk_fs_reg(UNIFORM, index, ELK_REGISTER_TYPE_UD));

@@ -305,7 +305,7 @@ elk_nir_lower_cs_intrinsics(nir_shader *nir,
   };

   /* Constraints from NV_compute_shader_derivatives. */
-   if (gl_shader_stage_is_compute(nir->info.stage) &&
+   if (mesa_shader_stage_is_compute(nir->info.stage) &&
       !nir->info.workgroup_size_variable) {
      if (nir->info.derivative_group == DERIVATIVE_GROUP_QUADS) {
         assert(nir->info.workgroup_size[0] % 2 == 0);

@@ -1159,7 +1159,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
      }
   }

-   if (gl_shader_stage_is_compute(nir->info.stage) ||
+   if (mesa_shader_stage_is_compute(nir->info.stage) ||
       gl_shader_stage_is_mesh(nir->info.stage)) {
      NIR_PASS(_, nir, brw_nir_lower_cs_intrinsics, compiler->devinfo,
               &stage->prog_data.cs);

@@ -577,7 +577,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
      }
   }

-   if (gl_shader_stage_is_compute(nir->info.stage)) {
+   if (mesa_shader_stage_is_compute(nir->info.stage)) {
      NIR_PASS(_, nir, elk_nir_lower_cs_intrinsics, compiler->devinfo,
               &stage->prog_data.cs);
   }

@@ -2105,7 +2105,7 @@ bi_emit_intrinsic(bi_builder *b, nir_intrinsic_instr *instr)
      break;

   case SCOPE_WORKGROUP:
-      assert(gl_shader_stage_is_compute(b->shader->stage));
+      assert(mesa_shader_stage_is_compute(b->shader->stage));
      bi_barrier(b);

      /*
       * Blob doesn't seem to do anything for memory barriers, so no need to
@@ -6464,7 +6464,7 @@ bifrost_compile_shader_nir(nir_shader *nir,
      bi_compile_variant(nir, inputs, binary, info, BI_IDVS_NONE);
   }

-   if (gl_shader_stage_is_compute(nir->info.stage)) {
+   if (mesa_shader_stage_is_compute(nir->info.stage)) {
      /* Workgroups may be merged if the structure of the workgroup is
       * not software visible. This is true if neither shared memory
       * nor barriers are used. The hardware may be able to optimize

@@ -413,7 +413,7 @@ midgard_preprocess_nir(nir_shader *nir, unsigned gpu_id)
   /* Could be eventually useful for Vulkan, but we don't expect it to have
    * the support, so limit it to compute */
-   if (gl_shader_stage_is_compute(nir->info.stage)) {
+   if (mesa_shader_stage_is_compute(nir->info.stage)) {
      nir_lower_mem_access_bit_sizes_options mem_size_options = {
         .modes = nir_var_mem_ubo | nir_var_mem_ssbo |
                  nir_var_mem_constant | nir_var_mem_task_payload |