all: rename gl_shader_stage_uses_workgroup to mesa_shader_stage_uses_workgroup

Acked-by: Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
Acked-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Acked-by: Yonggang Luo <luoyonggang@gmail.com>
Acked-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36569>
Author: Qiang Yu
Date: 2025-08-05 16:50:43 +08:00
parent 4ff341f2fc
commit 4847e0b380

24 changed files with 38 additions and 38 deletions
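For context, the renamed function is a small stage predicate (see the header diff below): it returns true only for compute-like stages, which are the only stages that have a workgroup and can carry shared memory. A minimal sketch of a call site under the new name — shader_uses_shared_memory is a hypothetical wrapper written here for illustration only, the other identifiers are taken from the diffs below, and the #include path is assumed:

#include "nir.h"  /* assumed Mesa-internal header providing nir_shader / shader_info */

/* Hypothetical helper, not part of this commit: only compute-like stages
 * (compute/kernel) have a workgroup, so only they can declare shared memory. */
static inline bool
shader_uses_shared_memory(const nir_shader *shader)
{
   return mesa_shader_stage_uses_workgroup(shader->info.stage) &&
          shader->info.shared_size > 0;
}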

View file

@@ -2378,7 +2378,7 @@ bool
 nir_shader_supports_implicit_lod(nir_shader *shader)
 {
    return (shader->info.stage == MESA_SHADER_FRAGMENT ||
-           (gl_shader_stage_uses_workgroup(shader->info.stage) &&
+           (mesa_shader_stage_uses_workgroup(shader->info.stage) &&
             shader->info.derivative_group != DERIVATIVE_GROUP_NONE));
 }

View file

@@ -510,7 +510,7 @@ visit_intrinsic(nir_intrinsic_instr *instr, struct divergence_state *state)
    case nir_intrinsic_load_workgroup_index:
    case nir_intrinsic_load_workgroup_id:
-      assert(gl_shader_stage_uses_workgroup(stage) || stage == MESA_SHADER_TESS_CTRL);
+      assert(mesa_shader_stage_uses_workgroup(stage) || stage == MESA_SHADER_TESS_CTRL);
       if (stage == MESA_SHADER_COMPUTE)
          is_divergent |= (options & nir_divergence_multiple_workgroup_per_compute_subgroup);
       break;

View file

@@ -803,7 +803,7 @@ bool
 nir_lower_compute_system_values(nir_shader *shader,
                                 const nir_lower_compute_system_values_options *options)
 {
-   if (!gl_shader_stage_uses_workgroup(shader->info.stage))
+   if (!mesa_shader_stage_uses_workgroup(shader->info.stage))
       return false;
    struct lower_sysval_state state;

View file

@@ -173,7 +173,7 @@ is_atomic_already_optimized(nir_shader *shader, nir_intrinsic_instr *instr)
       }
    }
-   if (gl_shader_stage_uses_workgroup(shader->info.stage)) {
+   if (mesa_shader_stage_uses_workgroup(shader->info.stage)) {
       unsigned dims_needed = 0;
       for (unsigned i = 0; i < 3; i++)
          dims_needed |= (shader->info.workgroup_size_variable ||
@@ -324,7 +324,7 @@ nir_opt_uniform_atomics(nir_shader *shader, bool fs_atomics_predicated)
    /* A 1x1x1 workgroup only ever has one active lane, so there's no point in
     * optimizing any atomics.
     */
-   if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
+   if (mesa_shader_stage_uses_workgroup(shader->info.stage) &&
       !shader->info.workgroup_size_variable &&
       shader->info.workgroup_size[0] == 1 && shader->info.workgroup_size[1] == 1 &&
       shader->info.workgroup_size[2] == 1)

View file

@@ -2653,7 +2653,7 @@ print_shader_info(const struct shader_info *info, FILE *fp)
    print_nz_bool(fp, "internal", info->internal);
-   if (gl_shader_stage_uses_workgroup(info->stage)) {
+   if (mesa_shader_stage_uses_workgroup(info->stage)) {
       fprintf(fp, "workgroup_size: %u, %u, %u%s\n",
               info->workgroup_size[0],
               info->workgroup_size[1],

View file

@@ -1664,7 +1664,7 @@ get_intrinsic_uub(struct analysis_state *state, struct uub_query q, uint32_t *re
        * They can safely use the same code path here as variable sized
        * compute-like shader stages.
        */
-      if (!gl_shader_stage_uses_workgroup(shader->info.stage) ||
+      if (!mesa_shader_stage_uses_workgroup(shader->info.stage) ||
          shader->info.workgroup_size_variable) {
          *result = config->max_workgroup_invocations - 1;
       } else {
@@ -1725,7 +1725,7 @@ get_intrinsic_uub(struct analysis_state *state, struct uub_query q, uint32_t *re
    case nir_intrinsic_load_subgroup_id:
    case nir_intrinsic_load_num_subgroups: {
       uint32_t workgroup_size = config->max_workgroup_invocations;
-      if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
+      if (mesa_shader_stage_uses_workgroup(shader->info.stage) &&
          !shader->info.workgroup_size_variable) {
          workgroup_size = shader->info.workgroup_size[0] *
                           shader->info.workgroup_size[1] *

View file

@@ -97,7 +97,7 @@ mesa_shader_stage_is_mesh(mesa_shader_stage stage)
 }
 static inline bool
-gl_shader_stage_uses_workgroup(mesa_shader_stage stage)
+mesa_shader_stage_uses_workgroup(mesa_shader_stage stage)
 {
    return stage == MESA_SHADER_COMPUTE ||
           stage == MESA_SHADER_KERNEL ||

View file

@@ -2914,7 +2914,7 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
    }
    /* Now that we have the value, update the workgroup size if needed */
-   if (gl_shader_stage_uses_workgroup(b->entry_point_stage))
+   if (mesa_shader_stage_uses_workgroup(b->entry_point_stage))
       vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb,
                              NULL);
 }
@@ -5416,7 +5416,7 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
       break;
    case SpvExecutionModeLocalSize:
-      if (gl_shader_stage_uses_workgroup(b->shader->info.stage)) {
+      if (mesa_shader_stage_uses_workgroup(b->shader->info.stage)) {
         b->shader->info.workgroup_size[0] = mode->operands[0];
         b->shader->info.workgroup_size[1] = mode->operands[1];
         b->shader->info.workgroup_size[2] = mode->operands[2];
@@ -5559,12 +5559,12 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
       break;
    case SpvExecutionModeDerivativeGroupQuadsKHR:
-      vtn_assert(gl_shader_stage_uses_workgroup(b->shader->info.stage));
+      vtn_assert(mesa_shader_stage_uses_workgroup(b->shader->info.stage));
       b->shader->info.derivative_group = DERIVATIVE_GROUP_QUADS;
       break;
    case SpvExecutionModeDerivativeGroupLinearKHR:
-      vtn_assert(gl_shader_stage_uses_workgroup(b->shader->info.stage));
+      vtn_assert(mesa_shader_stage_uses_workgroup(b->shader->info.stage));
       b->shader->info.derivative_group = DERIVATIVE_GROUP_LINEAR;
       break;
@@ -5750,7 +5750,7 @@ vtn_handle_execution_mode_id(struct vtn_builder *b, struct vtn_value *entry_poin
    switch (mode->exec_mode) {
    case SpvExecutionModeLocalSizeId:
-      if (gl_shader_stage_uses_workgroup(b->shader->info.stage)) {
+      if (mesa_shader_stage_uses_workgroup(b->shader->info.stage)) {
         b->shader->info.workgroup_size[0] = vtn_constant_uint(b, mode->operands[0]);
         b->shader->info.workgroup_size[1] = vtn_constant_uint(b, mode->operands[1]);
         b->shader->info.workgroup_size[2] = vtn_constant_uint(b, mode->operands[2]);
@@ -7292,7 +7292,7 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
                               vtn_handle_execution_mode_id, NULL);
    if (b->workgroup_size_builtin) {
-      vtn_assert(gl_shader_stage_uses_workgroup(stage));
+      vtn_assert(mesa_shader_stage_uses_workgroup(stage));
       vtn_assert(b->workgroup_size_builtin->type->type ==
                  glsl_vector_type(GLSL_TYPE_UINT, 3));

View file

@@ -1355,7 +1355,7 @@ DEFINE_PROG_DATA_DOWNCAST(tcs, prog_data->stage == MESA_SHADER_TESS_CTRL)
 DEFINE_PROG_DATA_DOWNCAST(tes, prog_data->stage == MESA_SHADER_TESS_EVAL)
 DEFINE_PROG_DATA_DOWNCAST(gs, prog_data->stage == MESA_SHADER_GEOMETRY)
 DEFINE_PROG_DATA_DOWNCAST(wm, prog_data->stage == MESA_SHADER_FRAGMENT)
-DEFINE_PROG_DATA_DOWNCAST(cs, gl_shader_stage_uses_workgroup(prog_data->stage))
+DEFINE_PROG_DATA_DOWNCAST(cs, mesa_shader_stage_uses_workgroup(prog_data->stage))
 DEFINE_PROG_DATA_DOWNCAST(bs, brw_shader_stage_is_bindless(prog_data->stage))
 DEFINE_PROG_DATA_DOWNCAST(vue, prog_data->stage == MESA_SHADER_VERTEX ||

View file

@@ -3032,7 +3032,7 @@ emit_barrier(nir_to_brw_state &ntb)
    brw_shader &s = ntb.s;
    /* We are getting the barrier ID from the compute shader header */
-   assert(gl_shader_stage_uses_workgroup(s.stage));
+   assert(mesa_shader_stage_uses_workgroup(s.stage));
    /* Zero-initialize the payload */
    brw_reg payload = hbld.MOV(brw_imm_ud(0u));
@@ -4674,7 +4674,7 @@ set_memory_address(nir_to_brw_state &ntb,
 static unsigned
 brw_workgroup_size(brw_shader &s)
 {
-   assert(gl_shader_stage_uses_workgroup(s.stage));
+   assert(mesa_shader_stage_uses_workgroup(s.stage));
    assert(!s.nir->info.workgroup_size_variable);
    const struct brw_cs_prog_data *cs = brw_cs_prog_data(s.prog_data);
    return cs->local_size[0] * cs->local_size[1] * cs->local_size[2];
@@ -4688,7 +4688,7 @@ brw_from_nir_emit_cs_intrinsic(nir_to_brw_state &ntb,
    const brw_builder &bld = ntb.bld;
    brw_shader &s = ntb.s;
-   assert(gl_shader_stage_uses_workgroup(s.stage));
+   assert(mesa_shader_stage_uses_workgroup(s.stage));
    struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(s.prog_data);
    brw_reg dest;
@@ -6119,7 +6119,7 @@ brw_from_nir_emit_intrinsic(nir_to_brw_state &ntb,
          break;
       if (s.nir->info.shared_size > 0) {
-         assert(gl_shader_stage_uses_workgroup(s.stage));
+         assert(mesa_shader_stage_uses_workgroup(s.stage));
       } else {
          slm_fence = false;
       }

View file

@@ -326,7 +326,7 @@ brw_nir_lower_cs_intrinsics(nir_shader *nir,
                             const struct intel_device_info *devinfo,
                             struct brw_cs_prog_data *prog_data)
 {
-   assert(gl_shader_stage_uses_workgroup(nir->info.stage));
+   assert(mesa_shader_stage_uses_workgroup(nir->info.stage));
    struct lower_intrinsics_state state = {
       .nir = nir,

View file

@@ -180,7 +180,7 @@ public:
    DEFINE_PAYLOAD_ACCESSOR(brw_gs_thread_payload, gs_payload, stage == MESA_SHADER_GEOMETRY);
    DEFINE_PAYLOAD_ACCESSOR(brw_fs_thread_payload, fs_payload, stage == MESA_SHADER_FRAGMENT);
    DEFINE_PAYLOAD_ACCESSOR(brw_cs_thread_payload, cs_payload,
-                           gl_shader_stage_uses_workgroup(stage));
+                           mesa_shader_stage_uses_workgroup(stage));
    DEFINE_PAYLOAD_ACCESSOR(brw_task_mesh_thread_payload, task_mesh_payload,
                            stage == MESA_SHADER_TASK || stage == MESA_SHADER_MESH);
    DEFINE_PAYLOAD_ACCESSOR(brw_bs_thread_payload, bs_payload,

View file

@@ -1405,7 +1405,7 @@ DEFINE_PROG_DATA_DOWNCAST(tcs, prog_data->stage == MESA_SHADER_TESS_CTRL)
 DEFINE_PROG_DATA_DOWNCAST(tes, prog_data->stage == MESA_SHADER_TESS_EVAL)
 DEFINE_PROG_DATA_DOWNCAST(gs, prog_data->stage == MESA_SHADER_GEOMETRY)
 DEFINE_PROG_DATA_DOWNCAST(wm, prog_data->stage == MESA_SHADER_FRAGMENT)
-DEFINE_PROG_DATA_DOWNCAST(cs, gl_shader_stage_uses_workgroup(prog_data->stage))
+DEFINE_PROG_DATA_DOWNCAST(cs, mesa_shader_stage_uses_workgroup(prog_data->stage))
 DEFINE_PROG_DATA_DOWNCAST(vue, prog_data->stage == MESA_SHADER_VERTEX ||
                                prog_data->stage == MESA_SHADER_TESS_CTRL ||

View file

@@ -7204,7 +7204,7 @@ elk_fs_test_dispatch_packing(const fs_builder &bld)
 unsigned
 elk_fs_visitor::workgroup_size() const
 {
-   assert(gl_shader_stage_uses_workgroup(stage));
+   assert(mesa_shader_stage_uses_workgroup(stage));
    const struct elk_cs_prog_data *cs = elk_cs_prog_data(prog_data);
    return cs->local_size[0] * cs->local_size[1] * cs->local_size[2];
 }

View file

@@ -382,7 +382,7 @@ public:
    };
    elk_cs_thread_payload &cs_payload() {
-      assert(gl_shader_stage_uses_workgroup(stage));
+      assert(mesa_shader_stage_uses_workgroup(stage));
       return *static_cast<elk_cs_thread_payload *>(this->payload_);
    }

View file

@@ -2663,7 +2663,7 @@ emit_barrier(nir_to_elk_state &ntb)
    elk_fs_visitor &s = ntb.s;
    /* We are getting the barrier ID from the compute shader header */
-   assert(gl_shader_stage_uses_workgroup(s.stage));
+   assert(mesa_shader_stage_uses_workgroup(s.stage));
    elk_fs_reg payload = elk_fs_reg(VGRF, s.alloc.allocate(1), ELK_REGISTER_TYPE_UD);
@@ -3973,7 +3973,7 @@ fs_nir_emit_cs_intrinsic(nir_to_elk_state &ntb,
    const fs_builder &bld = ntb.bld;
    elk_fs_visitor &s = ntb.s;
-   assert(gl_shader_stage_uses_workgroup(s.stage));
+   assert(mesa_shader_stage_uses_workgroup(s.stage));
    struct elk_cs_prog_data *cs_prog_data = elk_cs_prog_data(s.prog_data);
    elk_fs_reg dest;
@@ -4821,7 +4821,7 @@ fs_nir_emit_intrinsic(nir_to_elk_state &ntb,
          break;
       if (s.nir->info.shared_size > 0) {
-         assert(gl_shader_stage_uses_workgroup(s.stage));
+         assert(mesa_shader_stage_uses_workgroup(s.stage));
       } else {
          slm_fence = false;
       }

View file

@@ -1714,7 +1714,7 @@ get_subgroup_size(const struct shader_info *info, unsigned max_subgroup_size)
    case SUBGROUP_SIZE_REQUIRE_8:
    case SUBGROUP_SIZE_REQUIRE_16:
    case SUBGROUP_SIZE_REQUIRE_32:
-      assert(gl_shader_stage_uses_workgroup(info->stage) ||
+      assert(mesa_shader_stage_uses_workgroup(info->stage) ||
             (info->stage >= MESA_SHADER_RAYGEN && info->stage <= MESA_SHADER_CALLABLE));
       /* These enum values are expressly chosen to be equal to the subgroup
        * size that they require.

View file

@@ -297,7 +297,7 @@ elk_nir_lower_cs_intrinsics(nir_shader *nir,
                             const struct intel_device_info *devinfo,
                             struct elk_cs_prog_data *prog_data)
 {
-   assert(gl_shader_stage_uses_workgroup(nir->info.stage));
+   assert(mesa_shader_stage_uses_workgroup(nir->info.stage));
    struct lower_intrinsics_state state = {
       .nir = nir,

View file

@@ -31,7 +31,7 @@ unsigned
 elk_required_dispatch_width(const struct shader_info *info)
 {
    if ((int)info->subgroup_size >= (int)SUBGROUP_SIZE_REQUIRE_8) {
-      assert(gl_shader_stage_uses_workgroup(info->stage));
+      assert(mesa_shader_stage_uses_workgroup(info->stage));
       /* These enum values are expressly chosen to be equal to the subgroup
        * size that they require.
        */

View file

@@ -1136,7 +1136,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
    NIR_PASS(_, nir, anv_nir_lower_resource_intel, pdevice,
             stage->bind_map.layout_type);
-   if (gl_shader_stage_uses_workgroup(nir->info.stage)) {
+   if (mesa_shader_stage_uses_workgroup(nir->info.stage)) {
       NIR_PASS(_, nir, nir_lower_vars_to_explicit_types,
                nir_var_mem_shared, shared_type_info);
@@ -4489,7 +4489,7 @@ VkResult anv_GetPipelineExecutableStatisticsKHR(
                    "Number of bytes of workgroup shared memory used by this "
                    "shader including any padding.");
       stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
-      if (gl_shader_stage_uses_workgroup(exe->stage))
+      if (mesa_shader_stage_uses_workgroup(exe->stage))
         stat->value.u64 = prog_data->total_shared;
      else
         stat->value.u64 = 0;

View file

@@ -554,7 +554,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
                   pdevice, stage->key.base.robust_flags,
                   prog_data, &stage->bind_map, mem_ctx);
-   if (gl_shader_stage_uses_workgroup(nir->info.stage)) {
+   if (mesa_shader_stage_uses_workgroup(nir->info.stage)) {
       NIR_PASS(_, nir, nir_lower_vars_to_explicit_types,
                nir_var_mem_shared, shared_type_info);
@@ -2045,7 +2045,7 @@ VkResult anv_GetPipelineExecutableStatisticsKHR(
         stat->value.u64 = prog_data->total_scratch;
      }
-   if (gl_shader_stage_uses_workgroup(exe->stage)) {
+   if (mesa_shader_stage_uses_workgroup(exe->stage)) {
      vk_outarray_append_typed(VkPipelineExecutableStatisticKHR, &out, stat) {
         VK_COPY_STR(stat->name, "Workgroup Memory Size");
         VK_COPY_STR(stat->description,

View file

@@ -1054,7 +1054,7 @@ nak_postprocess_nir(nir_shader *nir,
     * relies on the workgroup size being the actual HW workgroup size in
     * nir_intrinsic_load_subgroup_id.
     */
-   if (gl_shader_stage_uses_workgroup(nir->info.stage) &&
+   if (mesa_shader_stage_uses_workgroup(nir->info.stage) &&
       nir->info.derivative_group == DERIVATIVE_GROUP_QUADS) {
      assert(nir->info.workgroup_size[0] % 2 == 0);
      assert(nir->info.workgroup_size[1] % 2 == 0);

View file

@@ -734,7 +734,7 @@ mir_is_64(const midgard_instruction *ins)
 static bool
 needs_contiguous_workgroup(const compiler_context *ctx)
 {
-   return gl_shader_stage_uses_workgroup(ctx->stage);
+   return mesa_shader_stage_uses_workgroup(ctx->stage);
 }
 /*

View file

@@ -854,7 +854,7 @@ panvk_lower_nir(struct panvk_device *dev, nir_shader *nir,
                nir_metadata_control_flow, NULL);
 #endif
-   if (gl_shader_stage_uses_workgroup(stage)) {
+   if (mesa_shader_stage_uses_workgroup(stage)) {
       NIR_PASS(_, nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared,
                shared_type_info);