diff --git a/src/compiler/nir/nir.c b/src/compiler/nir/nir.c
index ab9429d5dd9..0135f73b533 100644
--- a/src/compiler/nir/nir.c
+++ b/src/compiler/nir/nir.c
@@ -2378,7 +2378,7 @@ bool
 nir_shader_supports_implicit_lod(nir_shader *shader)
 {
    return (shader->info.stage == MESA_SHADER_FRAGMENT ||
-           (gl_shader_stage_uses_workgroup(shader->info.stage) &&
+           (mesa_shader_stage_uses_workgroup(shader->info.stage) &&
             shader->info.derivative_group != DERIVATIVE_GROUP_NONE));
 }
diff --git a/src/compiler/nir/nir_divergence_analysis.c b/src/compiler/nir/nir_divergence_analysis.c
index e997e7845fd..640073c1823 100644
--- a/src/compiler/nir/nir_divergence_analysis.c
+++ b/src/compiler/nir/nir_divergence_analysis.c
@@ -510,7 +510,7 @@ visit_intrinsic(nir_intrinsic_instr *instr, struct divergence_state *state)
    case nir_intrinsic_load_workgroup_index:
    case nir_intrinsic_load_workgroup_id:
-      assert(gl_shader_stage_uses_workgroup(stage) || stage == MESA_SHADER_TESS_CTRL);
+      assert(mesa_shader_stage_uses_workgroup(stage) || stage == MESA_SHADER_TESS_CTRL);
       if (stage == MESA_SHADER_COMPUTE)
          is_divergent |= (options & nir_divergence_multiple_workgroup_per_compute_subgroup);
       break;
diff --git a/src/compiler/nir/nir_lower_system_values.c b/src/compiler/nir/nir_lower_system_values.c
index bd7160b764e..61d5282db80 100644
--- a/src/compiler/nir/nir_lower_system_values.c
+++ b/src/compiler/nir/nir_lower_system_values.c
@@ -803,7 +803,7 @@ bool
 nir_lower_compute_system_values(nir_shader *shader,
                                 const nir_lower_compute_system_values_options *options)
 {
-   if (!gl_shader_stage_uses_workgroup(shader->info.stage))
+   if (!mesa_shader_stage_uses_workgroup(shader->info.stage))
       return false;
 
    struct lower_sysval_state state;
diff --git a/src/compiler/nir/nir_opt_uniform_atomics.c b/src/compiler/nir/nir_opt_uniform_atomics.c
index 3cf385f5619..a4b629c9dd0 100644
--- a/src/compiler/nir/nir_opt_uniform_atomics.c
+++ b/src/compiler/nir/nir_opt_uniform_atomics.c
@@ -173,7 +173,7 @@ is_atomic_already_optimized(nir_shader *shader, nir_intrinsic_instr *instr)
       }
    }
 
-   if (gl_shader_stage_uses_workgroup(shader->info.stage)) {
+   if (mesa_shader_stage_uses_workgroup(shader->info.stage)) {
       unsigned dims_needed = 0;
       for (unsigned i = 0; i < 3; i++)
         dims_needed |= (shader->info.workgroup_size_variable ||
@@ -324,7 +324,7 @@ nir_opt_uniform_atomics(nir_shader *shader, bool fs_atomics_predicated)
    /* A 1x1x1 workgroup only ever has one active lane, so there's no point in
    * optimizing any atomics.
    */
-   if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
+   if (mesa_shader_stage_uses_workgroup(shader->info.stage) &&
       !shader->info.workgroup_size_variable &&
       shader->info.workgroup_size[0] == 1 && shader->info.workgroup_size[1] == 1 &&
       shader->info.workgroup_size[2] == 1)
diff --git a/src/compiler/nir/nir_print.c b/src/compiler/nir/nir_print.c
index a93eaab8a9b..9dbce03bc89 100644
--- a/src/compiler/nir/nir_print.c
+++ b/src/compiler/nir/nir_print.c
@@ -2653,7 +2653,7 @@ print_shader_info(const struct shader_info *info, FILE *fp)
    print_nz_bool(fp, "internal", info->internal);
 
-   if (gl_shader_stage_uses_workgroup(info->stage)) {
+   if (mesa_shader_stage_uses_workgroup(info->stage)) {
       fprintf(fp, "workgroup_size: %u, %u, %u%s\n",
               info->workgroup_size[0],
               info->workgroup_size[1],
diff --git a/src/compiler/nir/nir_range_analysis.c b/src/compiler/nir/nir_range_analysis.c
index d13eeb18b64..b1ea5fac9f1 100644
--- a/src/compiler/nir/nir_range_analysis.c
+++ b/src/compiler/nir/nir_range_analysis.c
@@ -1664,7 +1664,7 @@ get_intrinsic_uub(struct analysis_state *state, struct uub_query q, uint32_t *re
        * They can safely use the same code path here as variable sized
        * compute-like shader stages.
        */
-      if (!gl_shader_stage_uses_workgroup(shader->info.stage) ||
+      if (!mesa_shader_stage_uses_workgroup(shader->info.stage) ||
          shader->info.workgroup_size_variable) {
         *result = config->max_workgroup_invocations - 1;
      } else {
@@ -1725,7 +1725,7 @@ get_intrinsic_uub(struct analysis_state *state, struct uub_query q, uint32_t *re
    case nir_intrinsic_load_subgroup_id:
    case nir_intrinsic_load_num_subgroups: {
       uint32_t workgroup_size = config->max_workgroup_invocations;
-      if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
+      if (mesa_shader_stage_uses_workgroup(shader->info.stage) &&
          !shader->info.workgroup_size_variable) {
         workgroup_size = shader->info.workgroup_size[0] *
                          shader->info.workgroup_size[1] *
diff --git a/src/compiler/shader_enums.h b/src/compiler/shader_enums.h
index 556959921af..dc74ef4fcbb 100644
--- a/src/compiler/shader_enums.h
+++ b/src/compiler/shader_enums.h
@@ -97,7 +97,7 @@ mesa_shader_stage_is_mesh(mesa_shader_stage stage)
 }
 
 static inline bool
-gl_shader_stage_uses_workgroup(mesa_shader_stage stage)
+mesa_shader_stage_uses_workgroup(mesa_shader_stage stage)
 {
    return stage == MESA_SHADER_COMPUTE ||
          stage == MESA_SHADER_KERNEL ||
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index 8310f55d8dd..57b9ce4009d 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -2914,7 +2914,7 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
    }
 
    /* Now that we have the value, update the workgroup size if needed */
-   if (gl_shader_stage_uses_workgroup(b->entry_point_stage))
+   if (mesa_shader_stage_uses_workgroup(b->entry_point_stage))
       vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb,
                              NULL);
 }
@@ -5416,7 +5416,7 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
       break;
 
    case SpvExecutionModeLocalSize:
-      if (gl_shader_stage_uses_workgroup(b->shader->info.stage)) {
+      if (mesa_shader_stage_uses_workgroup(b->shader->info.stage)) {
         b->shader->info.workgroup_size[0] = mode->operands[0];
         b->shader->info.workgroup_size[1] = mode->operands[1];
         b->shader->info.workgroup_size[2] = mode->operands[2];
@@ -5559,12 +5559,12 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
       break;
 
    case SpvExecutionModeDerivativeGroupQuadsKHR:
-      vtn_assert(gl_shader_stage_uses_workgroup(b->shader->info.stage));
+      vtn_assert(mesa_shader_stage_uses_workgroup(b->shader->info.stage));
       b->shader->info.derivative_group = DERIVATIVE_GROUP_QUADS;
       break;
 
    case SpvExecutionModeDerivativeGroupLinearKHR:
-      vtn_assert(gl_shader_stage_uses_workgroup(b->shader->info.stage));
+      vtn_assert(mesa_shader_stage_uses_workgroup(b->shader->info.stage));
       b->shader->info.derivative_group = DERIVATIVE_GROUP_LINEAR;
       break;
 
@@ -5750,7 +5750,7 @@ vtn_handle_execution_mode_id(struct vtn_builder *b, struct vtn_value *entry_poin
    switch (mode->exec_mode) {
    case SpvExecutionModeLocalSizeId:
-      if (gl_shader_stage_uses_workgroup(b->shader->info.stage)) {
+      if (mesa_shader_stage_uses_workgroup(b->shader->info.stage)) {
         b->shader->info.workgroup_size[0] = vtn_constant_uint(b, mode->operands[0]);
         b->shader->info.workgroup_size[1] = vtn_constant_uint(b, mode->operands[1]);
         b->shader->info.workgroup_size[2] = vtn_constant_uint(b, mode->operands[2]);
@@ -7292,7 +7292,7 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
                                  vtn_handle_execution_mode_id, NULL);
 
    if (b->workgroup_size_builtin) {
-      vtn_assert(gl_shader_stage_uses_workgroup(stage));
+      vtn_assert(mesa_shader_stage_uses_workgroup(stage));
      vtn_assert(b->workgroup_size_builtin->type->type ==
                 glsl_vector_type(GLSL_TYPE_UINT, 3));
diff --git a/src/intel/compiler/brw_compiler.h b/src/intel/compiler/brw_compiler.h
index 4f8f312d957..4709dbbd393 100644
--- a/src/intel/compiler/brw_compiler.h
+++ b/src/intel/compiler/brw_compiler.h
@@ -1355,7 +1355,7 @@ DEFINE_PROG_DATA_DOWNCAST(tcs, prog_data->stage == MESA_SHADER_TESS_CTRL)
 DEFINE_PROG_DATA_DOWNCAST(tes, prog_data->stage == MESA_SHADER_TESS_EVAL)
 DEFINE_PROG_DATA_DOWNCAST(gs, prog_data->stage == MESA_SHADER_GEOMETRY)
 DEFINE_PROG_DATA_DOWNCAST(wm, prog_data->stage == MESA_SHADER_FRAGMENT)
-DEFINE_PROG_DATA_DOWNCAST(cs, gl_shader_stage_uses_workgroup(prog_data->stage))
+DEFINE_PROG_DATA_DOWNCAST(cs, mesa_shader_stage_uses_workgroup(prog_data->stage))
 DEFINE_PROG_DATA_DOWNCAST(bs, brw_shader_stage_is_bindless(prog_data->stage))
 
 DEFINE_PROG_DATA_DOWNCAST(vue, prog_data->stage == MESA_SHADER_VERTEX ||
diff --git a/src/intel/compiler/brw_from_nir.cpp b/src/intel/compiler/brw_from_nir.cpp
index cf8cc102755..efd95fc1bc4 100644
--- a/src/intel/compiler/brw_from_nir.cpp
+++ b/src/intel/compiler/brw_from_nir.cpp
@@ -3032,7 +3032,7 @@ emit_barrier(nir_to_brw_state &ntb)
    brw_shader &s = ntb.s;
 
    /* We are getting the barrier ID from the compute shader header */
-   assert(gl_shader_stage_uses_workgroup(s.stage));
+   assert(mesa_shader_stage_uses_workgroup(s.stage));
 
    /* Zero-initialize the payload */
    brw_reg payload = hbld.MOV(brw_imm_ud(0u));
@@ -4674,7 +4674,7 @@ set_memory_address(nir_to_brw_state &ntb,
 static unsigned
 brw_workgroup_size(brw_shader &s)
 {
-   assert(gl_shader_stage_uses_workgroup(s.stage));
+   assert(mesa_shader_stage_uses_workgroup(s.stage));
    assert(!s.nir->info.workgroup_size_variable);
    const struct brw_cs_prog_data *cs = brw_cs_prog_data(s.prog_data);
    return cs->local_size[0] * cs->local_size[1] * cs->local_size[2];
@@ -4688,7 +4688,7 @@ brw_from_nir_emit_cs_intrinsic(nir_to_brw_state &ntb,
    const brw_builder &bld = ntb.bld;
    brw_shader &s = ntb.s;
 
-   assert(gl_shader_stage_uses_workgroup(s.stage));
+   assert(mesa_shader_stage_uses_workgroup(s.stage));
 
    struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(s.prog_data);
    brw_reg dest;
@@ -6119,7 +6119,7 @@ brw_from_nir_emit_intrinsic(nir_to_brw_state &ntb,
            break;
 
         if (s.nir->info.shared_size > 0) {
-            assert(gl_shader_stage_uses_workgroup(s.stage));
+            assert(mesa_shader_stage_uses_workgroup(s.stage));
         } else {
            slm_fence = false;
         }
diff --git a/src/intel/compiler/brw_nir_lower_cs_intrinsics.c b/src/intel/compiler/brw_nir_lower_cs_intrinsics.c
index 4fc1e43050c..b05747652c1 100644
--- a/src/intel/compiler/brw_nir_lower_cs_intrinsics.c
+++ b/src/intel/compiler/brw_nir_lower_cs_intrinsics.c
@@ -326,7 +326,7 @@ brw_nir_lower_cs_intrinsics(nir_shader *nir,
                             const struct intel_device_info *devinfo,
                             struct brw_cs_prog_data *prog_data)
 {
-   assert(gl_shader_stage_uses_workgroup(nir->info.stage));
+   assert(mesa_shader_stage_uses_workgroup(nir->info.stage));
 
    struct lower_intrinsics_state state = {
       .nir = nir,
diff --git a/src/intel/compiler/brw_shader.h b/src/intel/compiler/brw_shader.h
index 8fe8e6d270b..a68025a8ad3 100644
--- a/src/intel/compiler/brw_shader.h
+++ b/src/intel/compiler/brw_shader.h
@@ -180,7 +180,7 @@ public:
    DEFINE_PAYLOAD_ACCESSOR(brw_gs_thread_payload, gs_payload, stage == MESA_SHADER_GEOMETRY);
    DEFINE_PAYLOAD_ACCESSOR(brw_fs_thread_payload, fs_payload, stage == MESA_SHADER_FRAGMENT);
    DEFINE_PAYLOAD_ACCESSOR(brw_cs_thread_payload, cs_payload,
-                           gl_shader_stage_uses_workgroup(stage));
+                           mesa_shader_stage_uses_workgroup(stage));
    DEFINE_PAYLOAD_ACCESSOR(brw_task_mesh_thread_payload, task_mesh_payload,
                            stage == MESA_SHADER_TASK || stage == MESA_SHADER_MESH);
    DEFINE_PAYLOAD_ACCESSOR(brw_bs_thread_payload, bs_payload,
diff --git a/src/intel/compiler/elk/elk_compiler.h b/src/intel/compiler/elk/elk_compiler.h
index 3bec57946dd..8bf65b27588 100644
--- a/src/intel/compiler/elk/elk_compiler.h
+++ b/src/intel/compiler/elk/elk_compiler.h
@@ -1405,7 +1405,7 @@ DEFINE_PROG_DATA_DOWNCAST(tcs, prog_data->stage == MESA_SHADER_TESS_CTRL)
 DEFINE_PROG_DATA_DOWNCAST(tes, prog_data->stage == MESA_SHADER_TESS_EVAL)
 DEFINE_PROG_DATA_DOWNCAST(gs, prog_data->stage == MESA_SHADER_GEOMETRY)
 DEFINE_PROG_DATA_DOWNCAST(wm, prog_data->stage == MESA_SHADER_FRAGMENT)
-DEFINE_PROG_DATA_DOWNCAST(cs, gl_shader_stage_uses_workgroup(prog_data->stage))
+DEFINE_PROG_DATA_DOWNCAST(cs, mesa_shader_stage_uses_workgroup(prog_data->stage))
 
 DEFINE_PROG_DATA_DOWNCAST(vue, prog_data->stage == MESA_SHADER_VERTEX ||
                                prog_data->stage == MESA_SHADER_TESS_CTRL ||
diff --git a/src/intel/compiler/elk/elk_fs.cpp b/src/intel/compiler/elk/elk_fs.cpp
index 35517fe2cd3..772f5c43673 100644
--- a/src/intel/compiler/elk/elk_fs.cpp
+++ b/src/intel/compiler/elk/elk_fs.cpp
@@ -7204,7 +7204,7 @@ elk_fs_test_dispatch_packing(const fs_builder &bld)
 unsigned
 elk_fs_visitor::workgroup_size() const
 {
-   assert(gl_shader_stage_uses_workgroup(stage));
+   assert(mesa_shader_stage_uses_workgroup(stage));
    const struct elk_cs_prog_data *cs = elk_cs_prog_data(prog_data);
    return cs->local_size[0] * cs->local_size[1] * cs->local_size[2];
 }
diff --git a/src/intel/compiler/elk/elk_fs.h b/src/intel/compiler/elk/elk_fs.h
index 333cb85aaad..300e5b42a3f 100644
--- a/src/intel/compiler/elk/elk_fs.h
+++ b/src/intel/compiler/elk/elk_fs.h
@@ -382,7 +382,7 @@ public:
    };
 
    elk_cs_thread_payload &cs_payload() {
-      assert(gl_shader_stage_uses_workgroup(stage));
+      assert(mesa_shader_stage_uses_workgroup(stage));
      return *static_cast<elk_cs_thread_payload *>(this->payload_);
   }
diff --git a/src/intel/compiler/elk/elk_fs_nir.cpp b/src/intel/compiler/elk/elk_fs_nir.cpp
index 14bb9125d34..5fe66b8f7b9 100644
--- a/src/intel/compiler/elk/elk_fs_nir.cpp
+++ b/src/intel/compiler/elk/elk_fs_nir.cpp
@@ -2663,7 +2663,7 @@ emit_barrier(nir_to_elk_state &ntb)
    elk_fs_visitor &s = ntb.s;
 
    /* We are getting the barrier ID from the compute shader header */
-   assert(gl_shader_stage_uses_workgroup(s.stage));
+   assert(mesa_shader_stage_uses_workgroup(s.stage));
 
    elk_fs_reg payload = elk_fs_reg(VGRF, s.alloc.allocate(1),
                                    ELK_REGISTER_TYPE_UD);
@@ -3973,7 +3973,7 @@ fs_nir_emit_cs_intrinsic(nir_to_elk_state &ntb,
    const fs_builder &bld = ntb.bld;
    elk_fs_visitor &s = ntb.s;
 
-   assert(gl_shader_stage_uses_workgroup(s.stage));
+   assert(mesa_shader_stage_uses_workgroup(s.stage));
 
    struct elk_cs_prog_data *cs_prog_data = elk_cs_prog_data(s.prog_data);
    elk_fs_reg dest;
@@ -4821,7 +4821,7 @@ fs_nir_emit_intrinsic(nir_to_elk_state &ntb,
            break;
 
         if (s.nir->info.shared_size > 0) {
-            assert(gl_shader_stage_uses_workgroup(s.stage));
+            assert(mesa_shader_stage_uses_workgroup(s.stage));
         } else {
            slm_fence = false;
         }
diff --git a/src/intel/compiler/elk/elk_nir.c b/src/intel/compiler/elk/elk_nir.c
index bca79d8fab5..9e249c03512 100644
--- a/src/intel/compiler/elk/elk_nir.c
+++ b/src/intel/compiler/elk/elk_nir.c
@@ -1714,7 +1714,7 @@ get_subgroup_size(const struct shader_info *info, unsigned max_subgroup_size)
    case SUBGROUP_SIZE_REQUIRE_8:
    case SUBGROUP_SIZE_REQUIRE_16:
    case SUBGROUP_SIZE_REQUIRE_32:
-      assert(gl_shader_stage_uses_workgroup(info->stage) ||
+      assert(mesa_shader_stage_uses_workgroup(info->stage) ||
             (info->stage >= MESA_SHADER_RAYGEN && info->stage <= MESA_SHADER_CALLABLE));
      /* These enum values are expressly chosen to be equal to the subgroup
       * size that they require.
diff --git a/src/intel/compiler/elk/elk_nir_lower_cs_intrinsics.c b/src/intel/compiler/elk/elk_nir_lower_cs_intrinsics.c
index c44d24ae809..f9534af7e2f 100644
--- a/src/intel/compiler/elk/elk_nir_lower_cs_intrinsics.c
+++ b/src/intel/compiler/elk/elk_nir_lower_cs_intrinsics.c
@@ -297,7 +297,7 @@ elk_nir_lower_cs_intrinsics(nir_shader *nir,
                             const struct intel_device_info *devinfo,
                             struct elk_cs_prog_data *prog_data)
 {
-   assert(gl_shader_stage_uses_workgroup(nir->info.stage));
+   assert(mesa_shader_stage_uses_workgroup(nir->info.stage));
 
    struct lower_intrinsics_state state = {
       .nir = nir,
diff --git a/src/intel/compiler/elk/elk_simd_selection.cpp b/src/intel/compiler/elk/elk_simd_selection.cpp
index c479cde51aa..a418c48b7f8 100644
--- a/src/intel/compiler/elk/elk_simd_selection.cpp
+++ b/src/intel/compiler/elk/elk_simd_selection.cpp
@@ -31,7 +31,7 @@ unsigned
 elk_required_dispatch_width(const struct shader_info *info)
 {
    if ((int)info->subgroup_size >= (int)SUBGROUP_SIZE_REQUIRE_8) {
-      assert(gl_shader_stage_uses_workgroup(info->stage));
+      assert(mesa_shader_stage_uses_workgroup(info->stage));
      /* These enum values are expressly chosen to be equal to the subgroup
       * size that they require.
      */
diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c
index d3c7868e204..5a40d8074ed 100644
--- a/src/intel/vulkan/anv_pipeline.c
+++ b/src/intel/vulkan/anv_pipeline.c
@@ -1136,7 +1136,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
    NIR_PASS(_, nir, anv_nir_lower_resource_intel, pdevice,
            stage->bind_map.layout_type);
 
-   if (gl_shader_stage_uses_workgroup(nir->info.stage)) {
+   if (mesa_shader_stage_uses_workgroup(nir->info.stage)) {
       NIR_PASS(_, nir, nir_lower_vars_to_explicit_types,
               nir_var_mem_shared, shared_type_info);
 
@@ -4489,7 +4489,7 @@ VkResult anv_GetPipelineExecutableStatisticsKHR(
                   "Number of bytes of workgroup shared memory used by this "
                   "shader including any padding.");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
-      if (gl_shader_stage_uses_workgroup(exe->stage))
+      if (mesa_shader_stage_uses_workgroup(exe->stage))
         stat->value.u64 = prog_data->total_shared;
      else
         stat->value.u64 = 0;
diff --git a/src/intel/vulkan_hasvk/anv_pipeline.c b/src/intel/vulkan_hasvk/anv_pipeline.c
index a12a44f5f3a..02d37242bdc 100644
--- a/src/intel/vulkan_hasvk/anv_pipeline.c
+++ b/src/intel/vulkan_hasvk/anv_pipeline.c
@@ -554,7 +554,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
                            pdevice, stage->key.base.robust_flags,
                            prog_data, &stage->bind_map, mem_ctx);
 
-   if (gl_shader_stage_uses_workgroup(nir->info.stage)) {
+   if (mesa_shader_stage_uses_workgroup(nir->info.stage)) {
       NIR_PASS(_, nir, nir_lower_vars_to_explicit_types,
               nir_var_mem_shared, shared_type_info);
 
@@ -2045,7 +2045,7 @@ VkResult anv_GetPipelineExecutableStatisticsKHR(
         stat->value.u64 = prog_data->total_scratch;
      }
 
-   if (gl_shader_stage_uses_workgroup(exe->stage)) {
+   if (mesa_shader_stage_uses_workgroup(exe->stage)) {
      vk_outarray_append_typed(VkPipelineExecutableStatisticKHR, &out, stat) {
         VK_COPY_STR(stat->name, "Workgroup Memory Size");
         VK_COPY_STR(stat->description,
diff --git a/src/nouveau/compiler/nak_nir.c b/src/nouveau/compiler/nak_nir.c
index 26563f9d948..d6c8e8d8322 100644
--- a/src/nouveau/compiler/nak_nir.c
+++ b/src/nouveau/compiler/nak_nir.c
@@ -1054,7 +1054,7 @@ nak_postprocess_nir(nir_shader *nir,
    * relies on the workgroup size being the actual HW workgroup size in
    * nir_intrinsic_load_subgroup_id.
    */
-   if (gl_shader_stage_uses_workgroup(nir->info.stage) &&
+   if (mesa_shader_stage_uses_workgroup(nir->info.stage) &&
       nir->info.derivative_group == DERIVATIVE_GROUP_QUADS) {
      assert(nir->info.workgroup_size[0] % 2 == 0);
      assert(nir->info.workgroup_size[1] % 2 == 0);
diff --git a/src/panfrost/midgard/midgard_ra.c b/src/panfrost/midgard/midgard_ra.c
index c0d3908d673..849b0539c3f 100644
--- a/src/panfrost/midgard/midgard_ra.c
+++ b/src/panfrost/midgard/midgard_ra.c
@@ -734,7 +734,7 @@ mir_is_64(const midgard_instruction *ins)
 static bool
 needs_contiguous_workgroup(const compiler_context *ctx)
 {
-   return gl_shader_stage_uses_workgroup(ctx->stage);
+   return mesa_shader_stage_uses_workgroup(ctx->stage);
 }
 
 /*
diff --git a/src/panfrost/vulkan/panvk_vX_shader.c b/src/panfrost/vulkan/panvk_vX_shader.c
index 79932644ce6..08f85ab301e 100644
--- a/src/panfrost/vulkan/panvk_vX_shader.c
+++ b/src/panfrost/vulkan/panvk_vX_shader.c
@@ -854,7 +854,7 @@ panvk_lower_nir(struct panvk_device *dev, nir_shader *nir,
               nir_metadata_control_flow, NULL);
 #endif
 
-   if (gl_shader_stage_uses_workgroup(stage)) {
+   if (mesa_shader_stage_uses_workgroup(stage)) {
       NIR_PASS(_, nir, nir_lower_vars_to_explicit_types,
               nir_var_mem_shared, shared_type_info);