diff --git a/src/amd/llvm/ac_nir_to_llvm.c b/src/amd/llvm/ac_nir_to_llvm.c
index 00840d39eae..a3a8f89de0d 100644
--- a/src/amd/llvm/ac_nir_to_llvm.c
+++ b/src/amd/llvm/ac_nir_to_llvm.c
@@ -1486,7 +1486,7 @@ static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_te
       break;
    case nir_texop_tex:
       if (ctx->stage != MESA_SHADER_FRAGMENT &&
-          (!gl_shader_stage_is_compute(ctx->stage) ||
+          (!mesa_shader_stage_is_compute(ctx->stage) ||
            ctx->info->derivative_group == DERIVATIVE_GROUP_NONE)) {
          assert(!args->lod);
          args->level_zero = true;
@@ -1522,7 +1522,7 @@ static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_te
       args->attributes = AC_ATTR_INVARIANT_LOAD;
 
    bool cs_derivs =
-      gl_shader_stage_is_compute(ctx->stage) && ctx->info->derivative_group != DERIVATIVE_GROUP_NONE;
+      mesa_shader_stage_is_compute(ctx->stage) && ctx->info->derivative_group != DERIVATIVE_GROUP_NONE;
    if (ctx->stage == MESA_SHADER_FRAGMENT || cs_derivs) {
       /* Prevent texture instructions with implicit derivatives from being
        * sinked into branches. */
@@ -2766,7 +2766,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
       }
       break;
    case nir_intrinsic_load_subgroup_id:
-      assert(gl_shader_stage_is_compute(ctx->stage) && ctx->ac.gfx_level >= GFX12);
+      assert(mesa_shader_stage_is_compute(ctx->stage) && ctx->ac.gfx_level >= GFX12);
       result = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.wave.id", ctx->ac.i32, NULL, 0, 0);
       break;
    case nir_intrinsic_first_invocation:
diff --git a/src/asahi/compiler/agx_compile.c b/src/asahi/compiler/agx_compile.c
index a6c86b6f37a..718d374e22a 100644
--- a/src/asahi/compiler/agx_compile.c
+++ b/src/asahi/compiler/agx_compile.c
@@ -1545,7 +1545,7 @@ agx_emit_intrinsic(agx_builder *b, nir_intrinsic_instr *instr)
 
       /* Nothing to do for subgroup barriers */
       if (nir_intrinsic_execution_scope(instr) >= SCOPE_WORKGROUP) {
-         assert(gl_shader_stage_is_compute(b->shader->nir->info.stage));
+         assert(mesa_shader_stage_is_compute(b->shader->nir->info.stage));
          agx_threadgroup_barrier(b);
       }
 
diff --git a/src/asahi/compiler/agx_register_allocate.c b/src/asahi/compiler/agx_register_allocate.c
index 330e2c29ae8..bbd88ab05be 100644
--- a/src/asahi/compiler/agx_register_allocate.c
+++ b/src/asahi/compiler/agx_register_allocate.c
@@ -1342,7 +1342,7 @@ agx_ra(agx_context *ctx)
    /* Compute shaders need to have their entire workgroup together, so our
     * register usage is bounded by the workgroup size.
     */
-   if (gl_shader_stage_is_compute(ctx->stage)) {
+   if (mesa_shader_stage_is_compute(ctx->stage)) {
       unsigned threads_per_workgroup;
 
      /* If we don't know the workgroup size, worst case it. TODO: Optimize
diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c
index 523682e49c5..3f14af15c43 100644
--- a/src/compiler/nir/nir_lower_io.c
+++ b/src/compiler/nir/nir_lower_io.c
@@ -1054,7 +1054,7 @@ type_size_vec4(const struct glsl_type *type, bool bindless)
 void
 nir_lower_io_passes(nir_shader *nir, bool renumber_vs_inputs)
 {
-   if (gl_shader_stage_is_compute(nir->info.stage))
+   if (mesa_shader_stage_is_compute(nir->info.stage))
       return;
 
    bool lower_indirect_inputs =
diff --git a/src/compiler/shader_enums.h b/src/compiler/shader_enums.h
index 053c6cd9990..68cf39629f1 100644
--- a/src/compiler/shader_enums.h
+++ b/src/compiler/shader_enums.h
@@ -84,7 +84,7 @@ typedef enum mesa_shader_stage {
 } mesa_shader_stage;
 
 static inline bool
-gl_shader_stage_is_compute(mesa_shader_stage stage)
+mesa_shader_stage_is_compute(mesa_shader_stage stage)
 {
    return stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL;
 }
diff --git a/src/freedreno/ir3/ir3_compiler_nir.c b/src/freedreno/ir3/ir3_compiler_nir.c
index 07a060bee53..6806f455f28 100644
--- a/src/freedreno/ir3/ir3_compiler_nir.c
+++ b/src/freedreno/ir3/ir3_compiler_nir.c
@@ -5597,7 +5597,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
 
    ir = so->ir = ctx->ir;
 
-   if (gl_shader_stage_is_compute(so->type)) {
+   if (mesa_shader_stage_is_compute(so->type)) {
       so->local_size[0] = ctx->s->info.workgroup_size[0];
       so->local_size[1] = ctx->s->info.workgroup_size[1];
       so->local_size[2] = ctx->s->info.workgroup_size[2];
@@ -6042,7 +6042,7 @@ ir3_compile_shader_nir(struct ir3_compiler *compiler,
          !so->writes_stencilref;
    }
 
-   if (gl_shader_stage_is_compute(so->type)) {
+   if (mesa_shader_stage_is_compute(so->type)) {
       so->cs.local_invocation_id =
          ir3_find_sysval_regid(so, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
       so->cs.work_group_id =
diff --git a/src/freedreno/ir3/ir3_ra.c b/src/freedreno/ir3/ir3_ra.c
index dbb4cea9d9c..1cb68689891 100644
--- a/src/freedreno/ir3/ir3_ra.c
+++ b/src/freedreno/ir3/ir3_ra.c
@@ -2672,7 +2672,7 @@ ir3_ra(struct ir3_shader_variant *v)
    limit_pressure.shared = RA_SHARED_SIZE;
    limit_pressure.shared_half = RA_SHARED_HALF_SIZE;
 
-   if (gl_shader_stage_is_compute(v->type) && v->has_barrier) {
+   if (mesa_shader_stage_is_compute(v->type) && v->has_barrier) {
      calc_limit_pressure_for_cs_with_barrier(v, &limit_pressure);
    }
 
diff --git a/src/gallium/auxiliary/nir/nir_to_tgsi.c b/src/gallium/auxiliary/nir/nir_to_tgsi.c
index 35b0cf04492..d24d8b7e5b0 100644
--- a/src/gallium/auxiliary/nir/nir_to_tgsi.c
+++ b/src/gallium/auxiliary/nir/nir_to_tgsi.c
@@ -2434,7 +2434,7 @@ ntt_emit_load_sysval(struct ntt_compile *c, nir_intrinsic_instr *instr)
 static void
 ntt_emit_barrier(struct ntt_compile *c, nir_intrinsic_instr *intr)
 {
-   bool compute = gl_shader_stage_is_compute(c->s->info.stage);
+   bool compute = mesa_shader_stage_is_compute(c->s->info.stage);
 
    if (nir_intrinsic_memory_scope(intr) != SCOPE_NONE) {
       nir_variable_mode modes = nir_intrinsic_memory_modes(intr);
diff --git a/src/gallium/auxiliary/nir/nir_to_tgsi_info.c b/src/gallium/auxiliary/nir/nir_to_tgsi_info.c
index b35b125eb37..4aae1216b0a 100644
--- a/src/gallium/auxiliary/nir/nir_to_tgsi_info.c
+++ b/src/gallium/auxiliary/nir/nir_to_tgsi_info.c
@@ -320,7 +320,7 @@ void nir_tgsi_scan_shader(const struct nir_shader *nir,
       }
    }
 
-   if (gl_shader_stage_is_compute(nir->info.stage) ||
+   if (mesa_shader_stage_is_compute(nir->info.stage) ||
        gl_shader_stage_is_mesh(nir->info.stage)) {
       info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] = nir->info.workgroup_size[0];
       info->properties[TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT] = nir->info.workgroup_size[1];
diff --git a/src/gallium/drivers/freedreno/ir3/ir3_const.h b/src/gallium/drivers/freedreno/ir3/ir3_const.h
index 05c752474f1..49fcf5dad28 100644
--- a/src/gallium/drivers/freedreno/ir3/ir3_const.h
+++ b/src/gallium/drivers/freedreno/ir3/ir3_const.h
@@ -670,7 +670,7 @@
 ir3_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
                    struct fd_context *ctx, const struct pipe_grid_info *info) assert_dt
 {
-   assert(gl_shader_stage_is_compute(v->type));
+   assert(mesa_shader_stage_is_compute(v->type));
 
    emit_common_consts(v, ring, ctx, MESA_SHADER_COMPUTE);
diff --git a/src/gallium/drivers/panfrost/pan_shader.c b/src/gallium/drivers/panfrost/pan_shader.c
index 93c48419d8e..bbf63b6f416 100644
--- a/src/gallium/drivers/panfrost/pan_shader.c
+++ b/src/gallium/drivers/panfrost/pan_shader.c
@@ -136,7 +136,7 @@ panfrost_shader_compile(struct panfrost_screen *screen, const nir_shader *ir,
     * Compute CSOs call this function during create time, so preprocessing
     * happens at CSO create time regardless.
     */
-   if (gl_shader_stage_is_compute(s->info.stage))
+   if (mesa_shader_stage_is_compute(s->info.stage))
       pan_shader_preprocess(s, panfrost_device_gpu_id(dev));
 
    struct pan_compile_inputs inputs = {
diff --git a/src/gallium/drivers/radeonsi/si_shader.c b/src/gallium/drivers/radeonsi/si_shader.c
index 0c7302fbbb7..54ab2356f87 100644
--- a/src/gallium/drivers/radeonsi/si_shader.c
+++ b/src/gallium/drivers/radeonsi/si_shader.c
@@ -1295,7 +1295,7 @@ static void si_assign_param_offsets(nir_shader *nir, struct si_shader *shader,
 
 bool si_should_clear_lds(struct si_screen *sscreen, const struct nir_shader *shader)
 {
-   return gl_shader_stage_is_compute(shader->info.stage) &&
+   return mesa_shader_stage_is_compute(shader->info.stage) &&
           shader->info.shared_size > 0 && sscreen->options.clear_lds;
 }
 
@@ -2019,7 +2019,7 @@ bool si_compile_shader(struct si_screen *sscreen, struct ac_llvm_compiler *compi
    }
 
    /* Validate SGPR and VGPR usage for compute to detect compiler bugs. */
-   if (gl_shader_stage_is_compute(nir->info.stage)) {
+   if (mesa_shader_stage_is_compute(nir->info.stage)) {
       unsigned max_vgprs =
          sscreen->info.num_physical_wave64_vgprs_per_simd * (shader->wave_size == 32 ? 2 : 1);
       unsigned max_sgprs = sscreen->info.num_physical_sgprs_per_simd;
diff --git a/src/gallium/drivers/radeonsi/si_shader_nir.c b/src/gallium/drivers/radeonsi/si_shader_nir.c
index 0229ca741de..45925168b07 100644
--- a/src/gallium/drivers/radeonsi/si_shader_nir.c
+++ b/src/gallium/drivers/radeonsi/si_shader_nir.c
@@ -337,7 +337,7 @@ static void si_lower_nir(struct si_screen *sscreen, struct nir_shader *nir)
       NIR_PASS(_, nir, nir_lower_gs_intrinsics, flags);
    }
 
-   if (gl_shader_stage_is_compute(nir->info.stage)) {
+   if (mesa_shader_stage_is_compute(nir->info.stage)) {
       nir_lower_compute_system_values_options options = {0};
 
      /* gl_LocalInvocationIndex must be derived from gl_LocalInvocationID.xyz to make it correct
diff --git a/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c b/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c
index 674e19d6218..754582b50c1 100644
--- a/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c
+++ b/src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c
@@ -675,7 +675,7 @@ create_shared_block(struct ntv_context *ctx, unsigned bit_size)
    SpvId type = spirv_builder_type_uint(&ctx->builder, bit_size);
    SpvId array;
 
-   assert(gl_shader_stage_is_compute(ctx->nir->info.stage));
+   assert(mesa_shader_stage_is_compute(ctx->nir->info.stage));
    if (ctx->nir->info.cs.has_variable_shared_mem) {
       assert(ctx->shared_mem_size);
       SpvId const_shared_size = emit_uint_const(ctx, 32, ctx->nir->info.shared_size);
@@ -4715,7 +4715,7 @@ nir_to_spirv(struct nir_shader *s, const struct zink_shader_info *sinfo, const s
    spirv_builder_emit_source(&ctx.builder, SpvSourceLanguageUnknown, 0);
 
    SpvAddressingModel model = SpvAddressingModelLogical;
-   if (gl_shader_stage_is_compute(s->info.stage)) {
+   if (mesa_shader_stage_is_compute(s->info.stage)) {
       if (s->info.cs.ptr_size == 32)
          model = SpvAddressingModelPhysical32;
       else if (s->info.cs.ptr_size == 64) {
diff --git a/src/gallium/drivers/zink/zink_compiler.c b/src/gallium/drivers/zink/zink_compiler.c
index 4512498a94e..4adb78cce07 100644
--- a/src/gallium/drivers/zink/zink_compiler.c
+++ b/src/gallium/drivers/zink/zink_compiler.c
@@ -3598,7 +3598,7 @@ static bool
 lower_zs_swizzle_tex(nir_shader *nir, const void *swizzle, bool shadow_only)
 {
    /* We don't use nir_lower_tex to do our swizzling, because of this base_sampler_id. */
-   unsigned base_sampler_id = gl_shader_stage_is_compute(nir->info.stage) ? 0 : PIPE_MAX_SAMPLERS * nir->info.stage;
+   unsigned base_sampler_id = mesa_shader_stage_is_compute(nir->info.stage) ? 0 : PIPE_MAX_SAMPLERS * nir->info.stage;
    struct lower_zs_swizzle_state state = {shadow_only, base_sampler_id, swizzle};
    return nir_shader_instructions_pass(nir, lower_zs_swizzle_tex_instr,
                                        nir_metadata_control_flow,
@@ -4520,7 +4520,7 @@ zink_binding(mesa_shader_stage stage, VkDescriptorType type, int index, bool com
    } else {
       unsigned base = stage;
       /* clamp compute bindings for better driver efficiency */
-      if (gl_shader_stage_is_compute(stage))
+      if (mesa_shader_stage_is_compute(stage))
         base = 0;
       switch (type) {
       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
@@ -6223,7 +6223,7 @@ zink_shader_init(struct zink_screen *screen, struct zink_shader *zs)
    if (nir->info.stage == MESA_SHADER_FRAGMENT)
       zs->flat_flags = zink_flat_flags(nir);
 
-   if (!gl_shader_stage_is_compute(nir->info.stage) && nir->info.separate_shader)
+   if (!mesa_shader_stage_is_compute(nir->info.stage) && nir->info.separate_shader)
       NIR_PASS(_, nir, fixup_io_locations);
 
    NIR_PASS(_, nir, lower_basevertex);
diff --git a/src/intel/compiler/brw_compile_cs.cpp b/src/intel/compiler/brw_compile_cs.cpp
index 92fd6f48b22..7131e67b752 100644
--- a/src/intel/compiler/brw_compile_cs.cpp
+++ b/src/intel/compiler/brw_compile_cs.cpp
@@ -61,7 +61,7 @@ cs_fill_push_const_info(const struct intel_device_info *devinfo,
 static bool
 run_cs(brw_shader &s, bool allow_spilling)
 {
-   assert(gl_shader_stage_is_compute(s.stage));
+   assert(mesa_shader_stage_is_compute(s.stage));
 
    s.payload_ = new brw_cs_thread_payload(s);
 
diff --git a/src/intel/compiler/brw_from_nir.cpp b/src/intel/compiler/brw_from_nir.cpp
index 3e3c439e9ab..1f29e1a6366 100644
--- a/src/intel/compiler/brw_from_nir.cpp
+++ b/src/intel/compiler/brw_from_nir.cpp
@@ -153,7 +153,7 @@ brw_from_nir_setup_uniforms(brw_shader &s)
 
    s.uniforms = s.nir->num_uniforms / 4;
 
-   if (gl_shader_stage_is_compute(s.stage) && devinfo->verx10 < 125) {
+   if (mesa_shader_stage_is_compute(s.stage) && devinfo->verx10 < 125) {
      /* Add uniforms for builtins after regular NIR uniforms. */
      assert(s.uniforms == s.prog_data->nr_params);
 
@@ -173,7 +173,7 @@ emit_work_group_id_setup(nir_to_brw_state &ntb)
    brw_shader &s = ntb.s;
    const brw_builder &bld = ntb.bld.scalar_group();
 
-   assert(gl_shader_stage_is_compute(s.stage));
+   assert(mesa_shader_stage_is_compute(s.stage));
 
    brw_reg id = bld.vgrf(BRW_TYPE_UD, 3);
 
@@ -257,7 +257,7 @@ emit_system_values_block(nir_to_brw_state &ntb, nir_block *block)
       case nir_intrinsic_load_workgroup_id:
          if (gl_shader_stage_is_mesh(s.stage))
             UNREACHABLE("should be lowered by nir_lower_compute_system_values().");
-         assert(gl_shader_stage_is_compute(s.stage));
+         assert(mesa_shader_stage_is_compute(s.stage));
         reg = &ntb.system_values[SYSTEM_VALUE_WORKGROUP_ID];
         if (reg->file == BAD_FILE)
            *reg = emit_work_group_id_setup(ntb);
@@ -3040,7 +3040,7 @@ emit_barrier(nir_to_brw_state &ntb)
    if (devinfo->verx10 >= 125) {
       setup_barrier_message_payload_gfx125(bld, payload);
    } else {
-      assert(gl_shader_stage_is_compute(s.stage));
+      assert(mesa_shader_stage_is_compute(s.stage));
       brw_reg barrier_id_mask =
          brw_imm_ud(devinfo->ver == 9 ? 0x8f000000u : 0x7f000000u);
diff --git a/src/intel/compiler/brw_shader.cpp b/src/intel/compiler/brw_shader.cpp
index e5fe3db1f14..9c8d6b6affd 100644
--- a/src/intel/compiler/brw_shader.cpp
+++ b/src/intel/compiler/brw_shader.cpp
@@ -659,7 +659,7 @@ brw_shader::assign_curb_setup()
    uint64_t used = 0;
 
    const bool pull_constants = devinfo->verx10 >= 125 &&
-      (gl_shader_stage_is_compute(stage) ||
+      (mesa_shader_stage_is_compute(stage) ||
        gl_shader_stage_is_mesh(stage)) &&
       uniform_push_length;
 
@@ -667,7 +667,7 @@ brw_shader::assign_curb_setup()
    const bool pull_constants_a64 =
       (gl_shader_stage_is_rt(stage) &&
        brw_bs_prog_data(prog_data)->uses_inline_push_addr) ||
-      ((gl_shader_stage_is_compute(stage) ||
+      ((mesa_shader_stage_is_compute(stage) ||
         gl_shader_stage_is_mesh(stage)) &&
        brw_cs_prog_data(prog_data)->uses_inline_push_addr);
    assert(devinfo->has_lsc);
diff --git a/src/intel/compiler/brw_thread_payload.cpp b/src/intel/compiler/brw_thread_payload.cpp
index 1ddb5316bf9..1fc6751f1a4 100644
--- a/src/intel/compiler/brw_thread_payload.cpp
+++ b/src/intel/compiler/brw_thread_payload.cpp
@@ -404,7 +404,7 @@ brw_cs_thread_payload::load_subgroup_id(const brw_builder &bld,
       bld.AND(dest, subgroup_id_, brw_imm_ud(INTEL_MASK(7, 0)));
    } else {
       assert(devinfo->verx10 < 125);
-      assert(gl_shader_stage_is_compute(bld.shader->stage));
+      assert(mesa_shader_stage_is_compute(bld.shader->stage));
       int index = brw_get_subgroup_id_param_index(devinfo,
                                                   bld.shader->prog_data);
       bld.MOV(dest, brw_uniform_reg(index, BRW_TYPE_UD));
diff --git a/src/intel/compiler/elk/elk_fs.cpp b/src/intel/compiler/elk/elk_fs.cpp
index 0fd594d9afe..35517fe2cd3 100644
--- a/src/intel/compiler/elk/elk_fs.cpp
+++ b/src/intel/compiler/elk/elk_fs.cpp
@@ -5990,7 +5990,7 @@ elk_fs_visitor::allocate_registers(bool allow_spilling)
    prog_data->total_scratch = MAX2(elk_get_scratch_size(last_scratch),
                                    prog_data->total_scratch);
 
-   if (gl_shader_stage_is_compute(stage)) {
+   if (mesa_shader_stage_is_compute(stage)) {
       if (devinfo->platform == INTEL_PLATFORM_HSW) {
          /* According to the MEDIA_VFE_STATE's "Per Thread Scratch Space"
           * field documentation, Haswell supports a minimum of 2kB of
@@ -6328,7 +6328,7 @@ elk_fs_visitor::run_fs(bool allow_spilling, bool do_rep_send)
 bool
 elk_fs_visitor::run_cs(bool allow_spilling)
 {
-   assert(gl_shader_stage_is_compute(stage));
+   assert(mesa_shader_stage_is_compute(stage));
    assert(devinfo->ver >= 7);
 
    const fs_builder bld = fs_builder(this).at_end();
diff --git a/src/intel/compiler/elk/elk_fs_nir.cpp b/src/intel/compiler/elk/elk_fs_nir.cpp
index 1b9ab2cdc39..14bb9125d34 100644
--- a/src/intel/compiler/elk/elk_fs_nir.cpp
+++ b/src/intel/compiler/elk/elk_fs_nir.cpp
@@ -143,7 +143,7 @@ fs_nir_setup_uniforms(elk_fs_visitor &s)
 
    s.uniforms = s.nir->num_uniforms / 4;
 
-   if (gl_shader_stage_is_compute(s.stage)) {
+   if (mesa_shader_stage_is_compute(s.stage)) {
      /* Add uniforms for builtins after regular NIR uniforms.
       */
      assert(s.uniforms == s.prog_data->nr_params);
 
@@ -163,7 +163,7 @@ emit_work_group_id_setup(nir_to_elk_state &ntb)
    elk_fs_visitor &s = ntb.s;
    const fs_builder &bld = ntb.bld;
 
-   assert(gl_shader_stage_is_compute(s.stage));
+   assert(mesa_shader_stage_is_compute(s.stage));
 
    elk_fs_reg id = bld.vgrf(ELK_REGISTER_TYPE_UD, 3);
 
@@ -240,7 +240,7 @@ emit_system_values_block(nir_to_elk_state &ntb, nir_block *block)
          break;
 
       case nir_intrinsic_load_workgroup_id:
-         assert(gl_shader_stage_is_compute(s.stage));
+         assert(mesa_shader_stage_is_compute(s.stage));
         reg = &ntb.system_values[SYSTEM_VALUE_WORKGROUP_ID];
         if (reg->file == BAD_FILE)
            *reg = emit_work_group_id_setup(ntb);
@@ -2670,7 +2670,7 @@ emit_barrier(nir_to_elk_state &ntb)
    /* Clear the message payload */
    bld.exec_all().group(8, 0).MOV(payload, elk_imm_ud(0u));
 
-   assert(gl_shader_stage_is_compute(s.stage));
+   assert(mesa_shader_stage_is_compute(s.stage));
 
    uint32_t barrier_id_mask;
    switch (devinfo->ver) {
diff --git a/src/intel/compiler/elk/elk_fs_thread_payload.cpp b/src/intel/compiler/elk/elk_fs_thread_payload.cpp
index 3de915d64fe..a488ad17716 100644
--- a/src/intel/compiler/elk/elk_fs_thread_payload.cpp
+++ b/src/intel/compiler/elk/elk_fs_thread_payload.cpp
@@ -397,7 +397,7 @@ elk_cs_thread_payload::load_subgroup_id(const fs_builder &bld,
    auto devinfo = bld.shader->devinfo;
    dest = retype(dest, ELK_REGISTER_TYPE_UD);
 
-   assert(gl_shader_stage_is_compute(bld.shader->stage));
+   assert(mesa_shader_stage_is_compute(bld.shader->stage));
    int index = elk_get_subgroup_id_param_index(devinfo,
                                                bld.shader->stage_prog_data);
    bld.MOV(dest, elk_fs_reg(UNIFORM, index, ELK_REGISTER_TYPE_UD));
diff --git a/src/intel/compiler/elk/elk_nir_lower_cs_intrinsics.c b/src/intel/compiler/elk/elk_nir_lower_cs_intrinsics.c
index e9385101d87..c44d24ae809 100644
--- a/src/intel/compiler/elk/elk_nir_lower_cs_intrinsics.c
+++ b/src/intel/compiler/elk/elk_nir_lower_cs_intrinsics.c
@@ -305,7 +305,7 @@ elk_nir_lower_cs_intrinsics(nir_shader *nir,
    };
 
    /* Constraints from NV_compute_shader_derivatives.
    */
-   if (gl_shader_stage_is_compute(nir->info.stage) &&
+   if (mesa_shader_stage_is_compute(nir->info.stage) &&
       !nir->info.workgroup_size_variable) {
      if (nir->info.derivative_group == DERIVATIVE_GROUP_QUADS) {
         assert(nir->info.workgroup_size[0] % 2 == 0);
diff --git a/src/intel/vulkan/anv_pipeline.c b/src/intel/vulkan/anv_pipeline.c
index 3d21fed5dd1..98576e46358 100644
--- a/src/intel/vulkan/anv_pipeline.c
+++ b/src/intel/vulkan/anv_pipeline.c
@@ -1159,7 +1159,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
       }
    }
 
-   if (gl_shader_stage_is_compute(nir->info.stage) ||
+   if (mesa_shader_stage_is_compute(nir->info.stage) ||
       gl_shader_stage_is_mesh(nir->info.stage)) {
      NIR_PASS(_, nir, brw_nir_lower_cs_intrinsics, compiler->devinfo,
               &stage->prog_data.cs);
diff --git a/src/intel/vulkan_hasvk/anv_pipeline.c b/src/intel/vulkan_hasvk/anv_pipeline.c
index 6e4d6bc16ea..a12a44f5f3a 100644
--- a/src/intel/vulkan_hasvk/anv_pipeline.c
+++ b/src/intel/vulkan_hasvk/anv_pipeline.c
@@ -577,7 +577,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
       }
    }
 
-   if (gl_shader_stage_is_compute(nir->info.stage)) {
+   if (mesa_shader_stage_is_compute(nir->info.stage)) {
      NIR_PASS(_, nir, elk_nir_lower_cs_intrinsics, compiler->devinfo,
               &stage->prog_data.cs);
    }
diff --git a/src/panfrost/compiler/bifrost_compile.c b/src/panfrost/compiler/bifrost_compile.c
index fd465374979..698c2fe4eac 100644
--- a/src/panfrost/compiler/bifrost_compile.c
+++ b/src/panfrost/compiler/bifrost_compile.c
@@ -2105,7 +2105,7 @@ bi_emit_intrinsic(bi_builder *b, nir_intrinsic_instr *instr)
          break;
 
       case SCOPE_WORKGROUP:
-         assert(gl_shader_stage_is_compute(b->shader->stage));
+         assert(mesa_shader_stage_is_compute(b->shader->stage));
         bi_barrier(b);
         /*
          * Blob doesn't seem to do anything for memory barriers, so no need to
@@ -6464,7 +6464,7 @@ bifrost_compile_shader_nir(nir_shader *nir,
       bi_compile_variant(nir, inputs, binary, info, BI_IDVS_NONE);
    }
 
-   if (gl_shader_stage_is_compute(nir->info.stage)) {
+   if (mesa_shader_stage_is_compute(nir->info.stage)) {
      /* Workgroups may be merged if the structure of the workgroup is
       * not software visible. This is true if neither shared memory
       * nor barriers are used. The hardware may be able to optimize
diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c
index 08a7ec172ea..135b1bd9fdd 100644
--- a/src/panfrost/midgard/midgard_compile.c
+++ b/src/panfrost/midgard/midgard_compile.c
@@ -413,7 +413,7 @@ midgard_preprocess_nir(nir_shader *nir, unsigned gpu_id)
 
    /* Could be eventually useful for Vulkan, but we don't expect it to have
     * the support, so limit it to compute */
-   if (gl_shader_stage_is_compute(nir->info.stage)) {
+   if (mesa_shader_stage_is_compute(nir->info.stage)) {
       nir_lower_mem_access_bit_sizes_options mem_size_options = {
          .modes = nir_var_mem_ubo | nir_var_mem_ssbo | nir_var_mem_constant |
                   nir_var_mem_task_payload |
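
Note on the rename above: mesa_shader_stage_is_compute() keeps the exact body of the old
gl_shader_stage_is_compute(), so callers still get true for both MESA_SHADER_COMPUTE and
MESA_SHADER_KERNEL (the OpenCL kernel stage) and false for every graphics stage. Below is a
minimal standalone sketch of that contract; the three-value enum is only a stand-in for the
real mesa_shader_stage in src/compiler/shader_enums.h, which has more stages and fixed values.

#include <assert.h>
#include <stdbool.h>

/* Stand-in for mesa_shader_stage from src/compiler/shader_enums.h. */
typedef enum {
   MESA_SHADER_FRAGMENT,
   MESA_SHADER_COMPUTE,
   MESA_SHADER_KERNEL,
} mesa_shader_stage;

/* Same body as the renamed helper in the shader_enums.h hunk above. */
static inline bool
mesa_shader_stage_is_compute(mesa_shader_stage stage)
{
   return stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL;
}

int main(void)
{
   /* Both GL/Vulkan compute shaders and OpenCL kernels count as compute. */
   assert(mesa_shader_stage_is_compute(MESA_SHADER_COMPUTE));
   assert(mesa_shader_stage_is_compute(MESA_SHADER_KERNEL));
   /* Graphics stages do not. */
   assert(!mesa_shader_stage_is_compute(MESA_SHADER_FRAGMENT));
   return 0;
}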