diff --git a/src/intel/compiler/elk/elk_compiler.c b/src/intel/compiler/elk/elk_compiler.c
index 86dd0668fae..dea7c1c7478 100644
--- a/src/intel/compiler/elk/elk_compiler.c
+++ b/src/intel/compiler/elk/elk_compiler.c
@@ -47,8 +47,6 @@ elk_compiler_create(void *mem_ctx, const struct intel_device_info *devinfo)
    compiler->precise_trig =
       debug_get_bool_option("INTEL_PRECISE_TRIG", false);
 
-   compiler->use_tcs_multi_patch = devinfo->ver >= 12;
-
    /* Default to the sampler since that's what we've done since forever */
    compiler->indirect_ubos_use_sampler = true;
 
@@ -129,12 +127,6 @@ elk_compiler_create(void *mem_ctx, const struct intel_device_info *devinfo)
          elk_nir_no_indirect_mask(compiler, i);
       nir_options->force_indirect_unrolling_sampler = devinfo->ver < 7;
 
-      if (compiler->use_tcs_multi_patch) {
-         /* TCS MULTI_PATCH mode has multiple patches per subgroup */
-         nir_options->divergence_analysis_options &=
-            ~nir_divergence_single_patch_per_tcs_subgroup;
-      }
-
       if (devinfo->ver < 12)
          nir_options->divergence_analysis_options |=
             nir_divergence_single_prim_per_subgroup;
diff --git a/src/intel/compiler/elk/elk_compiler.h b/src/intel/compiler/elk/elk_compiler.h
index 59c7a1bbcb2..c716e486008 100644
--- a/src/intel/compiler/elk/elk_compiler.h
+++ b/src/intel/compiler/elk/elk_compiler.h
@@ -87,7 +87,6 @@ struct elk_compiler {
    void (*shader_perf_log)(void *, unsigned *id, const char *str, ...)
       PRINTFLIKE(3, 4);
    bool scalar_stage[MESA_ALL_SHADER_STAGES];
-   bool use_tcs_multi_patch;
    struct nir_shader_compiler_options *nir_options[MESA_ALL_SHADER_STAGES];
 
    /**
diff --git a/src/intel/compiler/elk/elk_nir.c b/src/intel/compiler/elk/elk_nir.c
index a917767392f..0720bf307fd 100644
--- a/src/intel/compiler/elk/elk_nir.c
+++ b/src/intel/compiler/elk/elk_nir.c
@@ -1009,14 +1009,6 @@ elk_preprocess_nir(const struct elk_compiler *compiler, nir_shader *nir,
           nir_var_mem_ubo | nir_var_mem_ssbo,
           nir_lower_direct_array_deref_of_vec_load);
 
-   /* Clamp load_per_vertex_input of the TCS stage so that we do not generate
-    * loads reading out of bounds. We can do this here because we called
-    * nir_lower_system_values above.
-    */
-   if (nir->info.stage == MESA_SHADER_TESS_CTRL &&
-       compiler->use_tcs_multi_patch)
-      OPT(intel_nir_clamp_per_vertex_loads);
-
    /* Get rid of split copies */
    elk_nir_optimize(nir, is_scalar, devinfo);
 }
diff --git a/src/intel/compiler/elk/elk_vec4_tcs.cpp b/src/intel/compiler/elk/elk_vec4_tcs.cpp
index f1efa660f71..f4e6a6c34ae 100644
--- a/src/intel/compiler/elk/elk_vec4_tcs.cpp
+++ b/src/intel/compiler/elk/elk_vec4_tcs.cpp
@@ -390,21 +390,12 @@ elk_compile_tcs(const struct elk_compiler *compiler,
    elk_postprocess_nir(nir, compiler, debug_enabled, key->base.robust_flags);
 
-   bool has_primitive_id =
-      BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_PRIMITIVE_ID);
-
    prog_data->patch_count_threshold =
       elk::get_patch_count_threshold(key->input_vertices);
 
-   if (compiler->use_tcs_multi_patch) {
-      vue_prog_data->dispatch_mode = INTEL_DISPATCH_MODE_TCS_MULTI_PATCH;
-      prog_data->instances = nir->info.tess.tcs_vertices_out;
-      prog_data->include_primitive_id = has_primitive_id;
-   } else {
-      unsigned verts_per_thread = is_scalar ? 8 : 2;
-      vue_prog_data->dispatch_mode = INTEL_DISPATCH_MODE_TCS_SINGLE_PATCH;
-      prog_data->instances =
-         DIV_ROUND_UP(nir->info.tess.tcs_vertices_out, verts_per_thread);
-   }
+   unsigned verts_per_thread = is_scalar ? 8 : 2;
+   vue_prog_data->dispatch_mode = INTEL_DISPATCH_MODE_TCS_SINGLE_PATCH;
+   prog_data->instances =
+      DIV_ROUND_UP(nir->info.tess.tcs_vertices_out, verts_per_thread);
 
    /* Compute URB entry size.  The maximum allowed URB entry size is 32k.
    * That divides up as follows:
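
Note (annotation, not part of the patch): with MULTI_PATCH removed, every elk TCS
compile now takes the SINGLE_PATCH path, so the instance count is always
DIV_ROUND_UP(tcs_vertices_out, verts_per_thread), with verts_per_thread = 8 for
the scalar (SIMD8) backend and 2 for vec4. A minimal standalone C sketch of that
arithmetic follows; DIV_ROUND_UP is written out inline here (in Mesa it comes
from util/macros.h), and the vertex counts are just illustrative values:

    #include <stdio.h>

    /* Round-up integer division, matching Mesa's util/macros.h definition. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
       /* SINGLE_PATCH packs 8 output vertices per thread instance in the
        * scalar backend and 2 in the vec4 backend; "instances" is how many
        * thread instances are needed to cover one patch's output vertices.
        */
       static const unsigned tcs_vertices_out[] = {1, 2, 3, 4, 8, 16, 32};
       for (unsigned i = 0;
            i < sizeof(tcs_vertices_out) / sizeof(*tcs_vertices_out); i++) {
          unsigned v = tcs_vertices_out[i];
          printf("verts=%2u  scalar instances=%u  vec4 instances=%2u\n",
                 v, DIV_ROUND_UP(v, 8), DIV_ROUND_UP(v, 2));
       }
       return 0;
    }

For example, a patch with 4 output vertices needs 1 scalar instance (8 vertices
per thread) but 2 vec4 instances (2 vertices per thread).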