diff --git a/src/amd/vulkan/nir/radv_nir_opt_tid_function.c b/src/amd/vulkan/nir/radv_nir_opt_tid_function.c
index 6ada120b8cd..7ed86e662be 100644
--- a/src/amd/vulkan/nir/radv_nir_opt_tid_function.c
+++ b/src/amd/vulkan/nir/radv_nir_opt_tid_function.c
@@ -43,12 +43,6 @@ alu_src_get_fotid_mask(nir_alu_instr *instr, unsigned idx)
 static void
 update_fotid_alu(nir_builder *b, nir_alu_instr *instr, const radv_nir_opt_tid_function_options *options)
 {
-   /* For legacy reasons these are ALU instructions
-    * when they should be intrinsics.
-    */
-   if (nir_op_is_derivative(instr->op))
-      return;
-
    const nir_op_info *info = &nir_op_infos[instr->op];
 
    unsigned res = BITFIELD_MASK(instr->def.num_components);
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index f875d290800..737b6db1275 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -1528,12 +1528,6 @@ typedef enum {
     * comparison.
     */
    NIR_OP_IS_SELECTION = (1 << 2),
-
-   /**
-    * Operation where a screen-space derivative is taken of src[0]. Must not be
-    * moved into non-uniform control flow.
-    */
-   NIR_OP_IS_DERIVATIVE = (1 << 3),
 } nir_op_algebraic_property;
 
 /* vec16 is the widest ALU op in NIR, making the max number of input of ALU
@@ -1603,12 +1597,6 @@ nir_op_is_selection(nir_op op)
    return (nir_op_infos[op].algebraic_properties & NIR_OP_IS_SELECTION) != 0;
 }
 
-static inline bool
-nir_op_is_derivative(nir_op op)
-{
-   return (nir_op_infos[op].algebraic_properties & NIR_OP_IS_DERIVATIVE) != 0;
-}
-
 /***/
 typedef struct nir_alu_instr {
    /** Base instruction */
diff --git a/src/compiler/nir/nir_gather_info.c b/src/compiler/nir/nir_gather_info.c
index a5776cb7844..27db703ee86 100644
--- a/src/compiler/nir/nir_gather_info.c
+++ b/src/compiler/nir/nir_gather_info.c
@@ -908,12 +908,6 @@ gather_tex_info(nir_tex_instr *instr, nir_shader *shader)
 static void
 gather_alu_info(nir_alu_instr *instr, nir_shader *shader)
 {
-   if (nir_op_is_derivative(instr->op) &&
-       shader->info.stage == MESA_SHADER_FRAGMENT) {
-
-      shader->info.fs.needs_quad_helper_invocations = true;
-   }
-
    const nir_op_info *info = &nir_op_infos[instr->op];
 
    for (unsigned i = 0; i < info->num_inputs; i++) {
diff --git a/src/compiler/nir/nir_opcodes.py b/src/compiler/nir/nir_opcodes.py
index e0a9f76e1f5..592b0a263d2 100644
--- a/src/compiler/nir/nir_opcodes.py
+++ b/src/compiler/nir/nir_opcodes.py
@@ -150,7 +150,6 @@ def type_base_type(type_):
 _2src_commutative = "2src_commutative "
 associative = "associative "
 selection = "selection "
-derivative = "derivative "
 
 # global dictionary of opcodes
 opcodes = {}
diff --git a/src/compiler/nir/nir_opt_gcm.c b/src/compiler/nir/nir_opt_gcm.c
index 7ecabb7365a..0654f4d95b5 100644
--- a/src/compiler/nir/nir_opt_gcm.c
+++ b/src/compiler/nir/nir_opt_gcm.c
@@ -315,11 +315,7 @@ gcm_pin_instructions(nir_function_impl *impl, struct gcm_state *state)
          case nir_instr_type_alu: {
             nir_alu_instr *alu = nir_instr_as_alu(instr);
 
-            if (nir_op_is_derivative(alu->op)) {
-               /* These can only go in uniform control flow */
-               instr->pass_flags = GCM_INSTR_SCHEDULE_EARLIER_ONLY;
-            } else if (alu->op == nir_op_mov &&
-                       !is_src_scalarizable(&alu->src[0].src)) {
+            if (alu->op == nir_op_mov && !is_src_scalarizable(&alu->src[0].src)) {
                instr->pass_flags = GCM_INSTR_PINNED;
             } else {
                instr->pass_flags = 0;
diff --git a/src/compiler/nir/nir_opt_move_discards_to_top.c b/src/compiler/nir/nir_opt_move_discards_to_top.c
index 52103825318..80531d5dc58 100644
--- a/src/compiler/nir/nir_opt_move_discards_to_top.c
+++ b/src/compiler/nir/nir_opt_move_discards_to_top.c
@@ -133,13 +133,7 @@ opt_move_discards_to_top_impl(nir_function_impl *impl)
          instr->pass_flags = 0;
 
          switch (instr->type) {
-         case nir_instr_type_alu: {
-            nir_alu_instr *alu = nir_instr_as_alu(instr);
-            if (nir_op_is_derivative(alu->op))
-               consider_discards = false;
-            continue;
-         }
-
+         case nir_instr_type_alu:
          case nir_instr_type_deref:
          case nir_instr_type_load_const:
          case nir_instr_type_undef:
diff --git a/src/compiler/nir/nir_opt_preamble.c b/src/compiler/nir/nir_opt_preamble.c
index 8b2cb1e5f05..155dea37094 100644
--- a/src/compiler/nir/nir_opt_preamble.c
+++ b/src/compiler/nir/nir_opt_preamble.c
@@ -277,17 +277,9 @@ can_move_instr(nir_instr *instr, opt_preamble_ctx *ctx)
       }
       return can_move_srcs(instr, ctx);
    }
-   case nir_instr_type_alu: {
-      /* The preamble is presumably run with only one thread, so we can't run
-       * derivatives in it.
-       * TODO: Replace derivatives with 0 instead, if real apps hit this.
-       */
-      nir_alu_instr *alu = nir_instr_as_alu(instr);
-      if (nir_op_is_derivative(alu->op))
-         return false;
-      else
-         return can_move_srcs(instr, ctx);
-   }
+   case nir_instr_type_alu:
+      return can_move_srcs(instr, ctx);
+
    case nir_instr_type_intrinsic:
       return can_move_intrinsic(nir_instr_as_intrinsic(instr), ctx);
 
diff --git a/src/compiler/nir/nir_opt_sink.c b/src/compiler/nir/nir_opt_sink.c
index f5e97b272f9..b871c8f035b 100644
--- a/src/compiler/nir/nir_opt_sink.c
+++ b/src/compiler/nir/nir_opt_sink.c
@@ -70,14 +70,6 @@ can_sink_instr(nir_instr *instr, nir_move_options options, bool *can_mov_out_of_
    case nir_instr_type_alu: {
       nir_alu_instr *alu = nir_instr_as_alu(instr);
 
-      /* Derivatives cannot be moved into non-uniform control flow, including
-       * past a discard_if in the same block. Even if they could, sinking
-       * derivatives extends the lifetime of helper invocations which may be
-       * worse than the register pressure decrease. Bail on derivatives.
-       */
-      if (nir_op_is_derivative(alu->op))
-         return false;
-
       if (nir_op_is_vec_or_mov(alu->op) || alu->op == nir_op_b2i32)
          return options & nir_move_copies;
       if (nir_alu_instr_is_comparison(alu))
diff --git a/src/compiler/nir/nir_opt_varyings.c b/src/compiler/nir/nir_opt_varyings.c
index c1385e344d9..eb15ec96993 100644
--- a/src/compiler/nir/nir_opt_varyings.c
+++ b/src/compiler/nir/nir_opt_varyings.c
@@ -2879,12 +2879,6 @@ update_movable_flags(struct linkage_info *linkage, nir_instr *instr)
       unsigned num_srcs = nir_op_infos[alu->op].num_inputs;
       unsigned alu_interp;
 
-      /* These are shader-dependent and thus unmovable. */
-      if (nir_op_is_derivative(alu->op)) {
-         instr->pass_flags |= FLAG_UNMOVABLE;
-         return;
-      }
-
       /* Make vector ops unmovable. They are technically movable but more
        * complicated, and NIR should be scalarized for this pass anyway.
        * The only remaining vector ops should be vecN for intrinsic sources.
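
Not part of the diff above: with NIR_OP_IS_DERIVATIVE and nir_op_is_derivative() removed, a pass that still needs to special-case derivatives would presumably match the derivative intrinsic opcodes instead. A minimal sketch under that assumption (instr_is_derivative_intrin is a hypothetical helper name, and the ddx/ddy intrinsic family is assumed to cover all derivative ops):

/* Hypothetical helper, not part of this series: identify a derivative by its
 * intrinsic opcode now that the ALU algebraic property no longer exists.
 */
static bool
instr_is_derivative_intrin(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   switch (nir_instr_as_intrinsic(instr)->intrinsic) {
   case nir_intrinsic_ddx:
   case nir_intrinsic_ddy:
   case nir_intrinsic_ddx_fine:
   case nir_intrinsic_ddy_fine:
   case nir_intrinsic_ddx_coarse:
   case nir_intrinsic_ddy_coarse:
      return true;
   default:
      return false;
   }
}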