diff --git a/src/compiler/nir/meson.build b/src/compiler/nir/meson.build index 516397fdafc..8a0ba5951ca 100644 --- a/src/compiler/nir/meson.build +++ b/src/compiler/nir/meson.build @@ -373,6 +373,14 @@ if with_tests 'tests/opt_if_tests.cpp', 'tests/opt_peephole_select.cpp', 'tests/opt_shrink_vectors_tests.cpp', + 'tests/opt_varyings_tests_bicm_binary_alu.cpp', + 'tests/opt_varyings_tests_dead_input.cpp', + 'tests/opt_varyings_tests_dead_output.cpp', + 'tests/opt_varyings_tests_dedup.cpp', + 'tests/opt_varyings_tests_prop_const.cpp', + 'tests/opt_varyings_tests_prop_ubo.cpp', + 'tests/opt_varyings_tests_prop_uniform.cpp', + 'tests/opt_varyings_tests_prop_uniform_expr.cpp', 'tests/serialize_tests.cpp', 'tests/range_analysis_tests.cpp', 'tests/vars_tests.cpp', diff --git a/src/compiler/nir/tests/nir_opt_varyings_test.h b/src/compiler/nir/tests/nir_opt_varyings_test.h new file mode 100644 index 00000000000..1e5553fc2c0 --- /dev/null +++ b/src/compiler/nir/tests/nir_opt_varyings_test.h @@ -0,0 +1,660 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * SPDX-License-Identifier: MIT + */ + +#include <gtest/gtest.h> + +#include "nir.h" +#include "nir_builder.h" + +namespace { + +enum { + INTERP_FLAT, + INTERP_PERSP_PIXEL, + INTERP_PERSP_CENTROID, + INTERP_PERSP_SAMPLE, + INTERP_PERSP_AT_OFFSET, + INTERP_LINEAR_PIXEL, + INTERP_LINEAR_CENTROID, + INTERP_LINEAR_SAMPLE, + INTERP_LINEAR_AT_OFFSET, + INTERP_COLOR_PIXEL, + INTERP_COLOR_CENTROID, + INTERP_COLOR_SAMPLE, + INTERP_COLOR_AT_OFFSET, + INTERP_CONVERGENT, + INTERP_TES_TRIANGLE, + INTERP_TES_TRIANGLE_UVW_FADD, + INTERP_TES_TRIANGLE_WUV_FADD, + INTERP_TES_TRIANGLE_UVW_FFMA, + INTERP_TES_TRIANGLE_WUV_FFMA, +}; + +static inline bool +is_interp_at_offset(unsigned interp) +{ + return interp == INTERP_PERSP_AT_OFFSET || + interp == INTERP_LINEAR_AT_OFFSET || + interp == INTERP_COLOR_AT_OFFSET; +} + +class nir_opt_varyings_test : public ::testing::Test { +protected: + nir_opt_varyings_test() + { + glsl_type_singleton_init_or_ref(); + + b1 = &_producer_builder; + b2 = &_consumer_builder; + + memset(&options, 0, sizeof(options)); + options.varying_expression_max_cost = varying_expression_max_cost; + } + + virtual ~nir_opt_varyings_test() + { + if (HasFailure()) { + printf("\nPRODUCER:\n"); + nir_print_shader(b1->shader, stdout); + printf("CONSUMER:\n"); + nir_print_shader(b2->shader, stdout); + } + + ralloc_free(b1->shader); + ralloc_free(b2->shader); + glsl_type_singleton_decref(); + } + + static inline unsigned + varying_expression_max_cost(struct nir_shader *consumer, + struct nir_shader *producer) + { + return UINT_MAX; + } + + void create_shaders(gl_shader_stage producer_stage, + gl_shader_stage consumer_stage) + { + _producer_builder = + nir_builder_init_simple_shader(producer_stage, &options, + "producer_shader"); + _consumer_builder = + nir_builder_init_simple_shader(consumer_stage, &options, + "consumer_shader"); + + const struct glsl_type *hvec4 = glsl_vector_type(GLSL_TYPE_FLOAT16, 4); + + prod_uniform_vec4_32 = + nir_variable_create(b1->shader, nir_var_uniform, + glsl_vec4_type(), "prod_uniform_vec4_32"); + prod_uniform_vec4_16 = + nir_variable_create(b1->shader, nir_var_uniform, + hvec4, "prod_uniform_vec4_16"); + + prod_ubo_vec4_32 = + nir_variable_create(b1->shader, nir_var_mem_ubo, + glsl_array_type(glsl_vec4_type(), 256, 0), + "prod_ubo_vec4_32"); + prod_ubo_vec4_32->interface_type = prod_ubo_vec4_32->type; + + prod_ubo_vec4_16 = + nir_variable_create(b1->shader, nir_var_mem_ubo, + glsl_array_type(hvec4, 256, 0), + "prod_ubo_vec4_16"); +
prod_ubo_vec4_16->interface_type = prod_ubo_vec4_16->type; + } + + nir_variable *get_uniform(nir_builder *b, unsigned bit_size) + { + if (b == b1) { + return bit_size == 16 ? prod_uniform_vec4_16 : + bit_size == 32 ? prod_uniform_vec4_32 : NULL; + } + + return NULL; + } + + nir_variable *get_ubo(nir_builder *b, unsigned bit_size) + { + if (b == b1) { + return bit_size == 16 ? prod_ubo_vec4_16 : + bit_size == 32 ? prod_ubo_vec4_32 : NULL; + } + + return NULL; + } + + nir_def *load_uniform(nir_builder *b, unsigned bit_size, unsigned index) + { + if (b == b1) { + nir_variable *var = get_uniform(b, bit_size); + nir_deref_instr *deref = nir_build_deref_var(b, var); + + /* Load vec4, but use only 1 component. */ + return nir_channel(b, nir_load_deref(b, deref), index); + } + + return NULL; + } + + nir_def *load_ubo(nir_builder *b, unsigned bit_size, unsigned index) + { + if (b == b1) { + nir_variable *var = get_ubo(b, bit_size); + nir_deref_instr *deref = + nir_build_deref_array(b, nir_build_deref_var(b, var), + nir_imm_int(b, 16 + index)); + + /* Load vec4, but use only 1 component. */ + return nir_channel(b, nir_load_deref(b, deref), 1); + } + + return NULL; + } + + nir_def *build_uniform_expr(nir_builder *b, unsigned bit_size, unsigned index) + { + return nir_fsqrt(b, nir_ffma(b, load_uniform(b, bit_size, index), + nir_imm_floatN_t(b, 3.14, bit_size), + load_ubo(b, bit_size, index))); + } + + bool shader_contains_uniform(nir_builder *target_b, unsigned bit_size, + unsigned index) + { + nir_builder *src_b = target_b == b1 ? b2 : b1; + nir_shader *target = target_b->shader; + nir_variable *var = get_uniform(src_b, bit_size); + + nir_foreach_uniform_variable(it, target) { + if (!strcmp(it->name, var->name)) + return true; + } + + return false; + } + + bool shader_contains_ubo(nir_builder *target_b, unsigned bit_size, + unsigned index) + { + nir_builder *src_b = target_b == b1 ? 
b2 : b1; + nir_shader *target = target_b->shader; + nir_variable *var = get_ubo(src_b, bit_size); + + nir_foreach_variable_with_modes(it, target, nir_var_mem_ubo) { + if (!strcmp(it->name, var->name)) + return true; + } + + return false; + } + + static bool + has_non_io_offset_non_vertex_index_use(nir_builder *b, nir_def *def) + { + nir_foreach_use(src, def) { + nir_instr *instr = nir_src_parent_instr(src); + + if (instr->type == nir_instr_type_intrinsic) { + nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr); + nir_src *offset_src = nir_get_io_offset_src(intr); + nir_src *index_src = nir_get_io_arrayed_index_src(intr); + + if (src == offset_src || src == index_src) + continue; + } + return true; + } + return false; + } + + static bool + shader_contains_const_float(nir_builder *b, float f, unsigned bit_size) + { + if (bit_size == 16) + f = _mesa_half_to_float(_mesa_float_to_half(f)); + + nir_foreach_block(block, b->impl) { + nir_foreach_instr(instr, block) { + if (instr->type == nir_instr_type_load_const) { + nir_load_const_instr *lc = nir_instr_as_load_const(instr); + + if (lc->def.num_components == 1 && + lc->def.bit_size == bit_size && + nir_const_value_as_float(lc->value[0], lc->def.bit_size) == f && + has_non_io_offset_non_vertex_index_use(b, &lc->def)) + return true; + } + } + } + return false; + } + + static bool + shader_contains_alu_op(nir_builder *b, nir_op op, unsigned bit_size) + { + nir_foreach_block(block, b->impl) { + nir_foreach_instr(instr, block) { + if (instr->type == nir_instr_type_alu) { + if (nir_instr_as_alu(instr)->op == op) + return true; + } + } + } + return false; + } + + bool shader_contains_uniform_expr(nir_builder *b, unsigned bit_size, + unsigned index, bool contains) + { + if (contains) { + return shader_contains_uniform(b, bit_size, index) && + shader_contains_ubo(b, bit_size, index) && + shader_contains_alu_op(b, nir_op_ffma, bit_size) && + shader_contains_alu_op(b, nir_op_fsqrt, bit_size) && + shader_contains_const_float(b, 3.14, bit_size); + } else { + return !shader_contains_uniform(b, bit_size, index) && + !shader_contains_ubo(b, bit_size, index) && + !shader_contains_alu_op(b, nir_op_ffma, bit_size) && + !shader_contains_alu_op(b, nir_op_fsqrt, bit_size) && + !shader_contains_const_float(b, 3.14, bit_size); + } + } + + void optimize() + { + NIR_PASS(_, b1->shader, nir_copy_prop); + NIR_PASS(_, b1->shader, nir_opt_dce); + NIR_PASS(_, b1->shader, nir_opt_cse); + + NIR_PASS(_, b2->shader, nir_copy_prop); + NIR_PASS(_, b2->shader, nir_opt_dce); + NIR_PASS(_, b2->shader, nir_opt_cse); + } + + nir_opt_varyings_progress opt_varyings() + { + optimize(); + + if (debug_get_bool_option("PRINT_BEFORE", false)) { + printf("\nPRODUCER:\n"); + nir_print_shader(b1->shader, stdout); + printf("CONSUMER:\n"); + nir_print_shader(b2->shader, stdout); + } + + nir_opt_varyings_progress progress = + nir_opt_varyings(b1->shader, b2->shader, true, 4096, 15); + nir_validate_shader(b1->shader, "validate producer shader"); + nir_validate_shader(b2->shader, "validate consumer shader"); + + optimize(); + nir_shader_gather_info(b1->shader, b1->impl); + nir_shader_gather_info(b2->shader, b2->impl); + return progress; + } + + nir_shader_compiler_options options; + nir_builder *b1; + nir_builder *b2; + nir_builder _consumer_builder; + nir_builder _producer_builder; + nir_variable *prod_uniform_vec4_32; + nir_variable *prod_uniform_vec4_16; + nir_variable *prod_ubo_vec4_32; + nir_variable *prod_ubo_vec4_16; +}; + +static inline bool +shader_contains_instr(nir_builder *b, 
nir_instr *i) +{ + nir_foreach_block(block, b->impl) { + nir_foreach_instr(instr, block) { + if (instr == i) + return true; + } + } + return false; +} + +static inline bool +shader_contains_def(nir_builder *b, nir_def *def) +{ + return shader_contains_instr(b, def->parent_instr); +} + +static inline bool +shader_contains_undef(nir_builder *b, unsigned bit_size) +{ + nir_foreach_block(block, b->impl) { + nir_foreach_instr(instr, block) { + if (instr->type == nir_instr_type_undef && + nir_instr_as_undef(instr)->def.bit_size == bit_size && + nir_instr_as_undef(instr)->def.num_components == 1) + return true; + } + } + return false; +} + +static inline bool +is_patch(gl_varying_slot slot) +{ + return slot == VARYING_SLOT_TESS_LEVEL_INNER || + slot == VARYING_SLOT_TESS_LEVEL_OUTER || + (slot >= VARYING_SLOT_PATCH0 && slot <= VARYING_SLOT_PATCH31); +} + +static inline bool +is_color(nir_builder *b, gl_varying_slot slot) +{ + return b->shader->info.stage == MESA_SHADER_FRAGMENT && + (slot == VARYING_SLOT_COL0 || slot == VARYING_SLOT_COL1 || + slot == VARYING_SLOT_BFC0 || slot == VARYING_SLOT_BFC1); +} + +static inline bool +is_texcoord(nir_builder *b, gl_varying_slot slot) +{ + return b->shader->info.stage == MESA_SHADER_FRAGMENT && + slot >= VARYING_SLOT_TEX0 && slot <= VARYING_SLOT_TEX7; +} + +static inline bool +is_per_vertex(nir_builder *b, gl_varying_slot slot, bool is_input) +{ + return !is_patch(slot) && + (b->shader->info.stage == MESA_SHADER_TESS_CTRL || + (is_input && (b->shader->info.stage == MESA_SHADER_TESS_EVAL || + b->shader->info.stage == MESA_SHADER_GEOMETRY)) || + (!is_input && b->shader->info.stage == MESA_SHADER_MESH)); +} + +static inline nir_def * +load_input_output(nir_builder *b, gl_varying_slot slot, unsigned component, + nir_alu_type type, unsigned vertex_index, bool output) +{ + unsigned bit_size = type & ~(nir_type_float | nir_type_int | nir_type_uint); + nir_def *zero = nir_imm_int(b, 0); + nir_def *def; + + if (is_per_vertex(b, slot, true)) { + if (output) { + def = nir_load_per_vertex_output(b, 1, bit_size, + nir_imm_int(b, vertex_index), zero); + } else { + def = nir_load_per_vertex_input(b, 1, bit_size, + nir_imm_int(b, vertex_index), zero); + } + } else { + if (output) + def = nir_load_output(b, 1, bit_size, zero); + else + def = nir_load_input(b, 1, bit_size, zero); + } + + nir_intrinsic_instr *intr = nir_instr_as_intrinsic(def->parent_instr); + nir_intrinsic_set_base(intr, 0); /* we don't care */ + nir_intrinsic_set_range(intr, 1); + nir_intrinsic_set_component(intr, component); + nir_intrinsic_set_dest_type(intr, type); + + nir_io_semantics sem; + memset(&sem, 0, sizeof(sem)); + sem.location = slot; + sem.num_slots = 1; + nir_intrinsic_set_io_semantics(intr, sem); + + return def; +} + +static inline nir_def * +load_input_interp(nir_builder *b, gl_varying_slot slot, unsigned component, + nir_alu_type type, unsigned interp) +{ + assert(b->shader->info.stage == MESA_SHADER_FRAGMENT && + interp != INTERP_FLAT && interp != INTERP_CONVERGENT && + interp < INTERP_TES_TRIANGLE); + assert(type & nir_type_float); + + unsigned bit_size = type & ~nir_type_float; + nir_def *zero = nir_imm_int(b, 0); + nir_def *baryc; + + switch (interp) { + case INTERP_PERSP_PIXEL: + case INTERP_LINEAR_PIXEL: + case INTERP_COLOR_PIXEL: + baryc = nir_load_barycentric_pixel(b, 32); + break; + case INTERP_PERSP_CENTROID: + case INTERP_LINEAR_CENTROID: + case INTERP_COLOR_CENTROID: + baryc = nir_load_barycentric_centroid(b, 32); + break; + case INTERP_PERSP_SAMPLE: + case INTERP_LINEAR_SAMPLE: 
+ case INTERP_COLOR_SAMPLE: + baryc = nir_load_barycentric_sample(b, 32); + break; + case INTERP_PERSP_AT_OFFSET: + case INTERP_LINEAR_AT_OFFSET: + case INTERP_COLOR_AT_OFFSET: + baryc = nir_load_barycentric_at_offset(b, 32, nir_imm_ivec2(b, 1, 2)); + break; + default: + unreachable("invalid interp mode"); + } + + switch (interp) { + case INTERP_PERSP_PIXEL: + case INTERP_PERSP_CENTROID: + case INTERP_PERSP_SAMPLE: + case INTERP_PERSP_AT_OFFSET: + nir_intrinsic_set_interp_mode(nir_instr_as_intrinsic(baryc->parent_instr), + INTERP_MODE_SMOOTH); + break; + case INTERP_LINEAR_PIXEL: + case INTERP_LINEAR_CENTROID: + case INTERP_LINEAR_SAMPLE: + case INTERP_LINEAR_AT_OFFSET: + nir_intrinsic_set_interp_mode(nir_instr_as_intrinsic(baryc->parent_instr), + INTERP_MODE_NOPERSPECTIVE); + break; + case INTERP_COLOR_PIXEL: + case INTERP_COLOR_CENTROID: + case INTERP_COLOR_SAMPLE: + case INTERP_COLOR_AT_OFFSET: + nir_intrinsic_set_interp_mode(nir_instr_as_intrinsic(baryc->parent_instr), + INTERP_MODE_NONE); + break; + default: + unreachable("invalid interp mode"); + } + + nir_def *def = nir_load_interpolated_input(b, 1, bit_size, baryc, zero); + + nir_intrinsic_instr *intr = nir_instr_as_intrinsic(def->parent_instr); + nir_intrinsic_set_base(intr, 0); /* we don't care */ + nir_intrinsic_set_component(intr, component); + nir_intrinsic_set_dest_type(intr, type); + + nir_io_semantics sem; + memset(&sem, 0, sizeof(sem)); + sem.location = slot; + sem.num_slots = 1; + nir_intrinsic_set_io_semantics(intr, sem); + + return def; +} + +static inline nir_def * +load_interpolated_input_tes(nir_builder *b, gl_varying_slot slot, + unsigned component, nir_alu_type type, + unsigned interp) +{ + assert(b->shader->info.stage == MESA_SHADER_TESS_EVAL && !is_patch(slot)); + assert(type & nir_type_float); + unsigned bit_size = type & ~nir_type_float; + nir_def *zero = nir_imm_int(b, 0); + nir_def *tesscoord = nir_load_tess_coord(b); + nir_def *def[3]; + + if (bit_size != 32) + tesscoord = nir_f2fN(b, tesscoord, bit_size); + + unsigned remap_uvw[3] = {0, 1, 2}; + unsigned remap_wuv[3] = {2, 0, 1}; + unsigned *remap; + + switch (interp) { + case INTERP_TES_TRIANGLE_UVW_FADD: + case INTERP_TES_TRIANGLE_UVW_FFMA: + remap = remap_uvw; + break; + case INTERP_TES_TRIANGLE_WUV_FADD: + case INTERP_TES_TRIANGLE_WUV_FFMA: + remap = remap_wuv; + break; + default: + unreachable("unexpected TES interp mode"); + } + + bool use_ffma = interp == INTERP_TES_TRIANGLE_UVW_FFMA || + interp == INTERP_TES_TRIANGLE_WUV_FFMA; + + for (unsigned i = 0; i < 3; i++) { + def[i] = nir_load_per_vertex_input(b, 1, bit_size, nir_imm_int(b, i), + zero); + + nir_intrinsic_instr *intr = nir_instr_as_intrinsic(def[i]->parent_instr); + nir_intrinsic_set_base(intr, 0); /* we don't care */ + nir_intrinsic_set_range(intr, 1); + nir_intrinsic_set_component(intr, component); + nir_intrinsic_set_dest_type(intr, type); + + nir_io_semantics sem; + memset(&sem, 0, sizeof(sem)); + sem.location = slot; + sem.num_slots = 1; + nir_intrinsic_set_io_semantics(intr, sem); + + if (use_ffma) { + if (i == 0) + def[i] = nir_fmul(b, def[i], nir_channel(b, tesscoord, remap[i])); + else + def[i] = nir_ffma(b, def[i], nir_channel(b, tesscoord, remap[i]), + def[i - 1]); + } else { + def[i] = nir_fmul(b, def[i], nir_channel(b, tesscoord, remap[i])); + } + } + + if (use_ffma) + return def[2]; + else + return nir_fadd(b, nir_fadd(b, def[0], def[1]), def[2]); +} + +static inline nir_def * +load_input(nir_builder *b, gl_varying_slot slot, unsigned component, + nir_alu_type type, unsigned 
vertex_index, unsigned interp) +{ + if (b->shader->info.stage == MESA_SHADER_FRAGMENT && interp != INTERP_FLAT) { + return load_input_interp(b, slot, component, type, interp); + } else if (b->shader->info.stage == MESA_SHADER_TESS_EVAL && + interp >= INTERP_TES_TRIANGLE) { + return load_interpolated_input_tes(b, slot, component, type, interp); + } else { + assert(interp == INTERP_FLAT); + return load_input_output(b, slot, component, type, vertex_index, false); + } +} + +static inline nir_def * +load_output(nir_builder *b, gl_varying_slot slot, unsigned component, + nir_alu_type type, unsigned vertex_index) +{ + return load_input_output(b, slot, component, type, vertex_index, true); +} + +static inline nir_intrinsic_instr * +store_output(nir_builder *b, gl_varying_slot slot, unsigned component, + nir_alu_type type, nir_def *src, int vertex_index) +{ + nir_def *zero = nir_imm_int(b, 0); + nir_intrinsic_instr *intr; + + if (is_per_vertex(b, slot, false)) { + assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL || + vertex_index >= 0); + nir_def *index = vertex_index >= 0 ? nir_imm_int(b, vertex_index) : + nir_load_invocation_id(b); + intr = nir_store_per_vertex_output(b, src, index, zero); + } else { + intr = nir_store_output(b, src, zero); + } + + nir_intrinsic_set_base(intr, 0); /* we don't care */ + nir_intrinsic_set_write_mask(intr, 0x1); + nir_intrinsic_set_component(intr, component); + nir_intrinsic_set_src_type(intr, type); + + nir_io_semantics sem; + memset(&sem, 0, sizeof(sem)); + sem.location = slot; + sem.num_slots = 1; + nir_intrinsic_set_io_semantics(intr, sem); + + return intr; +} + +static inline nir_intrinsic_instr * +store_ssbo(nir_builder *b, nir_def *src) +{ + return nir_store_ssbo(b, src, nir_imm_int(b, 0), nir_imm_int(b, 0)); +} + +/* See can_move_alu_across_interp. */ +static inline bool +movable_across_interp(nir_builder *b, nir_op op, unsigned interp[3], + bool divergent[3], unsigned bit_size) +{ + if ((interp[0] == INTERP_FLAT || !divergent[0]) && + (interp[1] == INTERP_FLAT || !divergent[1]) && + (interp[2] == INTERP_FLAT || !divergent[2])) + return true; + + /* nir_opt_varyings doesn't have an equation for: + * v0 * f2f16(u) + v1 * f2f16(v) + v2 * f2f16(w) + */ + if (b->shader->info.stage == MESA_SHADER_TESS_EVAL && bit_size == 16) + return false; + + switch (op) { + case nir_op_fadd: + case nir_op_fsub: + case nir_op_fneg: + case nir_op_mov: + return true; + + case nir_op_fmul: + case nir_op_fmulz: + case nir_op_ffma: + case nir_op_ffmaz: + return !divergent[0] || !divergent[1]; + + case nir_op_fdiv: + return !divergent[1]; + + case nir_op_flrp: + return (!divergent[0] && !divergent[1]) || !divergent[2]; + + default: + return false; + } +} diff --git a/src/compiler/nir/tests/opt_varyings_tests_bicm_binary_alu.cpp b/src/compiler/nir/tests/opt_varyings_tests_bicm_binary_alu.cpp new file mode 100644 index 00000000000..bafa0e07f11 --- /dev/null +++ b/src/compiler/nir/tests/opt_varyings_tests_bicm_binary_alu.cpp @@ -0,0 +1,144 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * SPDX-License-Identifier: MIT + */ + +/* Tests for Backward Inter-Shader Code Motion. 
*/ + +#include "nir_opt_varyings_test.h" + +class nir_opt_varyings_test_bicm_binary_alu : public nir_opt_varyings_test +{}; + +#define TEST_ALU_BINARY(producer_stage, consumer_stage, type, bitsize, slot1, slot2, interp1, interp2, alu) \ +TEST_F(nir_opt_varyings_test_bicm_binary_alu, \ + alu##_##type##bitsize##_##producer_stage##_##consumer_stage##_##slot1##_##slot2##_##interp1##_##interp2) \ +{ \ + unsigned pslot[2] = {VARYING_SLOT_##slot1, VARYING_SLOT_##slot2}; \ + unsigned cslot[2] = {VARYING_SLOT_##slot1, VARYING_SLOT_##slot2}; \ + unsigned interp[3] = {INTERP_##interp1, INTERP_##interp2}; \ + bool divergent[3] = {interp[0] != INTERP_CONVERGENT, interp[1] != INTERP_CONVERGENT}; \ + \ + /* Choose a random TES interpolation mode, but it must be the same for both inputs. */ \ + if (interp[0] == INTERP_TES_TRIANGLE && interp[1] == INTERP_TES_TRIANGLE) \ + interp[0] = interp[1] = INTERP_TES_TRIANGLE + 1 + rand() % 4; \ + if (interp[0] == INTERP_TES_TRIANGLE) \ + interp[0] = INTERP_TES_TRIANGLE + 1 + rand() % 4; \ + if (interp[1] == INTERP_TES_TRIANGLE) \ + interp[1] = INTERP_TES_TRIANGLE + 1 + rand() % 4; \ + \ + if (!divergent[0]) \ + interp[0] = INTERP_LINEAR_CENTROID; \ + if (!divergent[1]) \ + interp[1] = INTERP_LINEAR_CENTROID; \ + \ + options.varying_expression_max_cost = NULL; /* don't propagate uniforms */ \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + \ + nir_intrinsic_instr *store[2] = {NULL}; \ + for (unsigned s = 0; s < 2; s++) { \ + nir_def *input; \ + if (!divergent[s]) \ + input = load_uniform(b1, bitsize, 0); \ + else \ + input = load_input(b1, (gl_varying_slot)0, s, nir_type_##type##bitsize, 0, 0); \ + store[s] = store_output(b1, (gl_varying_slot)pslot[s], s, nir_type_##type##bitsize, input, -1); \ + } \ + \ + nir_def *load[2] = {NULL}; \ + for (unsigned s = 0; s < 2; s++) \ + load[s] = load_input(b2, (gl_varying_slot)cslot[s], s, nir_type_##type##bitsize, 0, interp[s]); \ + \ + nir_def *value = nir_##alu(b2, load[0], load[1]); \ + if (value->bit_size == 1) \ + value = nir_u2u##bitsize(b2, value); \ + \ + store_output(b2, VARYING_SLOT_VAR0, 0, nir_type_##type##bitsize, value, 0); \ + \ + divergent[0] &= !is_patch((gl_varying_slot)pslot[0]); \ + divergent[1] &= !is_patch((gl_varying_slot)pslot[1]); \ + \ + if ((INTERP_##interp1 == INTERP_##interp2 || !divergent[0] || !divergent[1]) &&\ + movable_across_interp(b2, nir_op_##alu, interp, divergent, bitsize)) { \ + ASSERT_EQ(opt_varyings(), (nir_progress_producer | nir_progress_consumer)); \ + ASSERT_TRUE(shader_contains_alu_op(b1, nir_op_##alu, bitsize)); \ + /* TES uses fadd and fmul for interpolation, so it's always present. 
*/ \ + if (MESA_SHADER_##consumer_stage != MESA_SHADER_TESS_EVAL || \ + (nir_op_##alu != nir_op_fadd && nir_op_##alu != nir_op_fmul && \ + nir_op_##alu != nir_op_ffma)) { \ + ASSERT_TRUE(!shader_contains_alu_op(b2, nir_op_##alu, bitsize)); \ + } \ + ASSERT_TRUE(shader_contains_instr(b1, &store[0]->instr)); \ + ASSERT_TRUE(!shader_contains_instr(b1, &store[1]->instr)); \ + ASSERT_TRUE(!shader_contains_def(b2, load[0])); \ + ASSERT_TRUE(!shader_contains_def(b2, load[1])); \ + } else { \ + ASSERT_EQ(opt_varyings(), 0); \ + ASSERT_TRUE(!shader_contains_alu_op(b1, nir_op_##alu, bitsize)); \ + ASSERT_TRUE(shader_contains_alu_op(b2, nir_op_##alu, bitsize)); \ + ASSERT_TRUE(shader_contains_instr(b1, &store[0]->instr)); \ + ASSERT_TRUE(shader_contains_instr(b1, &store[1]->instr)); \ + ASSERT_TRUE(shader_contains_def(b2, load[0])); \ + ASSERT_TRUE(shader_contains_def(b2, load[1])); \ + } \ +} + +#define TEST_ALU_BINARY_FP_OPS(producer_stage, consumer_stage, slot1, slot2, interp1, interp2) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 16, slot1, slot2, interp1, interp2, fadd) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, fadd) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, fdiv) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, feq) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, fge) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, fmin) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, fmax) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, fmod) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, fmul) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, fmulz) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, fneu) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, fpow) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, frem) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, fsub) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, seq) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, float, 32, slot1, slot2, interp1, interp2, sge) + +#define TEST_ALU_BINARY_INT_OPS(producer_stage, consumer_stage, slot1, slot2) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, int, 16, slot1, slot2, FLAT, FLAT, iadd) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, int, 32, slot1, slot2, FLAT, FLAT, iadd) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, int, 32, slot1, slot2, FLAT, FLAT, iand) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, int, 32, slot1, slot2, FLAT, FLAT, idiv) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, int, 32, slot1, slot2, FLAT, FLAT, ieq) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, int, 32, slot1, slot2, FLAT, FLAT, ige) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, int, 32, slot1, slot2, FLAT, FLAT, imax) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, int, 32, slot1, slot2, FLAT, FLAT, ishl) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, int, 32, slot1, slot2, FLAT, FLAT, udiv) \ + 
TEST_ALU_BINARY(producer_stage, consumer_stage, int, 32, slot1, slot2, FLAT, FLAT, uge) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, int, 32, slot1, slot2, FLAT, FLAT, umin) \ + TEST_ALU_BINARY(producer_stage, consumer_stage, int, 32, slot1, slot2, FLAT, FLAT, umul_high) + +#define TEST_ALU_BINARY_OPS(producer_stage, consumer_stage, slot1, slot2) \ + TEST_ALU_BINARY_INT_OPS(producer_stage, consumer_stage, slot1, slot2) \ + TEST_ALU_BINARY_FP_OPS(producer_stage, consumer_stage, slot1, slot2, FLAT, FLAT) + +#define TEST_ALU_BINARY_OPS_FS_INTERP(producer_stage, consumer_stage, slot1, slot2) \ + TEST_ALU_BINARY_OPS(producer_stage, consumer_stage, slot1, slot2) \ + TEST_ALU_BINARY_FP_OPS(producer_stage, consumer_stage, slot1, slot2, FLAT, PERSP_PIXEL) \ + TEST_ALU_BINARY_FP_OPS(producer_stage, consumer_stage, slot1, slot2, FLAT, CONVERGENT) \ + TEST_ALU_BINARY_FP_OPS(producer_stage, consumer_stage, slot1, slot2, CONVERGENT, FLAT) \ + TEST_ALU_BINARY_FP_OPS(producer_stage, consumer_stage, slot1, slot2, PERSP_PIXEL, PERSP_PIXEL) \ + TEST_ALU_BINARY_FP_OPS(producer_stage, consumer_stage, slot1, slot2, PERSP_PIXEL, PERSP_CENTROID) \ + TEST_ALU_BINARY_FP_OPS(producer_stage, consumer_stage, slot1, slot2, PERSP_PIXEL, CONVERGENT) \ + TEST_ALU_BINARY_FP_OPS(producer_stage, consumer_stage, slot1, slot2, CONVERGENT, PERSP_PIXEL) \ + TEST_ALU_BINARY_FP_OPS(producer_stage, consumer_stage, slot1, slot2, CONVERGENT, CONVERGENT) + +TEST_ALU_BINARY_FP_OPS(VERTEX, TESS_EVAL, VAR0, VAR1, TES_TRIANGLE, TES_TRIANGLE) +TEST_ALU_BINARY_FP_OPS(TESS_CTRL, TESS_EVAL, VAR0, VAR1, TES_TRIANGLE, TES_TRIANGLE) +TEST_ALU_BINARY_FP_OPS(TESS_CTRL, TESS_EVAL, VAR0, PATCH0, TES_TRIANGLE, FLAT) +TEST_ALU_BINARY_OPS(TESS_CTRL, TESS_EVAL, PATCH0, PATCH1) + +TEST_ALU_BINARY_OPS_FS_INTERP(VERTEX, FRAGMENT, VAR0, VAR1) +TEST_ALU_BINARY_OPS_FS_INTERP(TESS_EVAL, FRAGMENT, VAR0, VAR1) + +// TODO: unary/ternary, uniform/UBO load/constant + +} diff --git a/src/compiler/nir/tests/opt_varyings_tests_dead_input.cpp b/src/compiler/nir/tests/opt_varyings_tests_dead_input.cpp new file mode 100644 index 00000000000..2cc801b4233 --- /dev/null +++ b/src/compiler/nir/tests/opt_varyings_tests_dead_input.cpp @@ -0,0 +1,251 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * SPDX-License-Identifier: MIT + */ + +#include "nir_opt_varyings_test.h" + +class nir_opt_varyings_test_dead_input : public nir_opt_varyings_test +{}; + +#define TEST_DEAD_INPUT_TO_UNDEF(producer_stage, consumer_stage, slot, bitsize) \ +TEST_F(nir_opt_varyings_test_dead_input, producer_stage##_##consumer_stage##_##slot##_##bitsize) \ +{ \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + nir_def *input = load_input(b2, VARYING_SLOT_##slot, 0, nir_type_float##bitsize, 0, 0); \ + store_output(b2, VARYING_SLOT_POS, 0, nir_type_float##bitsize, input, 0); \ + \ + ASSERT_TRUE(opt_varyings() == nir_progress_consumer); \ + ASSERT_TRUE(b2->shader->info.inputs_read == 0 && \ + b2->shader->info.patch_inputs_read == 0 && \ + b2->shader->info.inputs_read_16bit == 0); \ + ASSERT_TRUE(!shader_contains_def(b2, input)); \ + ASSERT_TRUE(shader_contains_undef(b2, bitsize)); \ +} + +#define TEST_DEAD_INPUT_TO_CONST(producer_stage, consumer_stage, slot, comp, bitsize, value) \ +TEST_F(nir_opt_varyings_test_dead_input, producer_stage##_##consumer_stage##_##slot##_##comp##_##bitsize) \ +{ \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + nir_def *input = load_input(b2, VARYING_SLOT_##slot, comp, nir_type_float##bitsize, 0, 0); \ + store_output(b2, VARYING_SLOT_POS, 0, nir_type_float##bitsize, input, 0); \ + \ + ASSERT_TRUE(opt_varyings() == nir_progress_consumer); \ + ASSERT_TRUE(b2->shader->info.inputs_read == 0 && \ + b2->shader->info.patch_inputs_read == 0 && \ + b2->shader->info.inputs_read_16bit == 0); \ + ASSERT_TRUE(!shader_contains_def(b2, input)); \ + ASSERT_TRUE(shader_contains_const_float(b2, value, bitsize)); \ +} + +#define TEST_DEAD_INPUT_KEPT(producer_stage, consumer_stage, slot, bitsize) \ +TEST_F(nir_opt_varyings_test_dead_input, producer_stage##_##consumer_stage##_##slot##_##bitsize) \ +{ \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + nir_def *input = load_input(b2, VARYING_SLOT_##slot, 0, nir_type_float##bitsize, 0, 0); \ + store_output(b2, VARYING_SLOT_POS, 0, nir_type_float##bitsize, input, 0); \ + \ + ASSERT_TRUE(opt_varyings() == 0); \ + ASSERT_TRUE(b2->shader->info.inputs_read == VARYING_BIT_##slot); \ + ASSERT_TRUE(shader_contains_def(b2, input)); \ +} + +#define TEST_OUTPUT_INPUT_ROUTING_KEPT(producer_stage, consumer_stage, pslot, cslot, bitsize) \ +TEST_F(nir_opt_varyings_test_dead_input, \ + routing_##producer_stage##_##pslot##_##consumer_stage##_##cslot##_##bitsize) \ +{ \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + store_output(b1, VARYING_SLOT_##pslot, 0, nir_type_float##bitsize, \ + load_input(b1, VARYING_SLOT_POS, 0, nir_type_float##bitsize, 0, 0), 0); \ + \ + nir_def *input = load_input(b2, VARYING_SLOT_##cslot, 0, nir_type_float##bitsize, 0, 0); \ + store_output(b2, VARYING_SLOT_POS, 0, nir_type_float##bitsize, input, 0); \ + \ + /* Compaction moves COL1 to COL0. 
*/ \ + unsigned pindex = VARYING_SLOT_##pslot; \ + unsigned cindex = VARYING_SLOT_##cslot; \ + if (cindex == VARYING_SLOT_COL1) { \ + pindex--; \ + cindex--; \ + } \ + \ + ASSERT_TRUE(opt_varyings() == 0); \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(pindex)); \ + ASSERT_TRUE(b2->shader->info.inputs_read == BITFIELD64_BIT(cindex)); \ + ASSERT_TRUE(shader_contains_def(b2, input)); \ +} + +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, POS, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, COL0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, COL1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, FOGC, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, TEX0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, PSIZ, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, BFC0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, BFC1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, CLIP_VERTEX, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, CLIP_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, CLIP_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, CULL_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, CULL_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, PRIMITIVE_ID, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, LAYER, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, VIEWPORT, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, VAR0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, VAR0, 16) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_CTRL, VAR0_16BIT, 16) + +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, POS, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, COL0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, COL1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, FOGC, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, TEX0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, PSIZ, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, BFC0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, BFC1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, CLIP_VERTEX, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, CLIP_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, CLIP_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, CULL_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, CULL_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, PRIMITIVE_ID, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, LAYER, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, VIEWPORT, 32) +TEST_DEAD_INPUT_KEPT(VERTEX, TESS_EVAL, TESS_LEVEL_INNER, 32) +TEST_DEAD_INPUT_KEPT(VERTEX, TESS_EVAL, TESS_LEVEL_OUTER, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, VAR0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, VAR0, 16) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, TESS_EVAL, VAR0_16BIT, 16) + +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, POS, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, COL0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, COL1, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, FOGC, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, TEX0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, PSIZ, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, BFC0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, BFC1, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, CLIP_VERTEX, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, CLIP_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, CLIP_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, CULL_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, CULL_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, 
TESS_EVAL, PRIMITIVE_ID, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, LAYER, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, VIEWPORT, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, TESS_LEVEL_INNER, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, TESS_LEVEL_OUTER, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, VAR0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, VAR0, 16) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, VAR0_16BIT, 16) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, PATCH0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_CTRL, TESS_EVAL, PATCH0, 16) + +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, POS, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, COL0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, COL1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, FOGC, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, TEX0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, PSIZ, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, BFC0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, BFC1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, CLIP_VERTEX, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, CLIP_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, CLIP_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, CULL_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, CULL_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, PRIMITIVE_ID, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, LAYER, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, VIEWPORT, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, VAR0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, VAR0, 16) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, GEOMETRY, VAR0_16BIT, 16) + +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, POS, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, COL0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, COL1, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, FOGC, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, TEX0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, PSIZ, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, BFC0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, BFC1, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, CLIP_VERTEX, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, CLIP_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, CLIP_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, CULL_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, CULL_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, PRIMITIVE_ID, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, LAYER, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, VIEWPORT, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, VAR0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, VAR0, 16) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, GEOMETRY, VAR0_16BIT, 16) + +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, FRAGMENT, COL0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, FRAGMENT, COL1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, FRAGMENT, FOGC, 32) +TEST_DEAD_INPUT_KEPT(VERTEX, FRAGMENT, TEX0, 32) +TEST_DEAD_INPUT_TO_CONST(VERTEX, FRAGMENT, TEX0, 2, 32, 0) +TEST_DEAD_INPUT_TO_CONST(VERTEX, FRAGMENT, TEX0, 3, 32, 1) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, FRAGMENT, CLIP_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, FRAGMENT, CLIP_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, FRAGMENT, CULL_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, FRAGMENT, CULL_DIST1, 32) +TEST_DEAD_INPUT_KEPT(VERTEX, FRAGMENT, PRIMITIVE_ID, 32) +TEST_DEAD_INPUT_TO_CONST(VERTEX, FRAGMENT, LAYER, 0, 32, 0) +TEST_DEAD_INPUT_TO_CONST(VERTEX, FRAGMENT, VIEWPORT, 0, 32, 
0) +TEST_DEAD_INPUT_KEPT(VERTEX, FRAGMENT, PNTC, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, FRAGMENT, VAR0, 32) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, FRAGMENT, VAR0, 16) +TEST_DEAD_INPUT_TO_UNDEF(VERTEX, FRAGMENT, VAR0_16BIT, 16) + +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, FRAGMENT, COL0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, FRAGMENT, COL1, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, FRAGMENT, FOGC, 32) +TEST_DEAD_INPUT_KEPT(TESS_EVAL, FRAGMENT, TEX0, 32) +TEST_DEAD_INPUT_TO_CONST(TESS_EVAL, FRAGMENT, TEX0, 2, 32, 0) +TEST_DEAD_INPUT_TO_CONST(TESS_EVAL, FRAGMENT, TEX0, 3, 32, 1) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, FRAGMENT, CLIP_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, FRAGMENT, CLIP_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, FRAGMENT, CULL_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, FRAGMENT, CULL_DIST1, 32) +TEST_DEAD_INPUT_KEPT(TESS_EVAL, FRAGMENT, PRIMITIVE_ID, 32) +TEST_DEAD_INPUT_TO_CONST(TESS_EVAL, FRAGMENT, LAYER, 0, 32, 0) +TEST_DEAD_INPUT_TO_CONST(TESS_EVAL, FRAGMENT, VIEWPORT, 0, 32, 0) +TEST_DEAD_INPUT_KEPT(TESS_EVAL, FRAGMENT, PNTC, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, FRAGMENT, VAR0, 32) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, FRAGMENT, VAR0, 16) +TEST_DEAD_INPUT_TO_UNDEF(TESS_EVAL, FRAGMENT, VAR0_16BIT, 16) + +TEST_DEAD_INPUT_TO_UNDEF(GEOMETRY, FRAGMENT, COL0, 32) +TEST_DEAD_INPUT_TO_UNDEF(GEOMETRY, FRAGMENT, COL1, 32) +TEST_DEAD_INPUT_TO_UNDEF(GEOMETRY, FRAGMENT, FOGC, 32) +TEST_DEAD_INPUT_KEPT(GEOMETRY, FRAGMENT, TEX0, 32) +TEST_DEAD_INPUT_TO_CONST(GEOMETRY, FRAGMENT, TEX0, 2, 32, 0) +TEST_DEAD_INPUT_TO_CONST(GEOMETRY, FRAGMENT, TEX0, 3, 32, 1) +TEST_DEAD_INPUT_TO_UNDEF(GEOMETRY, FRAGMENT, CLIP_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(GEOMETRY, FRAGMENT, CLIP_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(GEOMETRY, FRAGMENT, CULL_DIST0, 32) +TEST_DEAD_INPUT_TO_UNDEF(GEOMETRY, FRAGMENT, CULL_DIST1, 32) +TEST_DEAD_INPUT_TO_UNDEF(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 32) +TEST_DEAD_INPUT_TO_CONST(GEOMETRY, FRAGMENT, LAYER, 0, 32, 0) +TEST_DEAD_INPUT_TO_CONST(GEOMETRY, FRAGMENT, VIEWPORT, 0, 32, 0) +TEST_DEAD_INPUT_KEPT(GEOMETRY, FRAGMENT, PNTC, 32) +TEST_DEAD_INPUT_TO_UNDEF(GEOMETRY, FRAGMENT, VAR0, 32) +TEST_DEAD_INPUT_TO_UNDEF(GEOMETRY, FRAGMENT, VAR0, 16) +TEST_DEAD_INPUT_TO_UNDEF(GEOMETRY, FRAGMENT, VAR0_16BIT, 16) + +TEST_DEAD_INPUT_KEPT(MESH, FRAGMENT, PNTC, 32) +TEST_DEAD_INPUT_TO_UNDEF(MESH, FRAGMENT, VAR0, 32) +TEST_DEAD_INPUT_TO_UNDEF(MESH, FRAGMENT, VAR0, 16) +TEST_DEAD_INPUT_TO_UNDEF(MESH, FRAGMENT, VAR0_16BIT, 16) + +TEST_OUTPUT_INPUT_ROUTING_KEPT(VERTEX, FRAGMENT, BFC0, COL0, 32) +TEST_OUTPUT_INPUT_ROUTING_KEPT(VERTEX, FRAGMENT, BFC1, COL1, 32) +TEST_OUTPUT_INPUT_ROUTING_KEPT(TESS_EVAL, FRAGMENT, BFC0, COL0, 32) +TEST_OUTPUT_INPUT_ROUTING_KEPT(TESS_EVAL, FRAGMENT, BFC1, COL1, 32) +TEST_OUTPUT_INPUT_ROUTING_KEPT(GEOMETRY, FRAGMENT, BFC0, COL0, 32) +TEST_OUTPUT_INPUT_ROUTING_KEPT(GEOMETRY, FRAGMENT, BFC1, COL1, 32) + +} diff --git a/src/compiler/nir/tests/opt_varyings_tests_dead_output.cpp b/src/compiler/nir/tests/opt_varyings_tests_dead_output.cpp new file mode 100644 index 00000000000..88352954211 --- /dev/null +++ b/src/compiler/nir/tests/opt_varyings_tests_dead_output.cpp @@ -0,0 +1,352 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * SPDX-License-Identifier: MIT + */ + +#include "nir_opt_varyings_test.h" + +class nir_opt_varyings_test_dead_output : public nir_opt_varyings_test +{}; + +#define TEST_DEAD_OUTPUT_REMOVED(producer_stage, consumer_stage, slot, bitsize) \ +TEST_F(nir_opt_varyings_test_dead_output, \ + store_##producer_stage##_##consumer_stage##_##slot##_##bitsize) \ +{ \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + nir_intrinsic_instr *intr = \ + store_output(b1, VARYING_SLOT_##slot, 0, nir_type_float##bitsize, \ + nir_imm_float(b1, 0), 0); \ + \ + ASSERT_TRUE(opt_varyings() == nir_progress_producer); \ + ASSERT_TRUE(b1->shader->info.outputs_written == 0 && \ + b1->shader->info.patch_outputs_written == 0 && \ + b1->shader->info.outputs_written_16bit == 0); \ + ASSERT_TRUE(!shader_contains_instr(b1, &intr->instr)); \ +} + +#define TEST_DEAD_OUTPUT_KEPT(producer_stage, consumer_stage, slot, bitsize) \ +TEST_F(nir_opt_varyings_test_dead_output, \ + store_##producer_stage##_##consumer_stage##_##slot##_##bitsize) \ +{ \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + nir_intrinsic_instr *intr = \ + store_output(b1, VARYING_SLOT_##slot, 0, nir_type_float##bitsize, \ + nir_imm_float(b1, 0), 0); \ + \ + ASSERT_TRUE(opt_varyings() == 0); \ + ASSERT_TRUE(b1->shader->info.outputs_written == VARYING_BIT_##slot); \ + ASSERT_TRUE(shader_contains_instr(b1, &intr->instr)); \ + ASSERT_TRUE(nir_intrinsic_io_semantics(intr).no_varying == \ + (VARYING_SLOT_##slot != VARYING_SLOT_POS && \ + VARYING_SLOT_##slot != VARYING_SLOT_PSIZ && \ + VARYING_SLOT_##slot != VARYING_SLOT_CLIP_VERTEX)); \ +} + +#define TEST_DEAD_OUTPUT_KEPT_XFB(producer_stage, consumer_stage, slot, bitsize) \ +TEST_F(nir_opt_varyings_test_dead_output, \ + xfb_store_##producer_stage##_##consumer_stage##_##slot##_##bitsize) \ +{ \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + nir_intrinsic_instr *intr = \ + store_output(b1, VARYING_SLOT_##slot, 0, nir_type_float##bitsize, \ + nir_imm_float(b1, 0), 0); \ + \ + struct nir_io_xfb xfb; \ + memset(&xfb, 0, sizeof(xfb)); \ + xfb.out[0].num_components = 1; \ + nir_intrinsic_set_io_xfb(intr, xfb); \ + \ + /* Compaction moves COL1 to COL0. */ \ + unsigned index = VARYING_SLOT_##slot; \ + if (index == VARYING_SLOT_COL1 || index == VARYING_SLOT_BFC1) \ + index--; \ + \ + /* Compaction moves all these to VAR0. */ \ + /* It's correct for TEX0 because it's not used by FS. 
*/ \ + if (index == VARYING_SLOT_FOGC || index == VARYING_SLOT_PRIMITIVE_ID || \ + index == VARYING_SLOT_TEX0 || index == VARYING_SLOT_VAR0_16BIT) \ + index = VARYING_SLOT_VAR0; \ + \ + ASSERT_TRUE(opt_varyings() == 0); \ + if (index >= VARYING_SLOT_VAR0_16BIT) { \ + ASSERT_TRUE(b1->shader->info.outputs_written_16bit == \ + BITFIELD_BIT(index - VARYING_SLOT_VAR0_16BIT)); \ + } else { \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(index)); \ + } \ + ASSERT_TRUE(shader_contains_instr(b1, &intr->instr)); \ + ASSERT_TRUE(nir_intrinsic_io_semantics(intr).no_varying == \ + (VARYING_SLOT_##slot != VARYING_SLOT_POS && \ + VARYING_SLOT_##slot != VARYING_SLOT_PSIZ && \ + VARYING_SLOT_##slot != VARYING_SLOT_CLIP_VERTEX)); \ + ASSERT_TRUE(nir_intrinsic_io_xfb(intr).out[0].num_components == 1); \ +} + +#define TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(producer_stage, consumer_stage, slot, bitsize) \ +TEST_F(nir_opt_varyings_test_dead_output, \ + load_##producer_stage##_##consumer_stage##_##slot##_##bitsize) \ +{ \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + nir_def *output = load_output(b1, VARYING_SLOT_##slot, 0, nir_type_float##bitsize, 0); \ + store_ssbo(b1, output); \ + \ + ASSERT_TRUE(opt_varyings() == nir_progress_producer); \ + ASSERT_TRUE(b1->shader->info.outputs_read == 0 && \ + b1->shader->info.patch_outputs_read == 0 && \ + b1->shader->info.outputs_read_16bit == 0); \ + ASSERT_TRUE(!shader_contains_def(b1, output)); \ + ASSERT_TRUE(shader_contains_undef(b1, bitsize)); \ +} + +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, POS, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, COL0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, COL1, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, FOGC, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, TEX0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, PSIZ, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, BFC0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, BFC1, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, CLIP_VERTEX, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, CLIP_DIST0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, CLIP_DIST1, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, CULL_DIST0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, CULL_DIST1, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, LAYER, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, VIEWPORT, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, VAR0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, VAR0, 16) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_CTRL, VAR0_16BIT, 16) + +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, POS, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, COL0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, COL1, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, FOGC, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, TEX0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, PSIZ, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, BFC0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, BFC1, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, CLIP_VERTEX, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, CLIP_DIST0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, CLIP_DIST1, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, CULL_DIST0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, CULL_DIST1, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, LAYER, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, VIEWPORT, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, VAR0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, VAR0, 16) 
+TEST_DEAD_OUTPUT_REMOVED(VERTEX, TESS_EVAL, VAR0_16BIT, 16) + +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, POS, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, COL0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, COL1, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, FOGC, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, TEX0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, PSIZ, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, BFC0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, BFC1, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, CLIP_VERTEX, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, CLIP_DIST0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, CLIP_DIST1, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, CULL_DIST0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, CULL_DIST1, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, LAYER, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, VIEWPORT, 32) +TEST_DEAD_OUTPUT_KEPT(TESS_CTRL, TESS_EVAL, TESS_LEVEL_INNER, 32) +TEST_DEAD_OUTPUT_KEPT(TESS_CTRL, TESS_EVAL, TESS_LEVEL_OUTER, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, VAR0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, VAR0, 16) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, VAR0_16BIT, 16) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, PATCH0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_CTRL, TESS_EVAL, PATCH0, 16) + +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, POS, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, COL0, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, COL1, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, FOGC, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, TEX0, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, PSIZ, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, BFC0, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, BFC1, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, CLIP_VERTEX, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, CLIP_DIST0, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, CLIP_DIST1, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, CULL_DIST0, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, CULL_DIST1, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, LAYER, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, VIEWPORT, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, TESS_LEVEL_INNER, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, TESS_LEVEL_OUTER, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, VAR0, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, VAR0, 16) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, VAR0_16BIT, 16) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, PATCH0, 32) +TEST_DEAD_OUTPUT_LOAD_TO_UNDEF(TESS_CTRL, TESS_EVAL, PATCH0, 16) + +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, POS, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, COL0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, COL1, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, FOGC, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, TEX0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, PSIZ, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, BFC0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, BFC1, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, CLIP_VERTEX, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, CLIP_DIST0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, CLIP_DIST1, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, CULL_DIST0, 32) 
+TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, CULL_DIST1, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, LAYER, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, VIEWPORT, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, VAR0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, VAR0, 16) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, GEOMETRY, VAR0_16BIT, 16) + +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, POS, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, COL0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, COL1, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, FOGC, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, TEX0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, PSIZ, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, BFC0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, BFC1, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, CLIP_VERTEX, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, CLIP_DIST0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, CLIP_DIST1, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, CULL_DIST0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, CULL_DIST1, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, LAYER, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, VIEWPORT, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, VAR0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, VAR0, 16) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, GEOMETRY, VAR0_16BIT, 16) + +TEST_DEAD_OUTPUT_KEPT(VERTEX, FRAGMENT, POS, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, FRAGMENT, COL0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, FRAGMENT, COL1, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, FRAGMENT, FOGC, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, FRAGMENT, TEX0, 32) +TEST_DEAD_OUTPUT_KEPT(VERTEX, FRAGMENT, PSIZ, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, FRAGMENT, BFC0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, FRAGMENT, BFC1, 32) +TEST_DEAD_OUTPUT_KEPT(VERTEX, FRAGMENT, CLIP_VERTEX, 32) +TEST_DEAD_OUTPUT_KEPT(VERTEX, FRAGMENT, CLIP_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT(VERTEX, FRAGMENT, CLIP_DIST1, 32) +TEST_DEAD_OUTPUT_KEPT(VERTEX, FRAGMENT, CULL_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT(VERTEX, FRAGMENT, CULL_DIST1, 32) +TEST_DEAD_OUTPUT_KEPT(VERTEX, FRAGMENT, LAYER, 32) +TEST_DEAD_OUTPUT_KEPT(VERTEX, FRAGMENT, VIEWPORT, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, FRAGMENT, VAR0, 32) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, FRAGMENT, VAR0, 16) +TEST_DEAD_OUTPUT_REMOVED(VERTEX, FRAGMENT, VAR0_16BIT, 16) + +TEST_DEAD_OUTPUT_KEPT(TESS_EVAL, FRAGMENT, POS, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, FRAGMENT, COL0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, FRAGMENT, COL1, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, FRAGMENT, FOGC, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, FRAGMENT, TEX0, 32) +TEST_DEAD_OUTPUT_KEPT(TESS_EVAL, FRAGMENT, PSIZ, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, FRAGMENT, BFC0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, FRAGMENT, BFC1, 32) +TEST_DEAD_OUTPUT_KEPT(TESS_EVAL, FRAGMENT, CLIP_VERTEX, 32) +TEST_DEAD_OUTPUT_KEPT(TESS_EVAL, FRAGMENT, CLIP_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT(TESS_EVAL, FRAGMENT, CLIP_DIST1, 32) +TEST_DEAD_OUTPUT_KEPT(TESS_EVAL, FRAGMENT, CULL_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT(TESS_EVAL, FRAGMENT, CULL_DIST1, 32) +TEST_DEAD_OUTPUT_KEPT(TESS_EVAL, FRAGMENT, LAYER, 32) +TEST_DEAD_OUTPUT_KEPT(TESS_EVAL, FRAGMENT, VIEWPORT, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, FRAGMENT, VAR0, 32) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, FRAGMENT, VAR0, 16) +TEST_DEAD_OUTPUT_REMOVED(TESS_EVAL, FRAGMENT, VAR0_16BIT, 16) + +TEST_DEAD_OUTPUT_KEPT(GEOMETRY, FRAGMENT, POS, 32) +TEST_DEAD_OUTPUT_REMOVED(GEOMETRY, FRAGMENT, COL0, 32) 
+TEST_DEAD_OUTPUT_REMOVED(GEOMETRY, FRAGMENT, COL1, 32) +TEST_DEAD_OUTPUT_REMOVED(GEOMETRY, FRAGMENT, FOGC, 32) +TEST_DEAD_OUTPUT_REMOVED(GEOMETRY, FRAGMENT, TEX0, 32) +TEST_DEAD_OUTPUT_KEPT(GEOMETRY, FRAGMENT, PSIZ, 32) +TEST_DEAD_OUTPUT_REMOVED(GEOMETRY, FRAGMENT, BFC0, 32) +TEST_DEAD_OUTPUT_REMOVED(GEOMETRY, FRAGMENT, BFC1, 32) +TEST_DEAD_OUTPUT_KEPT(GEOMETRY, FRAGMENT, CLIP_VERTEX, 32) +TEST_DEAD_OUTPUT_KEPT(GEOMETRY, FRAGMENT, CLIP_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT(GEOMETRY, FRAGMENT, CLIP_DIST1, 32) +TEST_DEAD_OUTPUT_KEPT(GEOMETRY, FRAGMENT, CULL_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT(GEOMETRY, FRAGMENT, CULL_DIST1, 32) +TEST_DEAD_OUTPUT_REMOVED(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 32) +TEST_DEAD_OUTPUT_KEPT(GEOMETRY, FRAGMENT, LAYER, 32) +TEST_DEAD_OUTPUT_KEPT(GEOMETRY, FRAGMENT, VIEWPORT, 32) +TEST_DEAD_OUTPUT_REMOVED(GEOMETRY, FRAGMENT, VAR0, 32) +TEST_DEAD_OUTPUT_REMOVED(GEOMETRY, FRAGMENT, VAR0, 16) +TEST_DEAD_OUTPUT_REMOVED(GEOMETRY, FRAGMENT, VAR0_16BIT, 16) + +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, POS, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, COL0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, COL1, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, FOGC, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, TEX0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, PSIZ, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, BFC0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, BFC1, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, CLIP_VERTEX, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, CLIP_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, CLIP_DIST1, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, CULL_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, CULL_DIST1, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, LAYER, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, VIEWPORT, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, VAR0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, VAR0, 16) +TEST_DEAD_OUTPUT_KEPT_XFB(VERTEX, FRAGMENT, VAR0_16BIT, 16) + +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, POS, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, COL0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, COL1, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, FOGC, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, TEX0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, PSIZ, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, BFC0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, BFC1, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, CLIP_VERTEX, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, CLIP_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, CLIP_DIST1, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, CULL_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, CULL_DIST1, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, LAYER, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, VIEWPORT, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, VAR0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, VAR0, 16) +TEST_DEAD_OUTPUT_KEPT_XFB(TESS_EVAL, FRAGMENT, VAR0_16BIT, 16) + +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, POS, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, COL0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, COL1, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, FOGC, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, TEX0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, PSIZ, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, BFC0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, BFC1, 32) 
+TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, CLIP_VERTEX, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, CLIP_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, CLIP_DIST1, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, CULL_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, CULL_DIST1, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, LAYER, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, VIEWPORT, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, VAR0, 32) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, VAR0, 16) +TEST_DEAD_OUTPUT_KEPT_XFB(GEOMETRY, FRAGMENT, VAR0_16BIT, 16) + +TEST_DEAD_OUTPUT_KEPT(MESH, FRAGMENT, POS, 32) +TEST_DEAD_OUTPUT_KEPT(MESH, FRAGMENT, PSIZ, 32) +TEST_DEAD_OUTPUT_KEPT(MESH, FRAGMENT, CLIP_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT(MESH, FRAGMENT, CLIP_DIST1, 32) +TEST_DEAD_OUTPUT_KEPT(MESH, FRAGMENT, CULL_DIST0, 32) +TEST_DEAD_OUTPUT_KEPT(MESH, FRAGMENT, CULL_DIST1, 32) +TEST_DEAD_OUTPUT_KEPT(MESH, FRAGMENT, LAYER, 32) +TEST_DEAD_OUTPUT_KEPT(MESH, FRAGMENT, VIEWPORT, 32) +TEST_DEAD_OUTPUT_REMOVED(MESH, FRAGMENT, VAR0, 32) +TEST_DEAD_OUTPUT_REMOVED(MESH, FRAGMENT, VAR0, 16) +TEST_DEAD_OUTPUT_REMOVED(MESH, FRAGMENT, VAR0_16BIT, 16) + +} diff --git a/src/compiler/nir/tests/opt_varyings_tests_dedup.cpp b/src/compiler/nir/tests/opt_varyings_tests_dedup.cpp new file mode 100644 index 00000000000..66cb13a32f7 --- /dev/null +++ b/src/compiler/nir/tests/opt_varyings_tests_dedup.cpp @@ -0,0 +1,152 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * SPDX-License-Identifier: MIT + */ + +#include "nir_opt_varyings_test.h" + +class nir_opt_varyings_test_dedup : public nir_opt_varyings_test +{}; + +#define TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, interp1, interp2) \ +TEST_F(nir_opt_varyings_test_dedup, \ + producer_stage##_##consumer_stage##_##slot1##_##slot2##_##bitsize##_##interp1##_##interp2) \ +{ \ + unsigned pslot[2] = {VARYING_SLOT_##slot1, VARYING_SLOT_##slot2}; \ + unsigned cslot[2] = {VARYING_SLOT_##slot1, VARYING_SLOT_##slot2}; \ + unsigned interp[2] = {INTERP_##interp1, INTERP_##interp2}; \ + \ + /* BFCn becomes COLn in FS, so the consumer reads COLn while the producer still writes BFCn. */ \ + for (unsigned s = 0; s < 2; s++) { \ + if (MESA_SHADER_##consumer_stage == MESA_SHADER_FRAGMENT && \ + (pslot[s] == VARYING_SLOT_BFC0 || pslot[s] == VARYING_SLOT_BFC1)) \ + cslot[s] -= VARYING_SLOT_BFC0 - VARYING_SLOT_COL0; \ + } \ + \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + nir_intrinsic_instr *store[2][3] = {{NULL}}; \ + for (unsigned s = 0; s < 2; s++) { \ + nir_def *input = load_input(b1, (gl_varying_slot)0, 0, nir_type_float##bitsize, 0, 0); \ + for (unsigned v = 0; v < (is_per_vertex(b1, (gl_varying_slot)pslot[s], false) ? 3 : 1); v++) { \ + store[s][v] = \ + store_output(b1, (gl_varying_slot)pslot[s], s, nir_type_float##bitsize, input, v); \ + } \ + } \ + \ + nir_def *load[2][3] = {{NULL}}; \ + for (unsigned s = 0; s < 2; s++) { \ + for (unsigned v = 0; v < (is_per_vertex(b2, (gl_varying_slot)cslot[s], true) ? 
3 : 1); v++) { \ + load[s][v] = load_input(b2, (gl_varying_slot)cslot[s], s, \ + nir_type_float##bitsize, v, interp[s]); \ + store_output(b2, VARYING_SLOT_VAR0, 0, nir_type_float##bitsize, load[s][v], 0); \ + } \ + } \ + \ + if (is_patch((gl_varying_slot)pslot[0]) == is_patch((gl_varying_slot)pslot[1]) && \ + is_color(b2, (gl_varying_slot)cslot[0]) == is_color(b2, (gl_varying_slot)cslot[1]) && \ + !is_texcoord(b2, (gl_varying_slot)cslot[0]) && !is_texcoord(b2, (gl_varying_slot)cslot[1]) && \ + INTERP_##interp1 == INTERP_##interp2 && !is_interp_at_offset(INTERP_##interp1)) { \ + ASSERT_EQ(opt_varyings(), (nir_progress_producer | nir_progress_consumer)); \ + for (unsigned v = 0; v < (is_per_vertex(b1, (gl_varying_slot)pslot[1], false) ? 3 : 1); v++) { \ + ASSERT_TRUE(shader_contains_instr(b1, &store[0][v]->instr)); \ + if (nir_slot_is_sysval_output((gl_varying_slot)pslot[1], MESA_SHADER_##consumer_stage)) { \ + ASSERT_TRUE(shader_contains_instr(b1, &store[1][v]->instr)); \ + ASSERT_TRUE(nir_intrinsic_io_semantics(store[1][v]).no_varying); \ + } else { \ + ASSERT_TRUE(!shader_contains_instr(b1, &store[1][v]->instr)); \ + } \ + } \ + for (unsigned v = 0; v < (is_per_vertex(b2, (gl_varying_slot)cslot[1], true) ? 3 : 1); v++) { \ + ASSERT_TRUE(shader_contains_def(b2, load[0][v])); \ + ASSERT_TRUE(!shader_contains_def(b2, load[1][v])); \ + } \ + } else { \ + ASSERT_EQ(opt_varyings(), 0); \ + for (unsigned v = 0; v < (is_per_vertex(b1, (gl_varying_slot)pslot[1], false) ? 3 : 1); v++) { \ + ASSERT_TRUE(shader_contains_instr(b1, &store[0][v]->instr)); \ + ASSERT_TRUE(shader_contains_instr(b1, &store[1][v]->instr)); \ + } \ + for (unsigned v = 0; v < (is_per_vertex(b2, (gl_varying_slot)cslot[1], true) ? 3 : 1); v++) { \ + ASSERT_TRUE(shader_contains_def(b2, load[0][v])); \ + ASSERT_TRUE(shader_contains_def(b2, load[1][v])); \ + } \ + } \ +} + +#define TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, interp) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, interp, interp) + +#define TEST_DEDUP(producer_stage, consumer_stage, slot1, slot2, bitsize) \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, FLAT) + +#define TEST_DEDUP_INTERP(producer_stage, consumer_stage, slot1, slot2, bitsize) \ + /* Same interpolation qualifier. */ \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, FLAT) \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, PERSP_PIXEL) \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, PERSP_CENTROID) \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, PERSP_SAMPLE) \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, PERSP_AT_OFFSET) \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, LINEAR_PIXEL) \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, LINEAR_CENTROID) \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, LINEAR_SAMPLE) \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, LINEAR_AT_OFFSET) \ + /* Different interpolation qualifiers. 
*/ \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, FLAT, PERSP_PIXEL) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, FLAT, PERSP_CENTROID) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, FLAT, PERSP_SAMPLE) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, FLAT, PERSP_AT_OFFSET) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, FLAT, LINEAR_PIXEL) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, PERSP_PIXEL, PERSP_CENTROID) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, PERSP_CENTROID, PERSP_SAMPLE) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, PERSP_SAMPLE, PERSP_AT_OFFSET) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, LINEAR_PIXEL, LINEAR_CENTROID) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, LINEAR_CENTROID, LINEAR_SAMPLE) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, LINEAR_SAMPLE, LINEAR_AT_OFFSET) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, PERSP_PIXEL, LINEAR_PIXEL) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, PERSP_CENTROID, LINEAR_CENTROID) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, PERSP_SAMPLE, LINEAR_SAMPLE) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, PERSP_AT_OFFSET, LINEAR_AT_OFFSET) + +#define TEST_DEDUP_INTERP_COLOR(producer_stage, consumer_stage, slot1, slot2, bitsize) \ + TEST_DEDUP_INTERP(producer_stage, consumer_stage, slot1, slot2, bitsize) \ + /* Same interpolation qualifier. */ \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, COLOR_PIXEL) \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, COLOR_CENTROID) \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, COLOR_SAMPLE) \ + TEST_DEDUP_TEMPL1(producer_stage, consumer_stage, slot1, slot2, bitsize, COLOR_AT_OFFSET) \ + /* Different interpolation qualifiers. 
*/ \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, FLAT, COLOR_PIXEL) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, COLOR_PIXEL, COLOR_CENTROID) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, COLOR_CENTROID, COLOR_SAMPLE) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, COLOR_SAMPLE, COLOR_AT_OFFSET) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, LINEAR_PIXEL, COLOR_PIXEL) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, LINEAR_CENTROID, COLOR_CENTROID) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, LINEAR_SAMPLE, COLOR_SAMPLE) \ + TEST_DEDUP_TEMPL(producer_stage, consumer_stage, slot1, slot2, bitsize, LINEAR_AT_OFFSET, COLOR_AT_OFFSET) + +TEST_DEDUP(VERTEX, TESS_CTRL, VAR0, VAR1, 32) +TEST_DEDUP(VERTEX, TESS_EVAL, VAR0, VAR1, 32) +TEST_DEDUP(VERTEX, GEOMETRY, VAR0, VAR1, 32) +TEST_DEDUP(TESS_CTRL, TESS_EVAL, VAR0, VAR1, 32) +TEST_DEDUP(TESS_CTRL, TESS_EVAL, PATCH0, PATCH1, 32) +TEST_DEDUP(TESS_EVAL, GEOMETRY, VAR0, VAR1, 32) + +TEST_DEDUP_INTERP(VERTEX, FRAGMENT, VAR0, VAR1, 32) +TEST_DEDUP_INTERP(TESS_EVAL, FRAGMENT, VAR0, VAR1, 32) +TEST_DEDUP_INTERP(GEOMETRY, FRAGMENT, VAR0, VAR1, 32) + +TEST_DEDUP_INTERP_COLOR(VERTEX, FRAGMENT, COL0, COL1, 32) +TEST_DEDUP_INTERP_COLOR(TESS_EVAL, FRAGMENT, COL0, COL1, 32) +TEST_DEDUP_INTERP_COLOR(GEOMETRY, FRAGMENT, COL0, COL1, 32) + +TEST_DEDUP(TESS_CTRL, TESS_EVAL, VAR0, PATCH0, 32) +TEST_DEDUP_INTERP(VERTEX, FRAGMENT, VAR0, COL0, 32) +TEST_DEDUP_INTERP(TESS_EVAL, FRAGMENT, VAR0, COL0, 32) +TEST_DEDUP_INTERP(GEOMETRY, FRAGMENT, VAR0, COL0, 32) + +TEST_DEDUP_INTERP(VERTEX, FRAGMENT, CLIP_DIST0, VAR0, 32) +TEST_DEDUP_INTERP(VERTEX, FRAGMENT, TEX0, TEX1, 32) +TEST_DEDUP_INTERP(VERTEX, FRAGMENT, TEX0, VAR0, 32) + +} diff --git a/src/compiler/nir/tests/opt_varyings_tests_prop_const.cpp b/src/compiler/nir/tests/opt_varyings_tests_prop_const.cpp new file mode 100644 index 00000000000..1648464ba0f --- /dev/null +++ b/src/compiler/nir/tests/opt_varyings_tests_prop_const.cpp @@ -0,0 +1,417 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * SPDX-License-Identifier: MIT + */ + +#include "nir_opt_varyings_test.h" + +class nir_opt_varyings_test_prop_const : public nir_opt_varyings_test +{}; + +#define SHADER_CONST_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, val0, val1) \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + UNUSED nir_intrinsic_instr *store, *store2 = NULL, *store3 = NULL; \ + store = \ + store_output(b1, VARYING_SLOT_##slot, comp, nir_type_float##bitsize, \ + nir_imm_##type##N_t(b1, val0, bitsize), 0); \ + if (is_per_vertex(b1, VARYING_SLOT_##slot, false) || \ + MESA_SHADER_##producer_stage == MESA_SHADER_GEOMETRY) { \ + store2 = store_output(b1, VARYING_SLOT_##slot, comp, nir_type_float##bitsize, \ + nir_imm_##type##N_t(b1, val1, bitsize), 1); \ + store3 = store_output(b1, VARYING_SLOT_##slot, comp, nir_type_float##bitsize, \ + nir_imm_##type##N_t(b1, val1, bitsize), 2); \ + } \ + \ + UNUSED unsigned pindex = VARYING_SLOT_##slot; \ + unsigned cindex = VARYING_SLOT_##slot; \ + if (MESA_SHADER_##consumer_stage == MESA_SHADER_FRAGMENT && \ + (cindex == VARYING_SLOT_BFC0 || cindex == VARYING_SLOT_BFC1)) \ + cindex -= VARYING_SLOT_BFC0 - VARYING_SLOT_COL0; \ + \ + nir_def *input = load_input(b2, (gl_varying_slot)cindex, comp, nir_type_##type##bitsize, 0, 0); \ + store_output(b2, VARYING_SLOT_VAR0, 0, nir_type_float##bitsize, input, 0); \ + nir_def *input2 = load_input(b2, (gl_varying_slot)cindex, comp, nir_type_##type##bitsize, 1, 0); \ + store_output(b2, VARYING_SLOT_VAR1, 0, nir_type_float##bitsize, input2, 0); \ + \ + if (MESA_SHADER_##consumer_stage == MESA_SHADER_FRAGMENT) { \ + /* Compaction moves COL1 to COL0. */ \ + if (cindex == VARYING_SLOT_COL1) { \ + pindex--; \ + cindex--; \ + } \ + \ + /* Compaction moves all these to VAR0. */ \ + if (cindex == VARYING_SLOT_FOGC || cindex == VARYING_SLOT_PRIMITIVE_ID || \ + cindex == VARYING_SLOT_VAR0_16BIT) \ + pindex = cindex = VARYING_SLOT_VAR0; \ + } else { \ + /* Compaction moves everything else to POS. 
*/ \ + if (!is_patch((gl_varying_slot)cindex)) { \ + pindex = cindex = VARYING_SLOT_POS; \ + } \ + } + +#define TEST_CONST_PROP(producer_stage, consumer_stage, slot, comp, type, bitsize, value) \ +TEST_F(nir_opt_varyings_test_prop_const, \ + prop_##producer_stage##_##consumer_stage##_##slot##_##comp##_##type##bitsize) \ +{ \ + SHADER_CONST_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, value, value) \ + \ + if (nir_slot_is_sysval_output((gl_varying_slot)pindex, MESA_SHADER_##consumer_stage)) { \ + ASSERT_TRUE(opt_varyings() == nir_progress_consumer); \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(pindex)); \ + ASSERT_TRUE(nir_intrinsic_io_semantics(store).no_varying); \ + } else { \ + ASSERT_TRUE(opt_varyings() == (nir_progress_producer | nir_progress_consumer)); \ + ASSERT_TRUE(b1->shader->info.outputs_written == 0 && \ + b1->shader->info.patch_outputs_written == 0 && \ + b1->shader->info.outputs_written_16bit == 0); \ + ASSERT_TRUE(!shader_contains_instr(b1, &store->instr)); \ + ASSERT_TRUE(!store2 || !shader_contains_instr(b1, &store2->instr)); \ + ASSERT_TRUE(!store3 || !shader_contains_instr(b1, &store3->instr)); \ + } \ + ASSERT_TRUE(b2->shader->info.inputs_read == 0 && \ + b2->shader->info.patch_inputs_read == 0 && \ + b2->shader->info.inputs_read_16bit == 0); \ + ASSERT_TRUE(!shader_contains_def(b2, input)); \ + ASSERT_TRUE(shader_contains_const_##type(b2, value, bitsize)); \ +} + +#define TEST_CONST_PROP_XFB(producer_stage, consumer_stage, slot, comp, type, bitsize, value) \ +TEST_F(nir_opt_varyings_test_prop_const, \ + xfb_prop_##producer_stage##_##consumer_stage##_##slot##_##comp##_##type##bitsize) \ +{ \ + SHADER_CONST_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, value, value) \ + \ + /* XFB-only outputs are moved to VARn. 
*/ \ + if (MESA_SHADER_##consumer_stage == MESA_SHADER_FRAGMENT &&\ + VARYING_SLOT_##slot == VARYING_SLOT_TEX0) \ + pindex = VARYING_SLOT_VAR0; \ + \ + nir_io_xfb xfb; \ + memset(&xfb, 0, sizeof(xfb)); \ + xfb.out[comp % 2].num_components = 1; \ + if (comp <= 1) { \ + nir_intrinsic_set_io_xfb(store, xfb); \ + if (store2) \ + nir_intrinsic_set_io_xfb(store2, xfb); \ + if (store3) \ + nir_intrinsic_set_io_xfb(store3, xfb); \ + } else { \ + nir_intrinsic_set_io_xfb2(store, xfb); \ + if (store2) \ + nir_intrinsic_set_io_xfb2(store2, xfb); \ + if (store3) \ + nir_intrinsic_set_io_xfb2(store3, xfb); \ + } \ + \ + ASSERT_TRUE(opt_varyings() == nir_progress_consumer); \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(pindex)); \ + ASSERT_TRUE(nir_intrinsic_io_semantics(store).no_varying); \ + ASSERT_TRUE(b2->shader->info.inputs_read == 0 && \ + b2->shader->info.patch_inputs_read == 0 && \ + b2->shader->info.inputs_read_16bit == 0); \ + ASSERT_TRUE(!shader_contains_def(b2, input)); \ + ASSERT_TRUE(shader_contains_const_##type(b2, value, bitsize)); \ +} + +#define TEST_CONST_KEPT_2VAL(producer_stage, consumer_stage, slot, comp, type, bitsize, val0, val1, suffix) \ +TEST_F(nir_opt_varyings_test_prop_const, \ + kept_##suffix##producer_stage##_##consumer_stage##_##slot##_##comp##_##type##bitsize) \ +{ \ + SHADER_CONST_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, val0, val1) \ + \ + ASSERT_TRUE(opt_varyings() == 0); \ + if (pindex >= VARYING_SLOT_VAR0_16BIT) { \ + ASSERT_TRUE(b1->shader->info.outputs_written_16bit == \ + BITFIELD_BIT(pindex - VARYING_SLOT_VAR0_16BIT)); \ + ASSERT_TRUE(b2->shader->info.inputs_read_16bit == \ + BITFIELD_BIT(cindex - VARYING_SLOT_VAR0_16BIT)); \ + } else if (pindex >= VARYING_SLOT_PATCH0) { \ + ASSERT_TRUE(b1->shader->info.patch_outputs_written == BITFIELD_BIT(pindex)); \ + ASSERT_TRUE(b2->shader->info.patch_inputs_read == BITFIELD_BIT(cindex)); \ + } else { \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(pindex)); \ + ASSERT_TRUE(b2->shader->info.inputs_read == BITFIELD64_BIT(cindex)); \ + } \ + ASSERT_TRUE(shader_contains_instr(b1, &store->instr)); \ + ASSERT_TRUE(shader_contains_def(b2, input)); \ + ASSERT_TRUE(!shader_contains_const_##type(b2, val0, bitsize)); \ + ASSERT_TRUE(!shader_contains_const_##type(b2, val1, bitsize)); \ +} + +#define TEST_CONST_KEPT(producer_stage, consumer_stage, slot, comp, type, bitsize, value) \ + TEST_CONST_KEPT_2VAL(producer_stage, consumer_stage, slot, comp, type, bitsize, value, value, ) + +#define TEST_CONST_KEPT_DIFF(producer_stage, consumer_stage, slot, comp, type, bitsize, value) \ + TEST_CONST_KEPT_2VAL(producer_stage, consumer_stage, slot, comp, type, bitsize, value, 0, diff_) + +TEST_CONST_PROP(VERTEX, TESS_CTRL, POS, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, COL0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, COL1, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, BFC0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, BFC1, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, FOGC, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, TEX0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, PSIZ, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, CLIP_VERTEX, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, CLIP_DIST0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, LAYER, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, VIEWPORT, 0, float, 32, 
314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, VAR0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, VAR0, 0, float, 16, 314) +TEST_CONST_PROP(VERTEX, TESS_CTRL, VAR0_16BIT, 0, float, 16, 314) + +TEST_CONST_PROP(VERTEX, TESS_EVAL, POS, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, COL0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, COL1, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, BFC0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, BFC1, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, FOGC, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, TEX0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, PSIZ, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, CLIP_VERTEX, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, CLIP_DIST0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, LAYER, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, VIEWPORT, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, VAR0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, VAR0, 0, float, 16, 314) +TEST_CONST_PROP(VERTEX, TESS_EVAL, VAR0_16BIT, 0, float, 16, 314) + +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, POS, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, COL0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, COL1, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, BFC0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, BFC1, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, FOGC, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, TEX0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, PSIZ, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, CLIP_VERTEX, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, CLIP_DIST0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, LAYER, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, VIEWPORT, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 16, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, VAR0_16BIT, 0, float, 16, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, TESS_LEVEL_INNER, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, TESS_LEVEL_OUTER, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, PATCH0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_CTRL, TESS_EVAL, PATCH0, 0, float, 16, 314) + +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, POS, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, COL0, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, COL1, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, BFC0, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, BFC1, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, FOGC, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, TEX0, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, PSIZ, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, CLIP_VERTEX, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, CLIP_DIST0, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, LAYER, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VIEWPORT, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 32, 314) 
+TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 16, 314) +TEST_CONST_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VAR0_16BIT, 0, float, 16, 314) + +TEST_CONST_PROP(VERTEX, GEOMETRY, POS, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, COL0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, COL1, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, BFC0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, BFC1, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, FOGC, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, TEX0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, PSIZ, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, CLIP_VERTEX, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, CLIP_DIST0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, LAYER, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, VIEWPORT, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, VAR0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, VAR0, 0, float, 16, 314) +TEST_CONST_PROP(VERTEX, GEOMETRY, VAR0_16BIT, 0, float, 16, 314) + +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, POS, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, COL0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, COL1, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, BFC0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, BFC1, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, FOGC, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, TEX0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, PSIZ, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, CLIP_VERTEX, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, CLIP_DIST0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, LAYER, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, VIEWPORT, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, VAR0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, VAR0, 0, float, 16, 314) +TEST_CONST_PROP(TESS_EVAL, GEOMETRY, VAR0_16BIT, 0, float, 16, 314) + +TEST_CONST_PROP(VERTEX, FRAGMENT, COL0, 0, float, 32, 0.14) +TEST_CONST_PROP(VERTEX, FRAGMENT, COL1, 0, float, 32, 0.14) +TEST_CONST_PROP(VERTEX, FRAGMENT, BFC0, 0, float, 32, 0.14) +TEST_CONST_PROP(VERTEX, FRAGMENT, BFC1, 0, float, 32, 0.14) +TEST_CONST_PROP(VERTEX, FRAGMENT, TEX0, 2, float, 32, 0) +TEST_CONST_PROP(VERTEX, FRAGMENT, TEX0, 3, float, 32, 1) + +TEST_CONST_KEPT(VERTEX, FRAGMENT, COL0, 0, float, 32, 314) +TEST_CONST_KEPT(VERTEX, FRAGMENT, COL1, 0, float, 32, 314) +TEST_CONST_KEPT(VERTEX, FRAGMENT, BFC0, 0, float, 32, 314) +TEST_CONST_KEPT(VERTEX, FRAGMENT, BFC1, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, FRAGMENT, FOGC, 0, float, 32, 314) +TEST_CONST_KEPT(VERTEX, FRAGMENT, TEX0, 0, float, 32, 314) +TEST_CONST_KEPT(VERTEX, FRAGMENT, PSIZ, 0, float, 32, 314) +TEST_CONST_KEPT(VERTEX, FRAGMENT, CLIP_VERTEX, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, FRAGMENT, CLIP_DIST0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, FRAGMENT, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, FRAGMENT, CULL_DIST0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, FRAGMENT, CULL_DIST1, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, FRAGMENT, LAYER, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, FRAGMENT, VIEWPORT, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, FRAGMENT, VAR0, 0, float, 32, 314) +TEST_CONST_PROP(VERTEX, FRAGMENT, VAR0, 0, float, 16, 314) 
+TEST_CONST_PROP(VERTEX, FRAGMENT, VAR0_16BIT, 0, float, 16, 314) + +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, COL0, 0, float, 32, 0.14) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, COL1, 0, float, 32, 0.14) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, BFC0, 0, float, 32, 0.14) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, BFC1, 0, float, 32, 0.14) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, TEX0, 2, float, 32, 0) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, TEX0, 3, float, 32, 1) + +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, FOGC, 0, float, 32, 314) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, CLIP_DIST0, 0, float, 32, 314) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, CULL_DIST0, 0, float, 32, 314) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, CULL_DIST1, 0, float, 32, 314) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, LAYER, 0, float, 32, 314) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, VIEWPORT, 0, float, 32, 314) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, VAR0, 0, float, 32, 314) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, VAR0, 0, float, 16, 314) +TEST_CONST_PROP_XFB(VERTEX, FRAGMENT, VAR0_16BIT, 0, float, 16, 314) + +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, COL0, 0, float, 32, 0.14) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, COL1, 0, float, 32, 0.14) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, BFC0, 0, float, 32, 0.14) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, BFC1, 0, float, 32, 0.14) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, TEX0, 2, float, 32, 0) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, TEX0, 3, float, 32, 1) + +TEST_CONST_KEPT(TESS_EVAL, FRAGMENT, COL0, 0, float, 32, 314) +TEST_CONST_KEPT(TESS_EVAL, FRAGMENT, COL1, 0, float, 32, 314) +TEST_CONST_KEPT(TESS_EVAL, FRAGMENT, BFC0, 0, float, 32, 314) +TEST_CONST_KEPT(TESS_EVAL, FRAGMENT, BFC1, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, FOGC, 0, float, 32, 314) +TEST_CONST_KEPT(TESS_EVAL, FRAGMENT, TEX0, 0, float, 32, 314) +TEST_CONST_KEPT(TESS_EVAL, FRAGMENT, PSIZ, 0, float, 32, 314) +TEST_CONST_KEPT(TESS_EVAL, FRAGMENT, CLIP_VERTEX, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, CLIP_DIST0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, CULL_DIST0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, CULL_DIST1, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, LAYER, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, VIEWPORT, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, VAR0, 0, float, 32, 314) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, VAR0, 0, float, 16, 314) +TEST_CONST_PROP(TESS_EVAL, FRAGMENT, VAR0_16BIT, 0, float, 16, 314) + +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, COL0, 0, float, 32, 0.14) +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, COL1, 0, float, 32, 0.14) +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, BFC0, 0, float, 32, 0.14) +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, BFC1, 0, float, 32, 0.14) +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, TEX0, 2, float, 32, 0) +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, TEX0, 3, float, 32, 1) + +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, FOGC, 0, float, 32, 314) +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, CLIP_DIST0, 0, float, 32, 314) +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, CULL_DIST0, 0, float, 32, 314) +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, CULL_DIST1, 0, float, 32, 314) +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, LAYER, 0, float, 32, 314) +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, VIEWPORT, 0, float, 32, 314) 
+TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, VAR0, 0, float, 32, 314) +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, VAR0, 0, float, 16, 314) +TEST_CONST_PROP_XFB(TESS_EVAL, FRAGMENT, VAR0_16BIT, 0, float, 16, 314) + +TEST_CONST_PROP(GEOMETRY, FRAGMENT, COL0, 0, float, 32, 0.14) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, COL1, 0, float, 32, 0.14) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, BFC0, 0, float, 32, 0.14) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, BFC1, 0, float, 32, 0.14) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, TEX0, 2, float, 32, 0) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, TEX0, 3, float, 32, 1) + +TEST_CONST_KEPT(GEOMETRY, FRAGMENT, COL0, 0, float, 32, 314) +TEST_CONST_KEPT(GEOMETRY, FRAGMENT, COL1, 0, float, 32, 314) +TEST_CONST_KEPT(GEOMETRY, FRAGMENT, BFC0, 0, float, 32, 314) +TEST_CONST_KEPT(GEOMETRY, FRAGMENT, BFC1, 0, float, 32, 314) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, FOGC, 0, float, 32, 314) +TEST_CONST_KEPT(GEOMETRY, FRAGMENT, TEX0, 0, float, 32, 314) +TEST_CONST_KEPT(GEOMETRY, FRAGMENT, PSIZ, 0, float, 32, 314) +TEST_CONST_KEPT(GEOMETRY, FRAGMENT, CLIP_VERTEX, 0, float, 32, 314) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, CLIP_DIST0, 0, float, 32, 314) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, CULL_DIST0, 0, float, 32, 314) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, CULL_DIST1, 0, float, 32, 314) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 0, float, 32, 314) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, LAYER, 0, float, 32, 314) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, VIEWPORT, 0, float, 32, 314) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, VAR0, 0, float, 32, 314) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, VAR0, 0, float, 16, 314) +TEST_CONST_PROP(GEOMETRY, FRAGMENT, VAR0_16BIT, 0, float, 16, 314) + +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, COL0, 0, float, 32, 0.14) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, COL1, 0, float, 32, 0.14) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, BFC0, 0, float, 32, 0.14) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, BFC1, 0, float, 32, 0.14) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, TEX0, 2, float, 32, 0) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, TEX0, 3, float, 32, 1) + +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, FOGC, 0, float, 32, 314) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, CLIP_DIST0, 0, float, 32, 314) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, CULL_DIST0, 0, float, 32, 314) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, CULL_DIST1, 0, float, 32, 314) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 0, float, 32, 314) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, LAYER, 0, float, 32, 314) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, VIEWPORT, 0, float, 32, 314) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, VAR0, 0, float, 32, 314) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, VAR0, 0, float, 16, 314) +TEST_CONST_PROP_XFB(GEOMETRY, FRAGMENT, VAR0_16BIT, 0, float, 16, 314) + +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, COL0, 0, float, 32, 0.14) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, COL1, 0, float, 32, 0.14) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, BFC0, 0, float, 32, 0.14) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, BFC1, 0, float, 32, 0.14) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, FOGC, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, TEX0, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, PSIZ, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, CLIP_VERTEX, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, CLIP_DIST0, 0, float, 32, 314) 
+TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, CULL_DIST0, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, CULL_DIST1, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, LAYER, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, VIEWPORT, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, VAR0, 0, float, 32, 314) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, VAR0, 0, float, 16, 314) +TEST_CONST_KEPT_DIFF(GEOMETRY, FRAGMENT, VAR0_16BIT, 0, float, 16, 314) + +TEST_CONST_KEPT(MESH, FRAGMENT, PSIZ, 0, float, 32, 314) +TEST_CONST_PROP(MESH, FRAGMENT, CLIP_DIST0, 0, float, 32, 314) +TEST_CONST_PROP(MESH, FRAGMENT, CLIP_DIST1, 0, float, 32, 314) +TEST_CONST_PROP(MESH, FRAGMENT, CULL_DIST0, 0, float, 32, 314) +TEST_CONST_PROP(MESH, FRAGMENT, CULL_DIST1, 0, float, 32, 314) +TEST_CONST_PROP(MESH, FRAGMENT, LAYER, 0, float, 32, 314) +TEST_CONST_PROP(MESH, FRAGMENT, VIEWPORT, 0, float, 32, 314) +TEST_CONST_PROP(MESH, FRAGMENT, VAR0, 0, float, 32, 314) +TEST_CONST_PROP(MESH, FRAGMENT, VAR0, 0, float, 16, 314) +TEST_CONST_PROP(MESH, FRAGMENT, VAR0_16BIT, 0, float, 16, 314) + +} diff --git a/src/compiler/nir/tests/opt_varyings_tests_prop_ubo.cpp b/src/compiler/nir/tests/opt_varyings_tests_prop_ubo.cpp new file mode 100644 index 00000000000..2b1a0470f5c --- /dev/null +++ b/src/compiler/nir/tests/opt_varyings_tests_prop_ubo.cpp @@ -0,0 +1,381 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * SPDX-License-Identifier: MIT + */ + +#include "nir_opt_varyings_test.h" + +class nir_opt_varyings_test_prop_ubo : public nir_opt_varyings_test +{}; + +#define SHADER_UBO_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, index0, index1) \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + UNUSED nir_intrinsic_instr *store, *store2 = NULL, *store3 = NULL; \ + store = \ + store_output(b1, VARYING_SLOT_##slot, comp, nir_type_float##bitsize, \ + load_ubo(b1, bitsize, index0), 0); \ + if (is_per_vertex(b1, VARYING_SLOT_##slot, false) || \ + MESA_SHADER_##producer_stage == MESA_SHADER_GEOMETRY) { \ + store2 = store_output(b1, VARYING_SLOT_##slot, comp, nir_type_float##bitsize, \ + load_ubo(b1, bitsize, index1), 1); \ + store3 = store_output(b1, VARYING_SLOT_##slot, comp, nir_type_float##bitsize, \ + load_ubo(b1, bitsize, index1), 2); \ + } \ + \ + UNUSED unsigned pindex = VARYING_SLOT_##slot; \ + unsigned cindex = VARYING_SLOT_##slot; \ + if (MESA_SHADER_##consumer_stage == MESA_SHADER_FRAGMENT && \ + (cindex == VARYING_SLOT_BFC0 || cindex == VARYING_SLOT_BFC1)) \ + cindex -= VARYING_SLOT_BFC0 - VARYING_SLOT_COL0; \ + \ + nir_def *input = load_input(b2, (gl_varying_slot)cindex, comp, nir_type_##type##bitsize, 0, 0); \ + store_output(b2, VARYING_SLOT_VAR0, 0, nir_type_float##bitsize, input, 0); \ + nir_def *input2 = load_input(b2, (gl_varying_slot)cindex, comp, nir_type_##type##bitsize, 1, 0); \ + store_output(b2, VARYING_SLOT_VAR1, 0, nir_type_float##bitsize, input2, 0); \ + \ + if (MESA_SHADER_##consumer_stage == MESA_SHADER_FRAGMENT) { \ + /* Compaction moves COL1 to COL0. */ \ + if (cindex == VARYING_SLOT_COL1) { \ + pindex--; \ + cindex--; \ + } \ + \ + /* Compaction moves all these to VAR0. 
*/ \ + if (cindex == VARYING_SLOT_FOGC || cindex == VARYING_SLOT_PRIMITIVE_ID || \ + cindex == VARYING_SLOT_VAR0_16BIT) \ + pindex = cindex = VARYING_SLOT_VAR0; \ + } else { \ + /* Compaction moves everything else to POS. */ \ + if (!is_patch((gl_varying_slot)cindex)) { \ + pindex = cindex = VARYING_SLOT_POS; \ + } \ + } + +#define TEST_UBO_PROP(producer_stage, consumer_stage, slot, comp, type, bitsize) \ +TEST_F(nir_opt_varyings_test_prop_ubo, \ + prop_##producer_stage##_##consumer_stage##_##slot##_##comp##_##type##bitsize) \ +{ \ + SHADER_UBO_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, 1, 1) \ + \ + if (nir_slot_is_sysval_output((gl_varying_slot)pindex, MESA_SHADER_##consumer_stage)) { \ + ASSERT_TRUE(opt_varyings() == nir_progress_consumer); \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(pindex)); \ + ASSERT_TRUE(nir_intrinsic_io_semantics(store).no_varying); \ + } else { \ + ASSERT_TRUE(opt_varyings() == (nir_progress_producer | nir_progress_consumer)); \ + ASSERT_TRUE(b1->shader->info.outputs_written == 0 && \ + b1->shader->info.patch_outputs_written == 0 && \ + b1->shader->info.outputs_written_16bit == 0); \ + ASSERT_TRUE(!shader_contains_instr(b1, &store->instr)); \ + ASSERT_TRUE(!store2 || !shader_contains_instr(b1, &store2->instr)); \ + ASSERT_TRUE(!store3 || !shader_contains_instr(b1, &store3->instr)); \ + } \ + ASSERT_TRUE(b2->shader->info.inputs_read == 0 && \ + b2->shader->info.patch_inputs_read == 0 && \ + b2->shader->info.inputs_read_16bit == 0); \ + ASSERT_TRUE(!shader_contains_def(b2, input)); \ + ASSERT_TRUE(shader_contains_ubo(b2, bitsize, 1)); \ +} + +#define TEST_UBO_PROP_XFB(producer_stage, consumer_stage, slot, comp, type, bitsize) \ +TEST_F(nir_opt_varyings_test_prop_ubo, \ + xfb_prop_##producer_stage##_##consumer_stage##_##slot##_##comp##_##type##bitsize) \ +{ \ + SHADER_UBO_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, 1, 1) \ + \ + /* XFB-only outputs are moved to VARn. 
*/ \ + if (MESA_SHADER_##consumer_stage == MESA_SHADER_FRAGMENT &&\ + VARYING_SLOT_##slot == VARYING_SLOT_TEX0) \ + pindex = VARYING_SLOT_VAR0; \ + \ + nir_io_xfb xfb; \ + memset(&xfb, 0, sizeof(xfb)); \ + xfb.out[comp % 2].num_components = 1; \ + if (comp <= 1) { \ + nir_intrinsic_set_io_xfb(store, xfb); \ + if (store2) \ + nir_intrinsic_set_io_xfb(store2, xfb); \ + if (store3) \ + nir_intrinsic_set_io_xfb(store3, xfb); \ + } else { \ + nir_intrinsic_set_io_xfb2(store, xfb); \ + if (store2) \ + nir_intrinsic_set_io_xfb2(store2, xfb); \ + if (store3) \ + nir_intrinsic_set_io_xfb2(store3, xfb); \ + } \ + \ + ASSERT_TRUE(opt_varyings() == nir_progress_consumer); \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(pindex)); \ + ASSERT_TRUE(nir_intrinsic_io_semantics(store).no_varying); \ + ASSERT_TRUE(b2->shader->info.inputs_read == 0 && \ + b2->shader->info.patch_inputs_read == 0 && \ + b2->shader->info.inputs_read_16bit == 0); \ + ASSERT_TRUE(!shader_contains_def(b2, input)); \ + ASSERT_TRUE(shader_contains_ubo(b2, bitsize, 1)); \ +} + +#define TEST_UBO_KEPT_2VAL(producer_stage, consumer_stage, slot, comp, type, bitsize, index0, index1, suffix) \ +TEST_F(nir_opt_varyings_test_prop_ubo, \ + kept_##suffix##producer_stage##_##consumer_stage##_##slot##_##comp##_##type##bitsize) \ +{ \ + SHADER_UBO_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, index0, index1) \ + \ + ASSERT_TRUE(opt_varyings() == 0); \ + if (pindex >= VARYING_SLOT_VAR0_16BIT) { \ + ASSERT_TRUE(b1->shader->info.outputs_written_16bit == \ + BITFIELD_BIT(pindex - VARYING_SLOT_VAR0_16BIT)); \ + ASSERT_TRUE(b2->shader->info.inputs_read_16bit == \ + BITFIELD_BIT(cindex - VARYING_SLOT_VAR0_16BIT)); \ + } else if (pindex >= VARYING_SLOT_PATCH0) { \ + ASSERT_TRUE(b1->shader->info.patch_outputs_written == BITFIELD_BIT(pindex)); \ + ASSERT_TRUE(b2->shader->info.patch_inputs_read == BITFIELD_BIT(cindex)); \ + } else { \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(pindex)); \ + ASSERT_TRUE(b2->shader->info.inputs_read == BITFIELD64_BIT(cindex)); \ + } \ + ASSERT_TRUE(shader_contains_instr(b1, &store->instr)); \ + ASSERT_TRUE(shader_contains_def(b2, input)); \ + ASSERT_TRUE(!shader_contains_ubo(b2, bitsize, 0)); \ + ASSERT_TRUE(!shader_contains_ubo(b2, bitsize, 1)); \ +} + +#define TEST_UBO_KEPT(producer_stage, consumer_stage, slot, comp, type, bitsize) \ + TEST_UBO_KEPT_2VAL(producer_stage, consumer_stage, slot, comp, type, bitsize, 0, 0, ) + +#define TEST_UBO_KEPT_DIFF(producer_stage, consumer_stage, slot, comp, type, bitsize) \ + TEST_UBO_KEPT_2VAL(producer_stage, consumer_stage, slot, comp, type, bitsize, 0, 1, diff_) + +TEST_UBO_PROP(VERTEX, TESS_CTRL, POS, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, COL0, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, COL1, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, BFC0, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, BFC1, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, FOGC, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, TEX0, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, PSIZ, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, CLIP_VERTEX, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, CLIP_DIST0, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, CLIP_DIST1, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, LAYER, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, VIEWPORT, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, VAR0, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_CTRL, VAR0, 0, float, 16) +TEST_UBO_PROP(VERTEX, TESS_CTRL, VAR0_16BIT, 
0, float, 16) + +TEST_UBO_PROP(VERTEX, TESS_EVAL, POS, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, COL0, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, COL1, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, BFC0, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, BFC1, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, FOGC, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, TEX0, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, PSIZ, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, CLIP_VERTEX, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, CLIP_DIST0, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, CLIP_DIST1, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, LAYER, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, VIEWPORT, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, VAR0, 0, float, 32) +TEST_UBO_PROP(VERTEX, TESS_EVAL, VAR0, 0, float, 16) +TEST_UBO_PROP(VERTEX, TESS_EVAL, VAR0_16BIT, 0, float, 16) + +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, POS, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, COL0, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, COL1, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, BFC0, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, BFC1, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, FOGC, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, TEX0, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, PSIZ, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, CLIP_VERTEX, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, CLIP_DIST0, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, CLIP_DIST1, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, LAYER, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, VIEWPORT, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 16) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, VAR0_16BIT, 0, float, 16) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, TESS_LEVEL_INNER, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, TESS_LEVEL_OUTER, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, PATCH0, 0, float, 32) +TEST_UBO_PROP(TESS_CTRL, TESS_EVAL, PATCH0, 0, float, 16) + +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, POS, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, COL0, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, COL1, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, BFC0, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, BFC1, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, FOGC, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, TEX0, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, PSIZ, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, CLIP_VERTEX, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, CLIP_DIST0, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, CLIP_DIST1, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, LAYER, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VIEWPORT, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 32) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 16) +TEST_UBO_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VAR0_16BIT, 0, float, 16) + +TEST_UBO_PROP(VERTEX, GEOMETRY, POS, 0, float, 32) +TEST_UBO_PROP(VERTEX, GEOMETRY, COL0, 0, float, 32) +TEST_UBO_PROP(VERTEX, GEOMETRY, COL1, 0, float, 32) +TEST_UBO_PROP(VERTEX, GEOMETRY, BFC0, 0, float, 32) +TEST_UBO_PROP(VERTEX, GEOMETRY, BFC1, 0, float, 32) +TEST_UBO_PROP(VERTEX, GEOMETRY, FOGC, 0, float, 32) +TEST_UBO_PROP(VERTEX, GEOMETRY, TEX0, 0, float, 32) +TEST_UBO_PROP(VERTEX, GEOMETRY, PSIZ, 0, float, 
32) +TEST_UBO_PROP(VERTEX, GEOMETRY, CLIP_VERTEX, 0, float, 32) +TEST_UBO_PROP(VERTEX, GEOMETRY, CLIP_DIST0, 0, float, 32) +TEST_UBO_PROP(VERTEX, GEOMETRY, CLIP_DIST1, 0, float, 32) +TEST_UBO_PROP(VERTEX, GEOMETRY, LAYER, 0, float, 32) +TEST_UBO_PROP(VERTEX, GEOMETRY, VIEWPORT, 0, float, 32) +TEST_UBO_PROP(VERTEX, GEOMETRY, VAR0, 0, float, 32) +TEST_UBO_PROP(VERTEX, GEOMETRY, VAR0, 0, float, 16) +TEST_UBO_PROP(VERTEX, GEOMETRY, VAR0_16BIT, 0, float, 16) + +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, POS, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, COL0, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, COL1, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, BFC0, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, BFC1, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, FOGC, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, TEX0, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, PSIZ, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, CLIP_VERTEX, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, CLIP_DIST0, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, CLIP_DIST1, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, LAYER, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, VIEWPORT, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, VAR0, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, VAR0, 0, float, 16) +TEST_UBO_PROP(TESS_EVAL, GEOMETRY, VAR0_16BIT, 0, float, 16) + +TEST_UBO_KEPT(VERTEX, FRAGMENT, COL0, 0, float, 32) +TEST_UBO_KEPT(VERTEX, FRAGMENT, COL1, 0, float, 32) +TEST_UBO_KEPT(VERTEX, FRAGMENT, BFC0, 0, float, 32) +TEST_UBO_KEPT(VERTEX, FRAGMENT, BFC1, 0, float, 32) +TEST_UBO_PROP(VERTEX, FRAGMENT, FOGC, 0, float, 32) +TEST_UBO_KEPT(VERTEX, FRAGMENT, TEX0, 0, float, 32) +TEST_UBO_KEPT(VERTEX, FRAGMENT, TEX0, 2, float, 32) +TEST_UBO_KEPT(VERTEX, FRAGMENT, TEX0, 3, float, 32) +TEST_UBO_KEPT(VERTEX, FRAGMENT, PSIZ, 0, float, 32) +TEST_UBO_KEPT(VERTEX, FRAGMENT, CLIP_VERTEX, 0, float, 32) +TEST_UBO_PROP(VERTEX, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UBO_PROP(VERTEX, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UBO_PROP(VERTEX, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UBO_PROP(VERTEX, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UBO_PROP(VERTEX, FRAGMENT, LAYER, 0, float, 32) +TEST_UBO_PROP(VERTEX, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UBO_PROP(VERTEX, FRAGMENT, VAR0, 0, float, 32) +TEST_UBO_PROP(VERTEX, FRAGMENT, VAR0, 0, float, 16) +TEST_UBO_PROP(VERTEX, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UBO_PROP_XFB(VERTEX, FRAGMENT, FOGC, 0, float, 32) +TEST_UBO_PROP_XFB(VERTEX, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UBO_PROP_XFB(VERTEX, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UBO_PROP_XFB(VERTEX, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UBO_PROP_XFB(VERTEX, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UBO_PROP_XFB(VERTEX, FRAGMENT, LAYER, 0, float, 32) +TEST_UBO_PROP_XFB(VERTEX, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UBO_PROP_XFB(VERTEX, FRAGMENT, VAR0, 0, float, 32) +TEST_UBO_PROP_XFB(VERTEX, FRAGMENT, VAR0, 0, float, 16) +TEST_UBO_PROP_XFB(VERTEX, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UBO_KEPT(TESS_EVAL, FRAGMENT, COL0, 0, float, 32) +TEST_UBO_KEPT(TESS_EVAL, FRAGMENT, COL1, 0, float, 32) +TEST_UBO_KEPT(TESS_EVAL, FRAGMENT, BFC0, 0, float, 32) +TEST_UBO_KEPT(TESS_EVAL, FRAGMENT, BFC1, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, FRAGMENT, FOGC, 0, float, 32) +TEST_UBO_KEPT(TESS_EVAL, FRAGMENT, TEX0, 0, float, 32) +TEST_UBO_KEPT(TESS_EVAL, FRAGMENT, TEX0, 2, float, 32) +TEST_UBO_KEPT(TESS_EVAL, FRAGMENT, TEX0, 3, float, 32) +TEST_UBO_KEPT(TESS_EVAL, FRAGMENT, PSIZ, 0, float, 32) 
+TEST_UBO_KEPT(TESS_EVAL, FRAGMENT, CLIP_VERTEX, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, FRAGMENT, LAYER, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, FRAGMENT, VAR0, 0, float, 32) +TEST_UBO_PROP(TESS_EVAL, FRAGMENT, VAR0, 0, float, 16) +TEST_UBO_PROP(TESS_EVAL, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UBO_PROP_XFB(TESS_EVAL, FRAGMENT, FOGC, 0, float, 32) +TEST_UBO_PROP_XFB(TESS_EVAL, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UBO_PROP_XFB(TESS_EVAL, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UBO_PROP_XFB(TESS_EVAL, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UBO_PROP_XFB(TESS_EVAL, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UBO_PROP_XFB(TESS_EVAL, FRAGMENT, LAYER, 0, float, 32) +TEST_UBO_PROP_XFB(TESS_EVAL, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UBO_PROP_XFB(TESS_EVAL, FRAGMENT, VAR0, 0, float, 32) +TEST_UBO_PROP_XFB(TESS_EVAL, FRAGMENT, VAR0, 0, float, 16) +TEST_UBO_PROP_XFB(TESS_EVAL, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UBO_KEPT(GEOMETRY, FRAGMENT, COL0, 0, float, 32) +TEST_UBO_KEPT(GEOMETRY, FRAGMENT, COL1, 0, float, 32) +TEST_UBO_KEPT(GEOMETRY, FRAGMENT, BFC0, 0, float, 32) +TEST_UBO_KEPT(GEOMETRY, FRAGMENT, BFC1, 0, float, 32) +TEST_UBO_PROP(GEOMETRY, FRAGMENT, FOGC, 0, float, 32) +TEST_UBO_KEPT(GEOMETRY, FRAGMENT, TEX0, 0, float, 32) +TEST_UBO_KEPT(GEOMETRY, FRAGMENT, TEX0, 2, float, 32) +TEST_UBO_KEPT(GEOMETRY, FRAGMENT, TEX0, 3, float, 32) +TEST_UBO_KEPT(GEOMETRY, FRAGMENT, PSIZ, 0, float, 32) +TEST_UBO_KEPT(GEOMETRY, FRAGMENT, CLIP_VERTEX, 0, float, 32) +TEST_UBO_PROP(GEOMETRY, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UBO_PROP(GEOMETRY, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UBO_PROP(GEOMETRY, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UBO_PROP(GEOMETRY, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UBO_PROP(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 0, float, 32) +TEST_UBO_PROP(GEOMETRY, FRAGMENT, LAYER, 0, float, 32) +TEST_UBO_PROP(GEOMETRY, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UBO_PROP(GEOMETRY, FRAGMENT, VAR0, 0, float, 32) +TEST_UBO_PROP(GEOMETRY, FRAGMENT, VAR0, 0, float, 16) +TEST_UBO_PROP(GEOMETRY, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UBO_PROP_XFB(GEOMETRY, FRAGMENT, FOGC, 0, float, 32) +TEST_UBO_PROP_XFB(GEOMETRY, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UBO_PROP_XFB(GEOMETRY, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UBO_PROP_XFB(GEOMETRY, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UBO_PROP_XFB(GEOMETRY, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UBO_PROP_XFB(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 0, float, 32) +TEST_UBO_PROP_XFB(GEOMETRY, FRAGMENT, LAYER, 0, float, 32) +TEST_UBO_PROP_XFB(GEOMETRY, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UBO_PROP_XFB(GEOMETRY, FRAGMENT, VAR0, 0, float, 32) +TEST_UBO_PROP_XFB(GEOMETRY, FRAGMENT, VAR0, 0, float, 16) +TEST_UBO_PROP_XFB(GEOMETRY, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, COL0, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, COL1, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, BFC0, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, BFC1, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, FOGC, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, TEX0, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, PSIZ, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, 
CLIP_VERTEX, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, LAYER, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, VAR0, 0, float, 32) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, VAR0, 0, float, 16) +TEST_UBO_KEPT_DIFF(GEOMETRY, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UBO_KEPT(MESH, FRAGMENT, PSIZ, 0, float, 32) +TEST_UBO_PROP(MESH, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UBO_PROP(MESH, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UBO_PROP(MESH, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UBO_PROP(MESH, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UBO_PROP(MESH, FRAGMENT, LAYER, 0, float, 32) +TEST_UBO_PROP(MESH, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UBO_PROP(MESH, FRAGMENT, VAR0, 0, float, 32) +TEST_UBO_PROP(MESH, FRAGMENT, VAR0, 0, float, 16) +TEST_UBO_PROP(MESH, FRAGMENT, VAR0_16BIT, 0, float, 16) + +} diff --git a/src/compiler/nir/tests/opt_varyings_tests_prop_uniform.cpp b/src/compiler/nir/tests/opt_varyings_tests_prop_uniform.cpp new file mode 100644 index 00000000000..987b9d6d384 --- /dev/null +++ b/src/compiler/nir/tests/opt_varyings_tests_prop_uniform.cpp @@ -0,0 +1,381 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * SPDX-License-Identifier: MIT + */ + +#include "nir_opt_varyings_test.h" + +class nir_opt_varyings_test_prop_uniform : public nir_opt_varyings_test +{}; + +#define SHADER_UNIFORM_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, index0, index1) \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + UNUSED nir_intrinsic_instr *store, *store2 = NULL, *store3 = NULL; \ + store = \ + store_output(b1, VARYING_SLOT_##slot, comp, nir_type_float##bitsize, \ + load_uniform(b1, bitsize, index0), 0); \ + if (is_per_vertex(b1, VARYING_SLOT_##slot, false) || \ + MESA_SHADER_##producer_stage == MESA_SHADER_GEOMETRY) { \ + store2 = store_output(b1, VARYING_SLOT_##slot, comp, nir_type_float##bitsize, \ + load_uniform(b1, bitsize, index1), 1); \ + store3 = store_output(b1, VARYING_SLOT_##slot, comp, nir_type_float##bitsize, \ + load_uniform(b1, bitsize, index1), 2); \ + } \ + \ + UNUSED unsigned pindex = VARYING_SLOT_##slot; \ + unsigned cindex = VARYING_SLOT_##slot; \ + if (MESA_SHADER_##consumer_stage == MESA_SHADER_FRAGMENT && \ + (cindex == VARYING_SLOT_BFC0 || cindex == VARYING_SLOT_BFC1)) \ + cindex -= VARYING_SLOT_BFC0 - VARYING_SLOT_COL0; \ + \ + nir_def *input = load_input(b2, (gl_varying_slot)cindex, comp, nir_type_##type##bitsize, 0, 0); \ + store_output(b2, VARYING_SLOT_VAR0, 0, nir_type_float##bitsize, input, 0); \ + nir_def *input2 = load_input(b2, (gl_varying_slot)cindex, comp, nir_type_##type##bitsize, 1, 0); \ + store_output(b2, VARYING_SLOT_VAR1, 0, nir_type_float##bitsize, input2, 0); \ + \ + if (MESA_SHADER_##consumer_stage == MESA_SHADER_FRAGMENT) { \ + /* Compaction moves COL1 to COL0. */ \ + if (cindex == VARYING_SLOT_COL1) { \ + pindex--; \ + cindex--; \ + } \ + \ + /* Compaction moves all these to VAR0. 
*/ \ + if (cindex == VARYING_SLOT_FOGC || cindex == VARYING_SLOT_PRIMITIVE_ID || \ + cindex == VARYING_SLOT_VAR0_16BIT) \ + pindex = cindex = VARYING_SLOT_VAR0; \ + } else { \ + /* Compaction moves everything else to POS. */ \ + if (!is_patch((gl_varying_slot)cindex)) { \ + pindex = cindex = VARYING_SLOT_POS; \ + } \ + } + +#define TEST_UNIFORM_PROP(producer_stage, consumer_stage, slot, comp, type, bitsize) \ +TEST_F(nir_opt_varyings_test_prop_uniform, \ + prop_##producer_stage##_##consumer_stage##_##slot##_##comp##_##type##bitsize) \ +{ \ + SHADER_UNIFORM_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, 1, 1) \ + \ + if (nir_slot_is_sysval_output((gl_varying_slot)pindex, MESA_SHADER_##consumer_stage)) { \ + ASSERT_TRUE(opt_varyings() == nir_progress_consumer); \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(pindex)); \ + ASSERT_TRUE(nir_intrinsic_io_semantics(store).no_varying); \ + } else { \ + ASSERT_TRUE(opt_varyings() == (nir_progress_producer | nir_progress_consumer)); \ + ASSERT_TRUE(b1->shader->info.outputs_written == 0 && \ + b1->shader->info.patch_outputs_written == 0 && \ + b1->shader->info.outputs_written_16bit == 0); \ + ASSERT_TRUE(!shader_contains_instr(b1, &store->instr)); \ + ASSERT_TRUE(!store2 || !shader_contains_instr(b1, &store2->instr)); \ + ASSERT_TRUE(!store3 || !shader_contains_instr(b1, &store3->instr)); \ + } \ + ASSERT_TRUE(b2->shader->info.inputs_read == 0 && \ + b2->shader->info.patch_inputs_read == 0 && \ + b2->shader->info.inputs_read_16bit == 0); \ + ASSERT_TRUE(!shader_contains_def(b2, input)); \ + ASSERT_TRUE(shader_contains_uniform(b2, bitsize, 1)); \ +} + +#define TEST_UNIFORM_PROP_XFB(producer_stage, consumer_stage, slot, comp, type, bitsize) \ +TEST_F(nir_opt_varyings_test_prop_uniform, \ + xfb_prop_##producer_stage##_##consumer_stage##_##slot##_##comp##_##type##bitsize) \ +{ \ + SHADER_UNIFORM_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, 1, 1) \ + \ + /* XFB-only outputs are moved to VARn. 
*/ \ + if (MESA_SHADER_##consumer_stage == MESA_SHADER_FRAGMENT &&\ + VARYING_SLOT_##slot == VARYING_SLOT_TEX0) \ + pindex = VARYING_SLOT_VAR0; \ + \ + nir_io_xfb xfb; \ + memset(&xfb, 0, sizeof(xfb)); \ + xfb.out[comp % 2].num_components = 1; \ + if (comp <= 1) { \ + nir_intrinsic_set_io_xfb(store, xfb); \ + if (store2) \ + nir_intrinsic_set_io_xfb(store2, xfb); \ + if (store3) \ + nir_intrinsic_set_io_xfb(store3, xfb); \ + } else { \ + nir_intrinsic_set_io_xfb2(store, xfb); \ + if (store2) \ + nir_intrinsic_set_io_xfb2(store2, xfb); \ + if (store3) \ + nir_intrinsic_set_io_xfb2(store3, xfb); \ + } \ + \ + ASSERT_TRUE(opt_varyings() == nir_progress_consumer); \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(pindex)); \ + ASSERT_TRUE(nir_intrinsic_io_semantics(store).no_varying); \ + ASSERT_TRUE(b2->shader->info.inputs_read == 0 && \ + b2->shader->info.patch_inputs_read == 0 && \ + b2->shader->info.inputs_read_16bit == 0); \ + ASSERT_TRUE(!shader_contains_def(b2, input)); \ + ASSERT_TRUE(shader_contains_uniform(b2, bitsize, 1)); \ +} + +#define TEST_UNIFORM_KEPT_2VAL(producer_stage, consumer_stage, slot, comp, type, bitsize, index0, index1, suffix) \ +TEST_F(nir_opt_varyings_test_prop_uniform, \ + kept_##suffix##producer_stage##_##consumer_stage##_##slot##_##comp##_##type##bitsize) \ +{ \ + SHADER_UNIFORM_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, index0, index1) \ + \ + ASSERT_TRUE(opt_varyings() == 0); \ + if (pindex >= VARYING_SLOT_VAR0_16BIT) { \ + ASSERT_TRUE(b1->shader->info.outputs_written_16bit == \ + BITFIELD_BIT(pindex - VARYING_SLOT_VAR0_16BIT)); \ + ASSERT_TRUE(b2->shader->info.inputs_read_16bit == \ + BITFIELD_BIT(cindex - VARYING_SLOT_VAR0_16BIT)); \ + } else if (pindex >= VARYING_SLOT_PATCH0) { \ + ASSERT_TRUE(b1->shader->info.patch_outputs_written == \ + BITFIELD_BIT(pindex - VARYING_SLOT_PATCH0)); \ + ASSERT_TRUE(b2->shader->info.patch_inputs_read == \ + BITFIELD_BIT(cindex - VARYING_SLOT_PATCH0)); \ + } else { \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(pindex)); \ + ASSERT_TRUE(b2->shader->info.inputs_read == BITFIELD64_BIT(cindex)); \ + } \ + ASSERT_TRUE(shader_contains_instr(b1, &store->instr)); \ + ASSERT_TRUE(shader_contains_def(b2, input)); \ + ASSERT_TRUE(!shader_contains_uniform(b2, bitsize, 0)); \ + ASSERT_TRUE(!shader_contains_uniform(b2, bitsize, 1)); \ +} + +#define TEST_UNIFORM_KEPT(producer_stage, consumer_stage, slot, comp, type, bitsize) \ + TEST_UNIFORM_KEPT_2VAL(producer_stage, consumer_stage, slot, comp, type, bitsize, 0, 0, ) + +#define TEST_UNIFORM_KEPT_DIFF(producer_stage, consumer_stage, slot, comp, type, bitsize) \ + TEST_UNIFORM_KEPT_2VAL(producer_stage, consumer_stage, slot, comp, type, bitsize, 0, 1, diff_) + +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, POS, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, COL0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, COL1, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, BFC0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, BFC1, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, FOGC, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, TEX0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, PSIZ, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, CLIP_VERTEX, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, LAYER, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, VIEWPORT, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, VAR0, 0, float, 32)
+TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, VAR0, 0, float, 16) +TEST_UNIFORM_PROP(VERTEX, TESS_CTRL, VAR0_16BIT, 0, float, 16) + +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, POS, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, COL0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, COL1, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, BFC0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, BFC1, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, FOGC, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, TEX0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, PSIZ, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, CLIP_VERTEX, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, LAYER, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, VIEWPORT, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, VAR0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, VAR0, 0, float, 16) +TEST_UNIFORM_PROP(VERTEX, TESS_EVAL, VAR0_16BIT, 0, float, 16) + +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, POS, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, COL0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, COL1, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, BFC0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, BFC1, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, FOGC, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, TEX0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, PSIZ, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, CLIP_VERTEX, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, LAYER, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, VIEWPORT, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 16) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, VAR0_16BIT, 0, float, 16) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, TESS_LEVEL_INNER, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, TESS_LEVEL_OUTER, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, PATCH0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_CTRL, TESS_EVAL, PATCH0, 0, float, 16) + +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, POS, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, COL0, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, COL1, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, BFC0, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, BFC1, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, FOGC, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, TEX0, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, PSIZ, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, CLIP_VERTEX, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, LAYER, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VIEWPORT, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 16) +TEST_UNIFORM_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VAR0_16BIT, 0, float, 16) + +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, POS, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, COL0, 0, 
float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, COL1, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, BFC0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, BFC1, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, FOGC, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, TEX0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, PSIZ, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, CLIP_VERTEX, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, LAYER, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, VIEWPORT, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, VAR0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, VAR0, 0, float, 16) +TEST_UNIFORM_PROP(VERTEX, GEOMETRY, VAR0_16BIT, 0, float, 16) + +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, POS, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, COL0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, COL1, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, BFC0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, BFC1, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, FOGC, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, TEX0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, PSIZ, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, CLIP_VERTEX, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, LAYER, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, VIEWPORT, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, VAR0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, VAR0, 0, float, 16) +TEST_UNIFORM_PROP(TESS_EVAL, GEOMETRY, VAR0_16BIT, 0, float, 16) + +TEST_UNIFORM_KEPT(VERTEX, FRAGMENT, COL0, 0, float, 32) +TEST_UNIFORM_KEPT(VERTEX, FRAGMENT, COL1, 0, float, 32) +TEST_UNIFORM_KEPT(VERTEX, FRAGMENT, BFC0, 0, float, 32) +TEST_UNIFORM_KEPT(VERTEX, FRAGMENT, BFC1, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, FRAGMENT, FOGC, 0, float, 32) +TEST_UNIFORM_KEPT(VERTEX, FRAGMENT, TEX0, 0, float, 32) +TEST_UNIFORM_KEPT(VERTEX, FRAGMENT, TEX0, 2, float, 32) +TEST_UNIFORM_KEPT(VERTEX, FRAGMENT, TEX0, 3, float, 32) +TEST_UNIFORM_KEPT(VERTEX, FRAGMENT, PSIZ, 0, float, 32) +TEST_UNIFORM_KEPT(VERTEX, FRAGMENT, CLIP_VERTEX, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, FRAGMENT, LAYER, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, FRAGMENT, VAR0, 0, float, 32) +TEST_UNIFORM_PROP(VERTEX, FRAGMENT, VAR0, 0, float, 16) +TEST_UNIFORM_PROP(VERTEX, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNIFORM_PROP_XFB(VERTEX, FRAGMENT, FOGC, 0, float, 32) +TEST_UNIFORM_PROP_XFB(VERTEX, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_PROP_XFB(VERTEX, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_PROP_XFB(VERTEX, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNIFORM_PROP_XFB(VERTEX, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNIFORM_PROP_XFB(VERTEX, FRAGMENT, LAYER, 0, float, 32) +TEST_UNIFORM_PROP_XFB(VERTEX, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNIFORM_PROP_XFB(VERTEX, FRAGMENT, VAR0, 0, float, 32) +TEST_UNIFORM_PROP_XFB(VERTEX, FRAGMENT, VAR0, 0, float, 16) 
+TEST_UNIFORM_PROP_XFB(VERTEX, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNIFORM_KEPT(TESS_EVAL, FRAGMENT, COL0, 0, float, 32) +TEST_UNIFORM_KEPT(TESS_EVAL, FRAGMENT, COL1, 0, float, 32) +TEST_UNIFORM_KEPT(TESS_EVAL, FRAGMENT, BFC0, 0, float, 32) +TEST_UNIFORM_KEPT(TESS_EVAL, FRAGMENT, BFC1, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, FRAGMENT, FOGC, 0, float, 32) +TEST_UNIFORM_KEPT(TESS_EVAL, FRAGMENT, TEX0, 0, float, 32) +TEST_UNIFORM_KEPT(TESS_EVAL, FRAGMENT, TEX0, 2, float, 32) +TEST_UNIFORM_KEPT(TESS_EVAL, FRAGMENT, TEX0, 3, float, 32) +TEST_UNIFORM_KEPT(TESS_EVAL, FRAGMENT, PSIZ, 0, float, 32) +TEST_UNIFORM_KEPT(TESS_EVAL, FRAGMENT, CLIP_VERTEX, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, FRAGMENT, LAYER, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, FRAGMENT, VAR0, 0, float, 32) +TEST_UNIFORM_PROP(TESS_EVAL, FRAGMENT, VAR0, 0, float, 16) +TEST_UNIFORM_PROP(TESS_EVAL, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNIFORM_PROP_XFB(TESS_EVAL, FRAGMENT, FOGC, 0, float, 32) +TEST_UNIFORM_PROP_XFB(TESS_EVAL, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_PROP_XFB(TESS_EVAL, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_PROP_XFB(TESS_EVAL, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNIFORM_PROP_XFB(TESS_EVAL, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNIFORM_PROP_XFB(TESS_EVAL, FRAGMENT, LAYER, 0, float, 32) +TEST_UNIFORM_PROP_XFB(TESS_EVAL, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNIFORM_PROP_XFB(TESS_EVAL, FRAGMENT, VAR0, 0, float, 32) +TEST_UNIFORM_PROP_XFB(TESS_EVAL, FRAGMENT, VAR0, 0, float, 16) +TEST_UNIFORM_PROP_XFB(TESS_EVAL, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNIFORM_KEPT(GEOMETRY, FRAGMENT, COL0, 0, float, 32) +TEST_UNIFORM_KEPT(GEOMETRY, FRAGMENT, COL1, 0, float, 32) +TEST_UNIFORM_KEPT(GEOMETRY, FRAGMENT, BFC0, 0, float, 32) +TEST_UNIFORM_KEPT(GEOMETRY, FRAGMENT, BFC1, 0, float, 32) +TEST_UNIFORM_PROP(GEOMETRY, FRAGMENT, FOGC, 0, float, 32) +TEST_UNIFORM_KEPT(GEOMETRY, FRAGMENT, TEX0, 0, float, 32) +TEST_UNIFORM_KEPT(GEOMETRY, FRAGMENT, TEX0, 2, float, 32) +TEST_UNIFORM_KEPT(GEOMETRY, FRAGMENT, TEX0, 3, float, 32) +TEST_UNIFORM_KEPT(GEOMETRY, FRAGMENT, PSIZ, 0, float, 32) +TEST_UNIFORM_KEPT(GEOMETRY, FRAGMENT, CLIP_VERTEX, 0, float, 32) +TEST_UNIFORM_PROP(GEOMETRY, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_PROP(GEOMETRY, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_PROP(GEOMETRY, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNIFORM_PROP(GEOMETRY, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNIFORM_PROP(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 0, float, 32) +TEST_UNIFORM_PROP(GEOMETRY, FRAGMENT, LAYER, 0, float, 32) +TEST_UNIFORM_PROP(GEOMETRY, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNIFORM_PROP(GEOMETRY, FRAGMENT, VAR0, 0, float, 32) +TEST_UNIFORM_PROP(GEOMETRY, FRAGMENT, VAR0, 0, float, 16) +TEST_UNIFORM_PROP(GEOMETRY, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNIFORM_PROP_XFB(GEOMETRY, FRAGMENT, FOGC, 0, float, 32) +TEST_UNIFORM_PROP_XFB(GEOMETRY, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_PROP_XFB(GEOMETRY, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_PROP_XFB(GEOMETRY, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNIFORM_PROP_XFB(GEOMETRY, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNIFORM_PROP_XFB(GEOMETRY, FRAGMENT, 
PRIMITIVE_ID, 0, float, 32) +TEST_UNIFORM_PROP_XFB(GEOMETRY, FRAGMENT, LAYER, 0, float, 32) +TEST_UNIFORM_PROP_XFB(GEOMETRY, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNIFORM_PROP_XFB(GEOMETRY, FRAGMENT, VAR0, 0, float, 32) +TEST_UNIFORM_PROP_XFB(GEOMETRY, FRAGMENT, VAR0, 0, float, 16) +TEST_UNIFORM_PROP_XFB(GEOMETRY, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, COL0, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, COL1, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, BFC0, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, BFC1, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, FOGC, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, TEX0, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, PSIZ, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, CLIP_VERTEX, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, LAYER, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, VAR0, 0, float, 32) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, VAR0, 0, float, 16) +TEST_UNIFORM_KEPT_DIFF(GEOMETRY, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNIFORM_KEPT(MESH, FRAGMENT, PSIZ, 0, float, 32) +TEST_UNIFORM_PROP(MESH, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNIFORM_PROP(MESH, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNIFORM_PROP(MESH, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNIFORM_PROP(MESH, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNIFORM_PROP(MESH, FRAGMENT, LAYER, 0, float, 32) +TEST_UNIFORM_PROP(MESH, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNIFORM_PROP(MESH, FRAGMENT, VAR0, 0, float, 32) +TEST_UNIFORM_PROP(MESH, FRAGMENT, VAR0, 0, float, 16) +TEST_UNIFORM_PROP(MESH, FRAGMENT, VAR0_16BIT, 0, float, 16) + +} diff --git a/src/compiler/nir/tests/opt_varyings_tests_prop_uniform_expr.cpp b/src/compiler/nir/tests/opt_varyings_tests_prop_uniform_expr.cpp new file mode 100644 index 00000000000..75579f34fd0 --- /dev/null +++ b/src/compiler/nir/tests/opt_varyings_tests_prop_uniform_expr.cpp @@ -0,0 +1,380 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * SPDX-License-Identifier: MIT + */ + +#include "nir_opt_varyings_test.h" + +class nir_opt_varyings_test_prop_uniform_expr : public nir_opt_varyings_test +{}; + +#define SHADER_UNI_EXPR_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, index0, index1) \ + create_shaders(MESA_SHADER_##producer_stage, MESA_SHADER_##consumer_stage); \ + UNUSED nir_intrinsic_instr *store, *store2 = NULL, *store3 = NULL; \ + store = \ + store_output(b1, VARYING_SLOT_##slot, comp, nir_type_float##bitsize, \ + build_uniform_expr(b1, bitsize, index0), 0); \ + if (is_per_vertex(b1, VARYING_SLOT_##slot, false) || \ + MESA_SHADER_##producer_stage == MESA_SHADER_GEOMETRY) { \ + store2 = store_output(b1, VARYING_SLOT_##slot, comp, nir_type_float##bitsize, \ + build_uniform_expr(b1, bitsize, index1), 1); \ + store3 = store_output(b1, VARYING_SLOT_##slot, comp, nir_type_float##bitsize, \ + build_uniform_expr(b1, bitsize, index1), 2); \ + } \ + \ + UNUSED unsigned pindex = VARYING_SLOT_##slot; \ + unsigned cindex = VARYING_SLOT_##slot; \ + if (MESA_SHADER_##consumer_stage == MESA_SHADER_FRAGMENT && \ + (cindex == VARYING_SLOT_BFC0 || cindex == VARYING_SLOT_BFC1)) \ + cindex -= VARYING_SLOT_BFC0 - VARYING_SLOT_COL0; \ + \ + nir_def *input = load_input(b2, (gl_varying_slot)cindex, comp, nir_type_##type##bitsize, 0, 0); \ + store_output(b2, VARYING_SLOT_VAR0, 0, nir_type_float##bitsize, input, 0); \ + nir_def *input2 = load_input(b2, (gl_varying_slot)cindex, comp, nir_type_##type##bitsize, 1, 0); \ + store_output(b2, VARYING_SLOT_VAR1, 0, nir_type_float##bitsize, input2, 0); \ + \ + if (MESA_SHADER_##consumer_stage == MESA_SHADER_FRAGMENT) { \ + /* Compaction moves COL1 to COL0. */ \ + if (cindex == VARYING_SLOT_COL1) { \ + pindex--; \ + cindex--; \ + } \ + \ + /* Compaction moves all these to VAR0. */ \ + if (cindex == VARYING_SLOT_FOGC || cindex == VARYING_SLOT_PRIMITIVE_ID || \ + cindex == VARYING_SLOT_VAR0_16BIT) \ + pindex = cindex = VARYING_SLOT_VAR0; \ + } else { \ + /* Compaction moves everything else to POS. 
*/ \ + if (!is_patch((gl_varying_slot)cindex)) { \ + pindex = cindex = VARYING_SLOT_POS; \ + } \ + } + +#define TEST_UNI_EXPR_PROP(producer_stage, consumer_stage, slot, comp, type, bitsize) \ +TEST_F(nir_opt_varyings_test_prop_uniform_expr, \ + prop_##producer_stage##_##consumer_stage##_##slot##_##comp##_##type##bitsize) \ +{ \ + SHADER_UNI_EXPR_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, 1, 1) \ + \ + if (nir_slot_is_sysval_output((gl_varying_slot)pindex, MESA_SHADER_##consumer_stage)) { \ + ASSERT_TRUE(opt_varyings() == nir_progress_consumer); \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(pindex)); \ + ASSERT_TRUE(nir_intrinsic_io_semantics(store).no_varying); \ + } else { \ + ASSERT_TRUE(opt_varyings() == (nir_progress_producer | nir_progress_consumer)); \ + ASSERT_TRUE(b1->shader->info.outputs_written == 0 && \ + b1->shader->info.patch_outputs_written == 0 && \ + b1->shader->info.outputs_written_16bit == 0); \ + ASSERT_TRUE(!shader_contains_instr(b1, &store->instr)); \ + ASSERT_TRUE(!store2 || !shader_contains_instr(b1, &store2->instr)); \ + ASSERT_TRUE(!store3 || !shader_contains_instr(b1, &store3->instr)); \ + } \ + ASSERT_TRUE(b2->shader->info.inputs_read == 0 && \ + b2->shader->info.patch_inputs_read == 0 && \ + b2->shader->info.inputs_read_16bit == 0); \ + ASSERT_TRUE(!shader_contains_def(b2, input)); \ + ASSERT_TRUE(shader_contains_uniform_expr(b2, bitsize, 1, true)); \ +} + +#define TEST_UNI_EXPR_PROP_XFB(producer_stage, consumer_stage, slot, comp, type, bitsize) \ +TEST_F(nir_opt_varyings_test_prop_uniform_expr, \ + xfb_prop_##producer_stage##_##consumer_stage##_##slot##_##comp##_##type##bitsize) \ +{ \ + SHADER_UNI_EXPR_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, 1, 1) \ + \ + /* XFB-only outputs are moved to VARn. 
*/ \ + if (MESA_SHADER_##consumer_stage == MESA_SHADER_FRAGMENT &&\ + VARYING_SLOT_##slot == VARYING_SLOT_TEX0) \ + pindex = VARYING_SLOT_VAR0; \ + \ + nir_io_xfb xfb; \ + memset(&xfb, 0, sizeof(xfb)); \ + xfb.out[comp % 2].num_components = 1; \ + if (comp <= 1) { \ + nir_intrinsic_set_io_xfb(store, xfb); \ + if (store2) \ + nir_intrinsic_set_io_xfb(store2, xfb); \ + if (store3) \ + nir_intrinsic_set_io_xfb(store3, xfb); \ + } else { \ + nir_intrinsic_set_io_xfb2(store, xfb); \ + if (store2) \ + nir_intrinsic_set_io_xfb2(store2, xfb); \ + if (store3) \ + nir_intrinsic_set_io_xfb2(store3, xfb); \ + } \ + \ + ASSERT_TRUE(opt_varyings() == nir_progress_consumer); \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(pindex)); \ + ASSERT_TRUE(nir_intrinsic_io_semantics(store).no_varying); \ + ASSERT_TRUE(b2->shader->info.inputs_read == 0 && \ + b2->shader->info.patch_inputs_read == 0 && \ + b2->shader->info.inputs_read_16bit == 0); \ + ASSERT_TRUE(!shader_contains_def(b2, input)); \ + ASSERT_TRUE(shader_contains_uniform_expr(b2, bitsize, 1, true)); \ +} + +#define TEST_UNI_EXPR_KEPT_2VAL(producer_stage, consumer_stage, slot, comp, type, bitsize, index0, index1, suffix) \ +TEST_F(nir_opt_varyings_test_prop_uniform_expr, \ + kept_##suffix##producer_stage##_##consumer_stage##_##slot##_##comp##_##type##bitsize) \ +{ \ + SHADER_UNI_EXPR_OUTPUT(producer_stage, consumer_stage, slot, comp, type, bitsize, index0, index1) \ + \ + ASSERT_TRUE(opt_varyings() == 0); \ + if (pindex >= VARYING_SLOT_VAR0_16BIT) { \ + ASSERT_TRUE(b1->shader->info.outputs_written_16bit == \ + BITFIELD_BIT(pindex - VARYING_SLOT_VAR0_16BIT)); \ + ASSERT_TRUE(b2->shader->info.inputs_read_16bit == \ + BITFIELD_BIT(cindex - VARYING_SLOT_VAR0_16BIT)); \ + } else if (pindex >= VARYING_SLOT_PATCH0) { \ + ASSERT_TRUE(b1->shader->info.patch_outputs_written == \ + BITFIELD_BIT(pindex - VARYING_SLOT_PATCH0)); \ + ASSERT_TRUE(b2->shader->info.patch_inputs_read == \ + BITFIELD_BIT(cindex - VARYING_SLOT_PATCH0)); \ + } else { \ + ASSERT_TRUE(b1->shader->info.outputs_written == BITFIELD64_BIT(pindex)); \ + ASSERT_TRUE(b2->shader->info.inputs_read == BITFIELD64_BIT(cindex)); \ + } \ + ASSERT_TRUE(shader_contains_instr(b1, &store->instr)); \ + ASSERT_TRUE(shader_contains_def(b2, input)); \ + ASSERT_TRUE(shader_contains_uniform_expr(b2, bitsize, 0, false)); \ +} + +#define TEST_UNI_EXPR_KEPT(producer_stage, consumer_stage, slot, comp, type, bitsize) \ + TEST_UNI_EXPR_KEPT_2VAL(producer_stage, consumer_stage, slot, comp, type, bitsize, 0, 0, ) + +#define TEST_UNI_EXPR_KEPT_DIFF(producer_stage, consumer_stage, slot, comp, type, bitsize) \ + TEST_UNI_EXPR_KEPT_2VAL(producer_stage, consumer_stage, slot, comp, type, bitsize, 0, 1, diff_) + +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, POS, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, COL0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, COL1, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, BFC0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, BFC1, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, FOGC, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, TEX0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, PSIZ, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, CLIP_VERTEX, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, CLIP_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, LAYER, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, VIEWPORT, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, VAR0, 0, float, 32)
+TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, VAR0, 0, float, 16) +TEST_UNI_EXPR_PROP(VERTEX, TESS_CTRL, VAR0_16BIT, 0, float, 16) + +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, POS, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, COL0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, COL1, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, BFC0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, BFC1, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, FOGC, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, TEX0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, PSIZ, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, CLIP_VERTEX, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, CLIP_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, LAYER, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, VIEWPORT, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, VAR0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, VAR0, 0, float, 16) +TEST_UNI_EXPR_PROP(VERTEX, TESS_EVAL, VAR0_16BIT, 0, float, 16) + +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, POS, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, COL0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, COL1, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, BFC0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, BFC1, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, FOGC, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, TEX0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, PSIZ, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, CLIP_VERTEX, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, CLIP_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, LAYER, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, VIEWPORT, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 16) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, VAR0_16BIT, 0, float, 16) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, TESS_LEVEL_INNER, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, TESS_LEVEL_OUTER, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, PATCH0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_CTRL, TESS_EVAL, PATCH0, 0, float, 16) + +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, POS, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, COL0, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, COL1, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, BFC0, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, BFC1, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, FOGC, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, TEX0, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, PSIZ, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, CLIP_VERTEX, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, CLIP_DIST1, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, LAYER, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VIEWPORT, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VAR0, 0, float, 16) +TEST_UNI_EXPR_KEPT_DIFF(TESS_CTRL, TESS_EVAL, VAR0_16BIT, 0, float, 16) + +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, POS, 0, 
float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, COL0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, COL1, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, BFC0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, BFC1, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, FOGC, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, TEX0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, PSIZ, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, CLIP_VERTEX, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, CLIP_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, LAYER, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, VIEWPORT, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, VAR0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, VAR0, 0, float, 16) +TEST_UNI_EXPR_PROP(VERTEX, GEOMETRY, VAR0_16BIT, 0, float, 16) + +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, POS, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, COL0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, COL1, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, BFC0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, BFC1, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, FOGC, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, TEX0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, PSIZ, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, CLIP_VERTEX, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, CLIP_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, LAYER, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, VIEWPORT, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, VAR0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, VAR0, 0, float, 16) +TEST_UNI_EXPR_PROP(TESS_EVAL, GEOMETRY, VAR0_16BIT, 0, float, 16) + +TEST_UNI_EXPR_KEPT(VERTEX, FRAGMENT, COL0, 0, float, 32) +TEST_UNI_EXPR_KEPT(VERTEX, FRAGMENT, COL1, 0, float, 32) +TEST_UNI_EXPR_KEPT(VERTEX, FRAGMENT, BFC0, 0, float, 32) +TEST_UNI_EXPR_KEPT(VERTEX, FRAGMENT, BFC1, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, FRAGMENT, FOGC, 0, float, 32) +TEST_UNI_EXPR_KEPT(VERTEX, FRAGMENT, TEX0, 0, float, 32) +TEST_UNI_EXPR_KEPT(VERTEX, FRAGMENT, TEX0, 2, float, 32) +TEST_UNI_EXPR_KEPT(VERTEX, FRAGMENT, TEX0, 3, float, 32) +TEST_UNI_EXPR_KEPT(VERTEX, FRAGMENT, PSIZ, 0, float, 32) +TEST_UNI_EXPR_KEPT(VERTEX, FRAGMENT, CLIP_VERTEX, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, FRAGMENT, LAYER, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, FRAGMENT, VAR0, 0, float, 32) +TEST_UNI_EXPR_PROP(VERTEX, FRAGMENT, VAR0, 0, float, 16) +TEST_UNI_EXPR_PROP(VERTEX, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNI_EXPR_PROP_XFB(VERTEX, FRAGMENT, FOGC, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(VERTEX, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(VERTEX, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(VERTEX, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(VERTEX, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(VERTEX, FRAGMENT, LAYER, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(VERTEX, FRAGMENT, VIEWPORT, 0, float, 32) 
+TEST_UNI_EXPR_PROP_XFB(VERTEX, FRAGMENT, VAR0, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(VERTEX, FRAGMENT, VAR0, 0, float, 16) +TEST_UNI_EXPR_PROP_XFB(VERTEX, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNI_EXPR_KEPT(TESS_EVAL, FRAGMENT, COL0, 0, float, 32) +TEST_UNI_EXPR_KEPT(TESS_EVAL, FRAGMENT, COL1, 0, float, 32) +TEST_UNI_EXPR_KEPT(TESS_EVAL, FRAGMENT, BFC0, 0, float, 32) +TEST_UNI_EXPR_KEPT(TESS_EVAL, FRAGMENT, BFC1, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, FRAGMENT, FOGC, 0, float, 32) +TEST_UNI_EXPR_KEPT(TESS_EVAL, FRAGMENT, TEX0, 0, float, 32) +TEST_UNI_EXPR_KEPT(TESS_EVAL, FRAGMENT, TEX0, 2, float, 32) +TEST_UNI_EXPR_KEPT(TESS_EVAL, FRAGMENT, TEX0, 3, float, 32) +TEST_UNI_EXPR_KEPT(TESS_EVAL, FRAGMENT, PSIZ, 0, float, 32) +TEST_UNI_EXPR_KEPT(TESS_EVAL, FRAGMENT, CLIP_VERTEX, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, FRAGMENT, LAYER, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, FRAGMENT, VAR0, 0, float, 32) +TEST_UNI_EXPR_PROP(TESS_EVAL, FRAGMENT, VAR0, 0, float, 16) +TEST_UNI_EXPR_PROP(TESS_EVAL, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNI_EXPR_PROP_XFB(TESS_EVAL, FRAGMENT, FOGC, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(TESS_EVAL, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(TESS_EVAL, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(TESS_EVAL, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(TESS_EVAL, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(TESS_EVAL, FRAGMENT, LAYER, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(TESS_EVAL, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(TESS_EVAL, FRAGMENT, VAR0, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(TESS_EVAL, FRAGMENT, VAR0, 0, float, 16) +TEST_UNI_EXPR_PROP_XFB(TESS_EVAL, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNI_EXPR_KEPT(GEOMETRY, FRAGMENT, COL0, 0, float, 32) +TEST_UNI_EXPR_KEPT(GEOMETRY, FRAGMENT, COL1, 0, float, 32) +TEST_UNI_EXPR_KEPT(GEOMETRY, FRAGMENT, BFC0, 0, float, 32) +TEST_UNI_EXPR_KEPT(GEOMETRY, FRAGMENT, BFC1, 0, float, 32) +TEST_UNI_EXPR_PROP(GEOMETRY, FRAGMENT, FOGC, 0, float, 32) +TEST_UNI_EXPR_KEPT(GEOMETRY, FRAGMENT, TEX0, 0, float, 32) +TEST_UNI_EXPR_KEPT(GEOMETRY, FRAGMENT, TEX0, 2, float, 32) +TEST_UNI_EXPR_KEPT(GEOMETRY, FRAGMENT, TEX0, 3, float, 32) +TEST_UNI_EXPR_KEPT(GEOMETRY, FRAGMENT, PSIZ, 0, float, 32) +TEST_UNI_EXPR_KEPT(GEOMETRY, FRAGMENT, CLIP_VERTEX, 0, float, 32) +TEST_UNI_EXPR_PROP(GEOMETRY, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP(GEOMETRY, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP(GEOMETRY, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP(GEOMETRY, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 0, float, 32) +TEST_UNI_EXPR_PROP(GEOMETRY, FRAGMENT, LAYER, 0, float, 32) +TEST_UNI_EXPR_PROP(GEOMETRY, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNI_EXPR_PROP(GEOMETRY, FRAGMENT, VAR0, 0, float, 32) +TEST_UNI_EXPR_PROP(GEOMETRY, FRAGMENT, VAR0, 0, float, 16) +TEST_UNI_EXPR_PROP(GEOMETRY, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNI_EXPR_PROP_XFB(GEOMETRY, FRAGMENT, FOGC, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(GEOMETRY, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(GEOMETRY, FRAGMENT, CLIP_DIST1, 0, float, 32) 
+TEST_UNI_EXPR_PROP_XFB(GEOMETRY, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(GEOMETRY, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(GEOMETRY, FRAGMENT, LAYER, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(GEOMETRY, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(GEOMETRY, FRAGMENT, VAR0, 0, float, 32) +TEST_UNI_EXPR_PROP_XFB(GEOMETRY, FRAGMENT, VAR0, 0, float, 16) +TEST_UNI_EXPR_PROP_XFB(GEOMETRY, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, COL0, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, COL1, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, BFC0, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, BFC1, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, FOGC, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, TEX0, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, PSIZ, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, CLIP_VERTEX, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, PRIMITIVE_ID, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, LAYER, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, VAR0, 0, float, 32) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, VAR0, 0, float, 16) +TEST_UNI_EXPR_KEPT_DIFF(GEOMETRY, FRAGMENT, VAR0_16BIT, 0, float, 16) + +TEST_UNI_EXPR_KEPT(MESH, FRAGMENT, PSIZ, 0, float, 32) +TEST_UNI_EXPR_PROP(MESH, FRAGMENT, CLIP_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP(MESH, FRAGMENT, CLIP_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP(MESH, FRAGMENT, CULL_DIST0, 0, float, 32) +TEST_UNI_EXPR_PROP(MESH, FRAGMENT, CULL_DIST1, 0, float, 32) +TEST_UNI_EXPR_PROP(MESH, FRAGMENT, LAYER, 0, float, 32) +TEST_UNI_EXPR_PROP(MESH, FRAGMENT, VIEWPORT, 0, float, 32) +TEST_UNI_EXPR_PROP(MESH, FRAGMENT, VAR0, 0, float, 32) +TEST_UNI_EXPR_PROP(MESH, FRAGMENT, VAR0, 0, float, 16) +TEST_UNI_EXPR_PROP(MESH, FRAGMENT, VAR0_16BIT, 0, float, 16) + +}
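Each TEST_* line above instantiates a complete GTest case via the macros at the top of its file. As a rough orientation sketch, not a literal preprocessor expansion (the second load_input/store_output pair and the patch/16-bit mask checks are omitted, and the helpers store_output, load_input, load_uniform, opt_varyings and shader_contains_* from nir_opt_varyings_test.h are assumed to behave as their call sites above suggest), TEST_UNIFORM_PROP(VERTEX, FRAGMENT, VAR0, 0, float, 32) boils down to:

TEST_F(nir_opt_varyings_test_prop_uniform, prop_VERTEX_FRAGMENT_VAR0_0_float32)
{
   create_shaders(MESA_SHADER_VERTEX, MESA_SHADER_FRAGMENT);

   /* Producer: write a value loaded from a uniform to VAR0.x. */
   nir_intrinsic_instr *store =
      store_output(b1, VARYING_SLOT_VAR0, 0, nir_type_float32,
                   load_uniform(b1, 32, 1), 0);

   /* Consumer: read VAR0.x and forward it to its own output. */
   nir_def *input =
      load_input(b2, VARYING_SLOT_VAR0, 0, nir_type_float32, 0, 0);
   store_output(b2, VARYING_SLOT_VAR0, 0, nir_type_float32, input, 0);

   /* VAR0 is not a sysval output for a fragment consumer, so the varying
    * must vanish on both sides and the consumer must load the uniform
    * directly instead. */
   ASSERT_TRUE(opt_varyings() ==
               (nir_progress_producer | nir_progress_consumer));
   ASSERT_TRUE(b1->shader->info.outputs_written == 0);
   ASSERT_TRUE(!shader_contains_instr(b1, &store->instr));
   ASSERT_TRUE(b2->shader->info.inputs_read == 0);
   ASSERT_TRUE(!shader_contains_def(b2, input));
   ASSERT_TRUE(shader_contains_uniform(b2, 32, 1));
}

The TEST_UNI_EXPR_* cases follow the same pattern, except that the producer stores the result of build_uniform_expr() (a small ALU expression over a uniform and a UBO load) and the final check becomes shader_contains_uniform_expr(), i.e. nir_opt_varyings is expected to move the whole expression, not just a single load, into the consumer.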