diff --git a/src/broadcom/compiler/nir_to_vir.c b/src/broadcom/compiler/nir_to_vir.c
index bc225e6dba4..548acba19fa 100644
--- a/src/broadcom/compiler/nir_to_vir.c
+++ b/src/broadcom/compiler/nir_to_vir.c
@@ -2089,7 +2089,7 @@ mem_vectorize_callback(unsigned align_mul, unsigned align_offset,
                        nir_intrinsic_instr *high,
                        void *data)
 {
-        if (hole_size)
+        if (hole_size || !nir_num_components_valid(num_components))
                 return false;
 
         /* TMU general access only supports 32-bit vectors */
diff --git a/src/compiler/nir/tests/load_store_vectorizer_tests.cpp b/src/compiler/nir/tests/load_store_vectorizer_tests.cpp
index 72db7ad0142..1ee699b96a4 100644
--- a/src/compiler/nir/tests/load_store_vectorizer_tests.cpp
+++ b/src/compiler/nir/tests/load_store_vectorizer_tests.cpp
@@ -340,7 +340,7 @@ bool nir_load_store_vectorize_test::mem_vectorize_callback(
    nir_intrinsic_instr *low, nir_intrinsic_instr *high,
    void *data)
 {
-   if (hole_size)
+   if (hole_size || !nir_num_components_valid(num_components))
       return false;
 
    /* Calculate a simple alignment, like how nir_intrinsic_align() does. */
diff --git a/src/freedreno/ir3/ir3_nir.c b/src/freedreno/ir3/ir3_nir.c
index cb5e9fdab79..ff34b3aa8df 100644
--- a/src/freedreno/ir3/ir3_nir.c
+++ b/src/freedreno/ir3/ir3_nir.c
@@ -113,7 +113,7 @@ ir3_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
                              unsigned hole_size, nir_intrinsic_instr *low,
                              nir_intrinsic_instr *high, void *data)
 {
-   if (hole_size)
+   if (hole_size || !nir_num_components_valid(num_components))
       return false;
 
    struct ir3_compiler *compiler = data;
diff --git a/src/gallium/auxiliary/nir/nir_to_tgsi.c b/src/gallium/auxiliary/nir/nir_to_tgsi.c
index 0701c0d44c5..1a76aa3e7fd 100644
--- a/src/gallium/auxiliary/nir/nir_to_tgsi.c
+++ b/src/gallium/auxiliary/nir/nir_to_tgsi.c
@@ -3273,7 +3273,7 @@ ntt_should_vectorize_io(unsigned align, unsigned bit_size,
                         nir_intrinsic_instr *low, nir_intrinsic_instr *high,
                         void *data)
 {
-   if (bit_size != 32 || hole_size)
+   if (bit_size != 32 || hole_size || !nir_num_components_valid(num_components))
       return false;
 
    /* Our offset alignment should aways be at least 4 bytes */
diff --git a/src/intel/compiler/brw_nir.c b/src/intel/compiler/brw_nir.c
index 5f11f53127a..eec86ca0424 100644
--- a/src/intel/compiler/brw_nir.c
+++ b/src/intel/compiler/brw_nir.c
@@ -1420,7 +1420,7 @@ brw_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
     * those back into 32-bit ones anyway and UBO loads aren't split in NIR so
     * we don't want to make a mess for the back-end.
     */
-   if (bit_size > 32 || hole_size)
+   if (bit_size > 32 || hole_size || !nir_num_components_valid(num_components))
      return false;
 
    if (low->intrinsic == nir_intrinsic_load_ubo_uniform_block_intel ||
diff --git a/src/intel/compiler/elk/elk_nir.c b/src/intel/compiler/elk/elk_nir.c
index 3be383a85c0..c3fec24ece9 100644
--- a/src/intel/compiler/elk/elk_nir.c
+++ b/src/intel/compiler/elk/elk_nir.c
@@ -1137,7 +1137,7 @@ elk_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
     * those back into 32-bit ones anyway and UBO loads aren't split in NIR so
     * we don't want to make a mess for the back-end.
     */
-   if (bit_size > 32 || hole_size)
+   if (bit_size > 32 || hole_size || !nir_num_components_valid(num_components))
      return false;
 
    if (low->intrinsic == nir_intrinsic_load_ubo_uniform_block_intel ||
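
For context: each callback above now also rejects merges whose combined
component count is not one NIR can represent. The guard comes from the
nir_num_components_valid() helper in src/compiler/nir/nir.h; the sketch
below reproduces it from memory for reference, so check the header for
the authoritative definition:

/* NIR vectors may only be vec1-vec5, vec8, or vec16, so a vectorizer
 * callback must refuse any merge that would produce another width. */
static inline bool
nir_num_components_valid(unsigned num_components)
{
   return (num_components >= 1 &&
           num_components <= 5) ||
          num_components == 8 ||
          num_components == 16;
}

Without this check the load/store vectorizer could, for example, fuse a
vec4 and an adjacent vec2 load into an unrepresentable vec6; the existing
hole_size test only rejects gaps between the accesses, not the resulting
vector width.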