diff --git a/src/broadcom/compiler/vir_register_allocate.c b/src/broadcom/compiler/vir_register_allocate.c
index 869df9efaba..efa86444e76 100644
--- a/src/broadcom/compiler/vir_register_allocate.c
+++ b/src/broadcom/compiler/vir_register_allocate.c
@@ -502,6 +502,17 @@ get_spill_batch_size(struct v3d_compile *c)
         return 20;
 }
 
+/* Don't emit spills using the TMU until we've dropped thread count first. Also,
+ * don't spill if we have enabled any other optimization that can lead to
+ * higher register pressure, such as TMU pipelining; in that case we would
+ * rather recompile without the optimization.
+ */
+static inline bool
+tmu_spilling_allowed(struct v3d_compile *c, int thread_index)
+{
+        return thread_index == 0 && c->disable_tmu_pipelining;
+}
+
 #define CLASS_BIT_PHYS          (1 << 0)
 #define CLASS_BIT_ACC           (1 << 1)
 #define CLASS_BIT_R5            (1 << 4)
@@ -746,13 +757,7 @@ v3d_register_allocate(struct v3d_compile *c, bool *spilled)
                         if (i > 0 && !is_uniform)
                                 break;
 
-                        /* Don't emit spills using the TMU until we've dropped
-                         * thread count first. Also, don't spill if we have
-                         * enabled TMU pipelining, as that can make TMU spilling
-                         * pretty terrible.
-                         */
-                        if (is_uniform ||
-                            (thread_index == 0 && c->disable_tmu_pipelining)) {
+                        if (is_uniform || tmu_spilling_allowed(c, thread_index)) {
                                 v3d_spill_reg(c, map[node].temp);
 
                                 /* Ask the outer loop to call back in. */
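
For context: in v3d_register_allocate() the new predicate gates only non-uniform spills (uniform spills stay unconditionally allowed), and the outer compile loop is expected to retry at lower thread counts, and then without TMU pipelining, before TMU spilling is ever permitted. Below is a minimal, self-contained C sketch of that fallback ordering. Everything here except the predicate body is a hypothetical illustration (struct state, compile_once(), the two-pass loop in main() are not the actual Mesa driver code); only the thread_index == 0 && disable_tmu_pipelining condition mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant bits of struct v3d_compile. */
struct state {
        int thread_index;            /* 0 selects the lowest thread count */
        bool disable_tmu_pipelining; /* set when recompiling without the opt */
};

/* Mirrors the predicate added by the patch: TMU spilling is allowed only
 * once we are at the lowest thread count AND TMU pipelining is disabled. */
static inline bool
tmu_spilling_allowed_sketch(const struct state *s)
{
        return s->thread_index == 0 && s->disable_tmu_pipelining;
}

/* Hypothetical stand-in for one register-allocation attempt: pretend
 * allocation only succeeds once TMU spilling is permitted. */
static bool
compile_once(const struct state *s)
{
        return tmu_spilling_allowed_sketch(s);
}

int
main(void)
{
        /* Try thread indices from highest to lowest with pipelining enabled,
         * then repeat with pipelining disabled so spilling becomes legal. */
        struct state s = { .thread_index = 0, .disable_tmu_pipelining = false };
        for (int pass = 0; pass < 2; pass++) {
                s.disable_tmu_pipelining = (pass == 1);
                for (s.thread_index = 1; s.thread_index >= 0; s.thread_index--) {
                        bool ok = compile_once(&s);
                        printf("attempt: thread_index=%d tmu_pipelining=%s -> %s\n",
                               s.thread_index,
                               s.disable_tmu_pipelining ? "disabled" : "enabled",
                               ok ? "ok" : "retry");
                        if (ok)
                                return 0;
                }
        }
        return 1;
}

Under these assumptions the sketch prints three "retry" attempts before succeeding on the pass with pipelining disabled and thread_index == 0, which is exactly the point where tmu_spilling_allowed() in the patch starts returning true.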