nir/dead_write_vars: Handle volatile

We can't remove volatile writes, and we can't combine them with other
volatile writes, so all we can do is clear the unused bits.

Reviewed-by: Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4767>
Author: Jason Ekstrand, 2020-04-27 01:54:40 -05:00 (committed by Marge Bot)
parent ed67717167
commit cb9292091b
2 changed files with 61 additions and 0 deletions
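
For context, here is a rough sketch of the kind of write sequence this change cares about, built with the same nir_builder helpers the new unit tests below use (the builder b and the single-component SSBO variable v are illustrative, not part of the commit):

   /* Three single-component writes to the same SSBO variable.  If the store
    * in the middle were not volatile, the first two stores would be dead and
    * the pass could drop them.  Because it is volatile, all three must
    * survive: the pass only clears its record of unused writes and reports
    * no progress.
    */
   nir_store_var(b, v, nir_imm_int(b, 0), 0x1);           /* kept: a volatile write follows   */
   nir_store_var_volatile(b, v, nir_imm_int(b, 1), 0x1);  /* kept: volatile is never removed  */
   nir_store_var(b, v, nir_imm_int(b, 2), 0x1);           /* kept: last write to the variable */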


@@ -176,6 +176,19 @@ remove_dead_write_vars_local(void *mem_ctx, nir_block *block)
      case nir_intrinsic_store_deref: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);

         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE) {
            /* Consider a volatile write to also be a sort of read.  This
             * prevents us from deleting a non-volatile write just before a
             * volatile write thanks to a non-volatile write afterwards.  It's
             * quite the corner case, but this should be safer and more
             * predictable for the programmer than allowing two non-volatile
             * writes to be combined with a volatile write between them.
             */
            clear_unused_for_read(&unused_writes, dst);
            break;
         }

         nir_component_mask_t mask = nir_intrinsic_write_mask(intrin);
         progress |= update_unused_writes(&unused_writes, intrin, dst, mask);
         break;
@@ -185,6 +198,12 @@ remove_dead_write_vars_local(void *mem_ctx, nir_block *block)
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);

         if (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE) {
            clear_unused_for_read(&unused_writes, src);
            clear_unused_for_read(&unused_writes, dst);
            break;
         }

         /* Self-copy is removed. */
         if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
            nir_instr_remove(instr);


@@ -1345,6 +1345,48 @@ TEST_F(nir_dead_write_vars_test, no_dead_writes_different_components_in_block)
   ASSERT_FALSE(progress);
}

TEST_F(nir_dead_write_vars_test, volatile_write)
{
   nir_variable *v = create_int(nir_var_mem_ssbo, "v");

   nir_store_var(b, v, nir_imm_int(b, 0), 0x1);
   nir_store_var_volatile(b, v, nir_imm_int(b, 1), 0x1);
   nir_store_var(b, v, nir_imm_int(b, 2), 0x1);

   /* Our approach here is a bit scorched-earth.  We expect the volatile store
    * in the middle to cause both that store and the one before it to be kept.
    * Technically, volatile only prevents combining the volatile store with
    * another store and one could argue that the store before the volatile and
    * the one after it could be combined.  However, it seems safer to just
    * treat a volatile store like an atomic and prevent any combining across
    * it.
    */
   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_FALSE(progress);
}

TEST_F(nir_dead_write_vars_test, volatile_copies)
{
   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 2);

   nir_copy_var(b, v[0], v[1]);
   nir_copy_deref_with_access(b, nir_build_deref_var(b, v[0]),
                                 nir_build_deref_var(b, v[1]),
                                 ACCESS_VOLATILE, (gl_access_qualifier)0);
   nir_copy_var(b, v[0], v[1]);

   /* Our approach here is a bit scorched-earth.  We expect the volatile store
    * in the middle to cause both that store and the one before it to be kept.
    * Technically, volatile only prevents combining the volatile store with
    * another store and one could argue that the store before the volatile and
    * the one after it could be combined.  However, it seems safer to just
    * treat a volatile store like an atomic and prevent any combining across
    * it.
    */
   bool progress = nir_opt_dead_write_vars(b->shader);
   ASSERT_FALSE(progress);
}

TEST_F(nir_dead_write_vars_test, no_dead_writes_in_if_statement)
{
   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 6);