From fd2d7fe28e7a95debf6a49738c9a6f2e9ef02839 Mon Sep 17 00:00:00 2001
From: Mel Henning
Date: Mon, 8 Dec 2025 13:08:13 -0500
Subject: [PATCH 1/8] nir: Add nir_deref_instr_is_arr() helper

---
 src/compiler/nir/nir.h | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index b12223909e6..96ee2c767fa 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -1789,6 +1789,14 @@ nir_deref_mode_is_in_set(const nir_deref_instr *deref, nir_variable_mode modes)

 static inline nir_deref_instr *nir_src_as_deref(nir_src src);

+/** Returns true if deref->arr is valid */
+static inline bool
+nir_deref_instr_is_arr(const nir_deref_instr *deref)
+{
+   return deref->deref_type == nir_deref_type_array ||
+          deref->deref_type == nir_deref_type_ptr_as_array;
+}
+
 static inline nir_deref_instr *
 nir_deref_instr_parent(const nir_deref_instr *instr)
 {

From 246287698594c84b08a59dbe9187f831eae8ae35 Mon Sep 17 00:00:00 2001
From: Mel Henning
Date: Mon, 8 Dec 2025 13:50:41 -0500
Subject: [PATCH 2/8] treewide: Use nir_deref_instr_is_arr()

Via coccinelle and some manual fixups.

@@
expression e1;
@@
- e1->deref_type == nir_deref_type_array || e1->deref_type == nir_deref_type_ptr_as_array
+ nir_deref_instr_is_arr(e1)
---
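For reference, a semantic patch like the one above is usually applied with an
invocation along these lines (sketch only; the .cocci file name is made up for
illustration):

    spatch --sp-file use_is_arr.cocci --in-place --dir src/

The manual fixups presumably cover call sites where the two comparisons are
spelled in a form the rule does not match.
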
 src/compiler/nir/nir_deref.c           | 3 +--
 src/compiler/nir/nir_opt_gcm.c         | 3 +--
 src/compiler/nir/nir_opt_loop_unroll.c | 3 +--
 src/compiler/nir/nir_print.c           | 3 +--
 src/compiler/nir/nir_serialize.c       | 3 +--
 src/intel/compiler/brw/brw_nir_rt.c    | 3 +--
 6 files changed, 6 insertions(+), 12 deletions(-)

diff --git a/src/compiler/nir/nir_deref.c b/src/compiler/nir/nir_deref.c
index d65f7803f81..397a9a98b34 100644
--- a/src/compiler/nir/nir_deref.c
+++ b/src/compiler/nir/nir_deref.c
@@ -129,8 +129,7 @@ nir_deref_instr_has_indirect(nir_deref_instr *instr)
    if (instr->deref_type == nir_deref_type_cast)
       return true;

-   if ((instr->deref_type == nir_deref_type_array ||
-        instr->deref_type == nir_deref_type_ptr_as_array) &&
+   if (nir_deref_instr_is_arr(instr) &&
        !nir_src_is_const(instr->arr.index))
       return true;

diff --git a/src/compiler/nir/nir_opt_gcm.c b/src/compiler/nir/nir_opt_gcm.c
index bc9c39e47e1..0a2e1f53414 100644
--- a/src/compiler/nir/nir_opt_gcm.c
+++ b/src/compiler/nir/nir_opt_gcm.c
@@ -281,8 +281,7 @@ pin_intrinsic(nir_intrinsic_instr *intrin)
                             nir_var_mem_push_const)) {
       nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
       while (deref->deref_type != nir_deref_type_var) {
-         if ((deref->deref_type == nir_deref_type_array ||
-              deref->deref_type == nir_deref_type_ptr_as_array) &&
+         if (nir_deref_instr_is_arr(deref) &&
              !nir_src_is_always_uniform(deref->arr.index)) {
             instr->pass_flags = GCM_INSTR_PINNED;
             return;
diff --git a/src/compiler/nir/nir_opt_loop_unroll.c b/src/compiler/nir/nir_opt_loop_unroll.c
index 20c19da8480..8c193721b1c 100644
--- a/src/compiler/nir/nir_opt_loop_unroll.c
+++ b/src/compiler/nir/nir_opt_loop_unroll.c
@@ -838,8 +838,7 @@ is_indirect_load(nir_instr *instr)
       if (!nir_deref_mode_may_be(deref, mem_modes))
          return false;
       while (deref) {
-         if ((deref->deref_type == nir_deref_type_array ||
-              deref->deref_type == nir_deref_type_ptr_as_array) &&
+         if (nir_deref_instr_is_arr(deref) &&
              !nir_src_is_const(deref->arr.index)) {
             return true;
          }
diff --git a/src/compiler/nir/nir_print.c b/src/compiler/nir/nir_print.c
index b6cbd506131..92b51bda834 100644
--- a/src/compiler/nir/nir_print.c
+++ b/src/compiler/nir/nir_print.c
@@ -1144,8 +1144,7 @@ print_deref_instr(nir_deref_instr *instr, print_state *state)
                  instr->cast.align_mul,
                  instr->cast.align_offset);
       }
-   if (instr->deref_type == nir_deref_type_array ||
-       instr->deref_type == nir_deref_type_ptr_as_array) {
+   if (nir_deref_instr_is_arr(instr)) {
       if (instr->arr.in_bounds)
          fprintf(fp, " (in bounds)");
    }
diff --git a/src/compiler/nir/nir_serialize.c b/src/compiler/nir/nir_serialize.c
index 74ab945eb2e..44c57b921b2 100644
--- a/src/compiler/nir/nir_serialize.c
+++ b/src/compiler/nir/nir_serialize.c
@@ -915,8 +915,7 @@ write_deref(write_ctx *ctx, const nir_deref_instr *deref)
       header.deref_var.object_idx = var_idx;
    }

-   if (deref->deref_type == nir_deref_type_array ||
-       deref->deref_type == nir_deref_type_ptr_as_array) {
+   if (nir_deref_instr_is_arr(deref)) {
       header.deref.packed_src_ssa_16bit = are_object_ids_16bit(ctx);
       header.deref.in_bounds = deref->arr.in_bounds;

diff --git a/src/intel/compiler/brw/brw_nir_rt.c b/src/intel/compiler/brw/brw_nir_rt.c
index 76a7f1e5a1f..f9abb0a22a0 100644
--- a/src/intel/compiler/brw/brw_nir_rt.c
+++ b/src/intel/compiler/brw/brw_nir_rt.c
@@ -36,8 +36,7 @@ resize_deref(nir_builder *b, nir_deref_instr *deref,

    /* NIR requires array indices have to match the deref bit size */
    if (deref->def.bit_size != bit_size &&
-       (deref->deref_type == nir_deref_type_array ||
-        deref->deref_type == nir_deref_type_ptr_as_array)) {
+       nir_deref_instr_is_arr(deref)) {
       b->cursor = nir_before_instr(&deref->instr);
       nir_def *idx;
       if (nir_src_is_const(deref->arr.index)) {

From af9d816f83e477a14d4efb23e9bbbd610e2da347 Mon Sep 17 00:00:00 2001
From: Mel Henning
Date: Mon, 8 Dec 2025 18:23:34 -0500
Subject: [PATCH 3/8] nir: Use instr_clone in rematerialize_deref_in_block

The previous implementation seems to predate nir_instr_clone() and
duplicates a lot of the deref cloning code.

This also makes the pass preserve deref->arr.in_bounds correctly.

---
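Summary of what nir_instr_clone() buys here (a sketch, not the literal new
function body): the clone already copies the per-type payload of the deref
(modes, type, arr.index, arr.in_bounds, the cast fields, strct.index), so the
only thing left to do by hand is re-pointing the parent source when the
parent itself had to be rematerialized:

    nir_deref_instr *new_deref =
       nir_instr_as_deref(nir_instr_clone(b->shader, &deref->instr));
    if (deref->deref_type != nir_deref_type_var) {
       nir_deref_instr *parent = nir_src_as_deref(deref->parent);
       if (parent) {
          parent = rematerialize_deref_in_block(parent, state);
          new_deref->parent = nir_src_for_ssa(&parent->def);
       }
    }

Because nothing is copied field by field any more, flags added to the deref
later (such as the ones introduced in patch 5) are preserved automatically.
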
 src/compiler/nir/nir_deref.c | 42 ++++--------------------------------------
 1 file changed, 4 insertions(+), 38 deletions(-)

diff --git a/src/compiler/nir/nir_deref.c b/src/compiler/nir/nir_deref.c
index 397a9a98b34..feb463fcc4a 100644
--- a/src/compiler/nir/nir_deref.c
+++ b/src/compiler/nir/nir_deref.c
@@ -791,52 +791,18 @@ rematerialize_deref_in_block(nir_deref_instr *deref,
       return deref;

    nir_builder *b = &state->builder;
-   nir_deref_instr *new_deref =
-      nir_deref_instr_create(b->shader, deref->deref_type);
-   new_deref->modes = deref->modes;
-   new_deref->type = deref->type;
+   nir_instr *new_instr = nir_instr_clone(b->shader, &deref->instr);
+   nir_deref_instr *new_deref = nir_instr_as_deref(new_instr);

-   if (deref->deref_type == nir_deref_type_var) {
-      new_deref->var = deref->var;
-   } else {
+   if (deref->deref_type != nir_deref_type_var) {
       nir_deref_instr *parent = nir_src_as_deref(deref->parent);
       if (parent) {
          parent = rematerialize_deref_in_block(parent, state);
          new_deref->parent = nir_src_for_ssa(&parent->def);
-      } else {
-         new_deref->parent = nir_src_for_ssa(deref->parent.ssa);
       }
    }

-   switch (deref->deref_type) {
-   case nir_deref_type_var:
-   case nir_deref_type_array_wildcard:
-      /* Nothing more to do */
-      break;
-
-   case nir_deref_type_cast:
-      new_deref->cast.ptr_stride = deref->cast.ptr_stride;
-      new_deref->cast.align_mul = deref->cast.align_mul;
-      new_deref->cast.align_offset = deref->cast.align_offset;
-      break;
-
-   case nir_deref_type_array:
-   case nir_deref_type_ptr_as_array:
-      assert(!nir_src_as_deref(deref->arr.index));
-      new_deref->arr.index = nir_src_for_ssa(deref->arr.index.ssa);
-      break;
-
-   case nir_deref_type_struct:
-      new_deref->strct.index = deref->strct.index;
-      break;
-
-   default:
-      UNREACHABLE("Invalid deref instruction type");
-   }
-
-   nir_def_init(&new_deref->instr, &new_deref->def,
-                deref->def.num_components, deref->def.bit_size);
-   nir_builder_instr_insert(b, &new_deref->instr);
+   nir_builder_instr_insert(b, new_instr);

    return new_deref;
 }

From d44028afa93254ae5f05148a53ba682f95cfc84d Mon Sep 17 00:00:00 2001
From: Mel Henning
Date: Mon, 8 Dec 2025 16:31:54 -0500
Subject: [PATCH 4/8] nir/lower_explicit_io: Allow vecs in bounds checks

---
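With the asserts gone, the bounds check covers the whole vector in one go.
As a worked example (illustrative numbers only), a 4-component load of
32-bit values gives

    load_size = num_components * bit_size / 8 = 4 * 32 / 8 = 16 bytes

so a single 16-byte range check replaces the per-component 4-byte checks
that the previously scalarized code needed.
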
 src/compiler/nir/nir_lower_explicit_io.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/src/compiler/nir/nir_lower_explicit_io.c b/src/compiler/nir/nir_lower_explicit_io.c
index 61d1c471d54..bd36f7753f3 100644
--- a/src/compiler/nir/nir_lower_explicit_io.c
+++ b/src/compiler/nir/nir_lower_explicit_io.c
@@ -810,8 +810,7 @@ build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
       nir_def *zero = nir_imm_zero(b, load->num_components, bit_size);

       /* TODO: Better handle block_intel. */
-      assert(load->num_components == 1);
-      const unsigned load_size = bit_size / 8;
+      const unsigned load_size = load->num_components * bit_size / 8;
       nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));

       nir_builder_instr_insert(b, &load->instr);
@@ -1000,8 +999,7 @@ build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,

    if (addr_format_needs_bounds_check(addr_format)) {
       /* TODO: Better handle block_intel. */
-      assert(store->num_components == 1);
-      const unsigned store_size = value->bit_size / 8;
+      const unsigned store_size = value->num_components * value->bit_size / 8;
       nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));

       nir_builder_instr_insert(b, &store->instr);
@@ -1117,7 +1115,8 @@ build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
    assert(atomic->def.bit_size % 8 == 0);

    if (addr_format_needs_bounds_check(addr_format)) {
-      const unsigned atomic_size = atomic->def.bit_size / 8;
+      const unsigned atomic_size =
+         atomic->def.num_components * atomic->def.bit_size / 8;
       nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));

       nir_builder_instr_insert(b, &atomic->instr);

From 1def157335682cfce2064066567b1e3d6c3a7351 Mon Sep 17 00:00:00 2001
From: Mel Henning
Date: Tue, 9 Dec 2025 15:29:33 -0500
Subject: [PATCH 5/8] nir: Add deref->arr.{base,never}_bounds_check

---
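Intended semantics of the two new flags, paraphrasing the addr_is_in_bounds()
logic added below (summary, not the literal pass code):

    if (deref->arr.never_bounds_check) {
       /* No check is emitted at all; the access is assumed in bounds. */
    } else if (deref->arr.base_bounds_check) {
       /* Only the parent's base address is range-checked (size = 1), so an
        * access whose base is in bounds may extend past the checked range.
        */
    } else {
       /* Default: the full size of the access is range-checked as before. */
    }
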
 src/compiler/nir/nir.h                   | 11 +++++++
 src/compiler/nir/nir_clone.c             |  2 ++
 src/compiler/nir/nir_deref.c             |  7 +++++
 src/compiler/nir/nir_instr_set.c         |  6 ++++
 src/compiler/nir/nir_lower_explicit_io.c | 37 ++++++++++++++++++++++--
 src/compiler/nir/nir_print.c             |  4 +++
 src/compiler/nir/nir_serialize.c         |  8 ++++-
 7 files changed, 72 insertions(+), 3 deletions(-)

diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 96ee2c767fa..b229caa12ba 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -1668,7 +1668,18 @@ typedef struct nir_deref_instr {
    union {
       struct {
          nir_src index;
+
+         /** If true, the index is always within the bounds of parent */
          bool in_bounds;
+
+         /**
+          * If true, then the deref will be in bounds if the parent's base
+          * address is in bounds
+          */
+         bool base_bounds_check;
+
+         /** If true, all bounds checking should be disabled for this deref */
+         bool never_bounds_check;
       } arr;

       struct {
diff --git a/src/compiler/nir/nir_clone.c b/src/compiler/nir/nir_clone.c
index aa9159fe73f..7386f1fde9b 100644
--- a/src/compiler/nir/nir_clone.c
+++ b/src/compiler/nir/nir_clone.c
@@ -322,6 +322,8 @@ clone_deref_instr(clone_state *state, const nir_deref_instr *deref)
       __clone_src(state, &nderef->instr,
                   &nderef->arr.index, &deref->arr.index);
       nderef->arr.in_bounds = deref->arr.in_bounds;
+      nderef->arr.base_bounds_check = deref->arr.base_bounds_check;
+      nderef->arr.never_bounds_check = deref->arr.never_bounds_check;
       break;

    case nir_deref_type_array_wildcard:
diff --git a/src/compiler/nir/nir_deref.c b/src/compiler/nir/nir_deref.c
index feb463fcc4a..d06678a8cf5 100644
--- a/src/compiler/nir/nir_deref.c
+++ b/src/compiler/nir/nir_deref.c
@@ -1223,6 +1223,13 @@ opt_deref_ptr_as_array(nir_builder *b, nir_deref_instr *deref)
 {
    assert(deref->deref_type == nir_deref_type_ptr_as_array);

+   /* Neither of the optimizations below are worthwhile if they discard
+    * bounds checking info
+    */
+   if (deref->arr.base_bounds_check &&
+       deref->arr.never_bounds_check)
+      return false;
+
    nir_deref_instr *parent = nir_deref_instr_parent(deref);

    if (nir_src_is_const(deref->arr.index) &&
diff --git a/src/compiler/nir/nir_instr_set.c b/src/compiler/nir/nir_instr_set.c
index ed6cb6e7ca9..40c3d9c69c1 100644
--- a/src/compiler/nir/nir_instr_set.c
+++ b/src/compiler/nir/nir_instr_set.c
@@ -173,6 +173,8 @@ hash_deref(uint32_t hash, const nir_deref_instr *instr)
    case nir_deref_type_ptr_as_array:
       hash = hash_src(hash, &instr->arr.index);
       hash = HASH(hash, instr->arr.in_bounds);
+      hash = HASH(hash, instr->arr.base_bounds_check);
+      hash = HASH(hash, instr->arr.never_bounds_check);
       break;

    case nir_deref_type_cast:
@@ -628,6 +630,10 @@ nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2)
          return false;
       if (deref1->arr.in_bounds != deref2->arr.in_bounds)
          return false;
+      if (deref1->arr.base_bounds_check != deref2->arr.base_bounds_check)
+         return false;
+      if (deref1->arr.never_bounds_check != deref2->arr.never_bounds_check)
+         return false;
       break;

    case nir_deref_type_cast:
diff --git a/src/compiler/nir/nir_lower_explicit_io.c b/src/compiler/nir/nir_lower_explicit_io.c
index bd36f7753f3..db601b06d86 100644
--- a/src/compiler/nir/nir_lower_explicit_io.c
+++ b/src/compiler/nir/nir_lower_explicit_io.c
@@ -420,10 +420,37 @@ addr_format_needs_bounds_check(nir_address_format addr_format)
    return addr_format == nir_address_format_64bit_bounded_global;
 }

+static nir_deref_instr*
+trailing_array_deref(nir_def *ssa)
+{
+   while (true) {
+      if (!nir_def_is_deref(ssa))
+         return NULL;
+
+      nir_deref_instr *deref = nir_def_as_deref(ssa);
+      if (nir_deref_instr_is_arr(deref)) {
+         return deref;
+      } else if (deref->deref_type == nir_deref_type_cast) {
+         ssa = deref->parent.ssa;
+      } else {
+         return NULL;
+      }
+   }
+}
+
 static nir_def *
 addr_is_in_bounds(nir_builder *b, nir_def *addr,
                   nir_address_format addr_format, unsigned size)
 {
+   nir_deref_instr *arr_deref = trailing_array_deref(addr);
+   if (arr_deref) {
+      if (arr_deref->arr.never_bounds_check) {
+         return nir_imm_true(b);
+      } else if (arr_deref->arr.base_bounds_check) {
+         addr = arr_deref->parent.ssa;
+         size = 1;
+      }
+   }
    assert(addr_format == nir_address_format_64bit_bounded_global);
    assert(addr->num_components == 4);
    assert(size > 0);
@@ -1420,8 +1447,14 @@ nir_lower_explicit_io_instr(nir_builder *b,
     * that information through to nir_lower_explicit_io. For now, however,
     * scalarizing is at least correct.
     */
-   bool scalarize = vec_stride > scalar_size ||
-                    addr_format_needs_bounds_check(addr_format);
+   bool scalarize = vec_stride > scalar_size;
+   if (addr_format_needs_bounds_check(addr_format)) {
+      nir_deref_instr *arr_deref = trailing_array_deref(&deref->def);
+      bool skip_scalarize = arr_deref &&
+                            (arr_deref->arr.base_bounds_check ||
+                             arr_deref->arr.never_bounds_check);
+      scalarize |= !skip_scalarize;
+   }

    switch (intrin->intrinsic) {
    case nir_intrinsic_load_deref: {
diff --git a/src/compiler/nir/nir_print.c b/src/compiler/nir/nir_print.c
index 92b51bda834..f96272bba09 100644
--- a/src/compiler/nir/nir_print.c
+++ b/src/compiler/nir/nir_print.c
@@ -1147,6 +1147,10 @@ print_deref_instr(nir_deref_instr *instr, print_state *state)
    if (nir_deref_instr_is_arr(instr)) {
       if (instr->arr.in_bounds)
          fprintf(fp, " (in bounds)");
+      if (instr->arr.base_bounds_check)
+         fprintf(fp, " (base bounds check)");
+      if (instr->arr.never_bounds_check)
+         fprintf(fp, " (never bounds check)");
    }

    if (instr->deref_type != nir_deref_type_var &&
diff --git a/src/compiler/nir/nir_serialize.c b/src/compiler/nir/nir_serialize.c
index 44c57b921b2..1af362633ae 100644
--- a/src/compiler/nir/nir_serialize.c
+++ b/src/compiler/nir/nir_serialize.c
@@ -545,8 +545,10 @@ union packed_instr {
       unsigned deref_type : 3;
       unsigned cast_type_same_as_last : 1;
       unsigned modes : 6; /* See (de|en)code_deref_modes() */
-      unsigned _pad : 8;
+      unsigned _pad : 6;
       unsigned in_bounds : 1;
+      unsigned base_bounds_check : 1;
+      unsigned never_bounds_check : 1;
       unsigned packed_src_ssa_16bit : 1; /* deref_var redefines this */
       unsigned def : 8;
    } deref;
@@ -919,6 +921,8 @@ write_deref(write_ctx *ctx, const nir_deref_instr *deref)
    if (nir_deref_instr_is_arr(deref)) {
       header.deref.packed_src_ssa_16bit = are_object_ids_16bit(ctx);
       header.deref.in_bounds = deref->arr.in_bounds;
+      header.deref.base_bounds_check = deref->arr.base_bounds_check;
+      header.deref.never_bounds_check = deref->arr.never_bounds_check;
    }

    write_def(ctx, &deref->def, header, deref->instr.type);
@@ -1005,6 +1009,8 @@ read_deref(read_ctx *ctx, union packed_instr header)
    }

    deref->arr.in_bounds = header.deref.in_bounds;
+   deref->arr.base_bounds_check = header.deref.base_bounds_check;
+   deref->arr.never_bounds_check = header.deref.never_bounds_check;

    parent = nir_src_as_deref(deref->parent);
    if (deref->deref_type == nir_deref_type_array)

From a9d8abb4da354e4b15ece78f813559e0fd5032e6 Mon Sep 17 00:00:00 2001
From: Mel Henning
Date: Tue, 9 Dec 2025 15:32:07 -0500
Subject: [PATCH 6/8] spirv: Implement SPV_NV_raw_access_chains

---
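For a chain with a non-zero stride the opcode is lowered to a deref chain of
roughly this shape (sketch; both casts are to a uint8_t pointer type, so the
ptr_as_array steps count in whole elements and in bytes respectively):

    base
      -> cast (ptr_stride = stride)
      -> ptr_as_array[index]      /* selects the element              */
      -> cast (ptr_stride = 1)
      -> ptr_as_array[offset]     /* byte offset within the element   */

The robustness operand maps onto the new flags as follows: PerComponentNV
keeps the default per-access checks, PerElementNV sets arr.base_bounds_check
on the final deref, and omitting the operand sets arr.never_bounds_check.
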
 src/compiler/spirv/spirv_to_nir.c  |  2 ++
 src/compiler/spirv/vtn_variables.c | 38 ++++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+)

diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index 7d4b9a7d21f..d6a0496ab3a 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -157,6 +157,7 @@ static const struct spirv_capabilities implemented_capabilities = {
    .RayTracingKHR = true,
    .RayTracingPositionFetchKHR = true,
    .RayTraversalPrimitiveCullingKHR = true,
+   .RawAccessChainsNV = true,
    .ReplicatedCompositesEXT = true,
    .RoundingModeRTE = true,
    .RoundingModeRTZ = true,
@@ -6536,6 +6537,7 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
    case SpvOpPtrAccessChain:
    case SpvOpInBoundsAccessChain:
    case SpvOpInBoundsPtrAccessChain:
+   case SpvOpRawAccessChainNV:
    case SpvOpArrayLength:
    case SpvOpConvertPtrToU:
    case SpvOpConvertUToPtr:
diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c
index 532e5e25459..17317000b3e 100644
--- a/src/compiler/spirv/vtn_variables.c
+++ b/src/compiler/spirv/vtn_variables.c
@@ -2853,6 +2853,44 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
       break;
    }

+   case SpvOpRawAccessChainNV: {
+      struct vtn_type *ptr_type = vtn_get_type(b, w[1]);
+      nir_deref_instr *base = vtn_nir_deref(b, w[3]);
+      uint32_t stride = vtn_constant_uint(b, w[4]);
+      nir_def *index = vtn_get_nir_ssa(b, w[5]);
+      nir_def *offset = vtn_get_nir_ssa(b, w[6]);
+
+      uint32_t flags = 0;
+      if (count >= 8) {
+         flags = w[7];
+      }
+
+      nir_deref_instr *deref = base;
+
+      if (stride) {
+         index = nir_i2iN(&b->nb, index, base->def.bit_size);
+         deref = nir_build_deref_cast(&b->nb, &deref->def, base->modes,
+                                      glsl_uint8_t_type(), stride);
+         deref = nir_build_deref_ptr_as_array(&b->nb, deref, index);
+      }
+
+      offset = nir_i2iN(&b->nb, offset, base->def.bit_size);
+      deref = nir_build_deref_cast(&b->nb, &deref->def, base->modes,
+                                   glsl_uint8_t_type(), /* stride */ 1);
+      deref = nir_build_deref_ptr_as_array(&b->nb, deref, offset);
+
+      if (flags & SpvRawAccessChainOperandsRobustnessPerComponentNVMask) {
+         /* Default robustness */
+      } else if (flags & SpvRawAccessChainOperandsRobustnessPerElementNVMask) {
+         deref->arr.base_bounds_check = true;
+      } else {
+         deref->arr.never_bounds_check = true;
+      }
+
+      vtn_push_pointer(b, w[2], vtn_pointer_from_ssa(b, &deref->def, ptr_type));
+      break;
+   }
+
    case SpvOpCopyMemory: {
       struct vtn_value *dest_val = vtn_pointer_value(b, w[1]);
       struct vtn_value *src_val = vtn_pointer_value(b, w[2]);

From dec849641ed30285cd8624eac51206fccd8add9e Mon Sep 17 00:00:00 2001
From: Mel Henning
Date: Tue, 9 Dec 2025 15:32:25 -0500
Subject: [PATCH 7/8] nvk: Enable VK_NV_raw_access_chains

---
 src/nouveau/vulkan/nvk_physical_device.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/nouveau/vulkan/nvk_physical_device.c b/src/nouveau/vulkan/nvk_physical_device.c
index 028ca08f534..d7552743b80 100644
--- a/src/nouveau/vulkan/nvk_physical_device.c
+++ b/src/nouveau/vulkan/nvk_physical_device.c
@@ -302,6 +302,7 @@ nvk_get_device_extensions(const struct nvk_instance *instance,
       .GOOGLE_user_type = true,
       .MESA_image_alignment_control = true,
       .NV_compute_shader_derivatives = info->cls_eng3d >= TURING_A,
+      .NV_raw_access_chains = true,
       .NV_shader_sm_builtins = true,
       .NVX_image_view_handle = info->cls_eng3d >= MAXWELL_A, /* needs true bindless descriptors */
       .VALVE_mutable_descriptor_type = true,
@@ -744,6 +745,9 @@ nvk_get_device_features(const struct nv_device_info *info,
       /* VK_MESA_image_alignment_control */
       .imageAlignmentControl = true,

+      /* VK_NV_raw_access_chains */
+      .shaderRawAccessChains = true,
+
       /* VK_NV_shader_sm_builtins */
       .shaderSMBuiltins = true,

From 76d748b199390c971ec7fe253cc33e9a6c8bfc8e Mon Sep 17 00:00:00 2001
From: Mel Henning
Date: Thu, 4 Dec 2025 17:28:39 -0500
Subject: [PATCH 8/8] HACK: change handling of NVK_SSBO_BOUNDS_CHECK_ALIGNMENT
 to work around a CTS bug

---
 src/nouveau/vulkan/nvk_descriptor_set.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/nouveau/vulkan/nvk_descriptor_set.c b/src/nouveau/vulkan/nvk_descriptor_set.c
index 651aab18d9c..c1741032b11 100644
--- a/src/nouveau/vulkan/nvk_descriptor_set.c
+++ b/src/nouveau/vulkan/nvk_descriptor_set.c
@@ -320,7 +320,7 @@ ssbo_desc(struct nvk_addr_range addr_range)
    assert(addr_range.range <= UINT32_MAX);

    addr_range.addr = ROUND_DOWN_TO(addr_range.addr, NVK_MIN_SSBO_ALIGNMENT);
-   addr_range.range = align(addr_range.range, NVK_SSBO_BOUNDS_CHECK_ALIGNMENT);
+   // addr_range.range = align(addr_range.range, NVK_SSBO_BOUNDS_CHECK_ALIGNMENT);
    return (union nvk_buffer_descriptor) {
       .addr = {
          .base_addr = addr_range.addr,