lavapipe/rt: Do not use vk_acceleration_structure::size

vkd3d-proton sets this to the size of the backing memory, which means we
will overwrite unrelated buffer contents when performing acceleration
structure copies.

Reviewed-by: Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36197>
Konstantin Seurer authored 2025-07-17 17:41:41 +02:00, committed by Marge Bot
parent 536f5d3496
commit cabcd7e9ea
4 changed files with 60 additions and 59 deletions
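For context, the heart of the problem can be sketched outside the driver: if the copy uses the acceleration structure object's size field (which vkd3d-proton sets to the whole backing allocation) instead of the compacted size recorded in the BVH header, the copy runs past the real BVH and clobbers neighbouring buffer contents. The snippet below is an illustrative stand-alone sketch only; the struct names and fields are placeholders, not the lavapipe definitions.

#include <stdint.h>
#include <string.h>

/* Placeholder stand-ins for the real lavapipe/vulkan-runtime types. */
struct bvh_header {
   uint32_t compacted_size; /* size of the encoded BVH itself */
};

struct accel_struct {
   uint8_t *va;   /* start of the BVH in the backing buffer */
   uint64_t size; /* vkd3d-proton may set this to the whole backing allocation */
};

void copy_accel_struct(struct accel_struct *dst, const struct accel_struct *src)
{
   const struct bvh_header *hdr = (const void *)src->va;
   /* Copying src->size bytes could run past the BVH and overwrite unrelated
    * data in dst's buffer; the compacted size recorded by the encoder is the
    * amount that actually belongs to the acceleration structure. */
   memcpy(dst->va, src->va, hdr->compacted_size);
}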


@@ -16,6 +16,5 @@ test_sample_instructions,Fail
test_rayquery,Fail
test_raytracing,Fail
test_raytracing_mismatch_global_rs_link,Fail
test_sampler_rounding,Fail
test_view_min_lod,Fail


@@ -54,6 +54,51 @@ lvp_init_radix_sort(struct lvp_device *device)
simple_mtx_unlock(&device->radix_sort_lock);
}
static void
lvp_get_leaf_node_size(VkGeometryTypeKHR geometry_type, uint32_t *ir_leaf_node_size,
uint32_t *output_leaf_node_size)
{
switch (geometry_type) {
case VK_GEOMETRY_TYPE_TRIANGLES_KHR:
*ir_leaf_node_size = sizeof(struct vk_ir_triangle_node);
*output_leaf_node_size = sizeof(struct lvp_bvh_triangle_node);
break;
case VK_GEOMETRY_TYPE_AABBS_KHR:
*ir_leaf_node_size = sizeof(struct vk_ir_aabb_node);
*output_leaf_node_size = sizeof(struct lvp_bvh_aabb_node);
break;
case VK_GEOMETRY_TYPE_INSTANCES_KHR:
*ir_leaf_node_size = sizeof(struct vk_ir_instance_node);
*output_leaf_node_size = sizeof(struct lvp_bvh_instance_node);
break;
default:
break;
}
}
static VkDeviceSize
lvp_get_as_size_internal(VkGeometryTypeKHR geometry_type, uint32_t leaf_node_count)
{
uint32_t internal_node_count = MAX2(leaf_node_count, 2) - 1;
uint32_t nodes_size = internal_node_count * sizeof(struct lvp_bvh_box_node);
uint32_t ir_leaf_node_size = 0;
uint32_t output_leaf_node_size = 0;
lvp_get_leaf_node_size(geometry_type, &ir_leaf_node_size, &output_leaf_node_size);
nodes_size += leaf_node_count * output_leaf_node_size;
nodes_size = util_align_npot(nodes_size, LVP_BVH_NODE_PREFETCH_SIZE);
return sizeof(struct lvp_bvh_header) + nodes_size;
}
static VkDeviceSize
lvp_get_as_size(VkDevice device, const struct vk_acceleration_structure_build_state *state)
{
return lvp_get_as_size_internal(vk_get_as_geometry_type(state->build_info), state->leaf_node_count);
}
static void
lvp_write_buffer_cp(VkCommandBuffer cmdbuf, VkDeviceAddress addr,
void *data, uint32_t size)
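A rough stand-alone illustration of the size computation added above, using made-up placeholder node sizes rather than the real sizeof values: a binary BVH over N leaves needs at most MAX2(N, 2) - 1 internal box nodes, the node area is padded to the prefetch granularity, and the header is added on top.

#include <stdint.h>
#include <stdio.h>

/* Placeholder constants; the real values come from the lavapipe structs. */
#define HEADER_SIZE    64u
#define BOX_NODE_SIZE  128u
#define LEAF_NODE_SIZE 64u
#define PREFETCH_SIZE  128u

/* Align to an arbitrary (possibly non-power-of-two) boundary. */
static uint32_t align_npot(uint32_t v, uint32_t a)
{
   return (v + a - 1) / a * a;
}

int main(void)
{
   uint32_t leaf_node_count = 5;
   /* MAX2(N, 2) - 1 internal nodes; the MAX2 guards the 0/1-leaf cases. */
   uint32_t internal_node_count = (leaf_node_count > 2 ? leaf_node_count : 2) - 1;
   uint32_t nodes_size = internal_node_count * BOX_NODE_SIZE +
                         leaf_node_count * LEAF_NODE_SIZE;
   nodes_size = align_npot(nodes_size, PREFETCH_SIZE);
   printf("total size: %u bytes\n", HEADER_SIZE + nodes_size);
   return 0;
}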
@@ -382,28 +427,6 @@ ret:
free(internal_nodes);
}
static void
lvp_get_leaf_node_size(VkGeometryTypeKHR geometry_type, uint32_t *ir_leaf_node_size,
uint32_t *output_leaf_node_size)
{
switch (geometry_type) {
case VK_GEOMETRY_TYPE_TRIANGLES_KHR:
*ir_leaf_node_size = sizeof(struct vk_ir_triangle_node);
*output_leaf_node_size = sizeof(struct lvp_bvh_triangle_node);
break;
case VK_GEOMETRY_TYPE_AABBS_KHR:
*ir_leaf_node_size = sizeof(struct vk_ir_aabb_node);
*output_leaf_node_size = sizeof(struct lvp_bvh_aabb_node);
break;
case VK_GEOMETRY_TYPE_INSTANCES_KHR:
*ir_leaf_node_size = sizeof(struct vk_ir_instance_node);
*output_leaf_node_size = sizeof(struct lvp_bvh_instance_node);
break;
default:
break;
}
}
void
lvp_encode_as(struct vk_acceleration_structure *dst, VkDeviceAddress intermediate_as_addr,
VkDeviceAddress intermediate_header_addr, uint32_t leaf_count,
@@ -431,8 +454,10 @@ lvp_encode_as(struct vk_acceleration_structure *dst, VkDeviceAddress intermediat
output_header->leaf_nodes_offset = sizeof(struct lvp_bvh_header) + header->ir_internal_node_count * sizeof(struct lvp_bvh_box_node);
uint32_t bvh_size = lvp_get_as_size_internal(geometry_type, leaf_count);
output_header->compacted_size = bvh_size;
output_header->serialization_size = sizeof(struct lvp_accel_struct_serialization_header) +
sizeof(uint64_t) * output_header->instance_count + dst->size;
sizeof(uint64_t) * output_header->instance_count + bvh_size;
for (uint32_t i = 0; i < header->active_leaf_count; i++) {
const void *ir_leaf = ir_bvh + i * ir_leaf_node_size;
@@ -630,23 +655,6 @@ lvp_CopyAccelerationStructureToMemoryKHR(VkDevice _device, VkDeferredOperationKH
return VK_ERROR_FEATURE_NOT_PRESENT;
}
static VkDeviceSize
lvp_get_as_size(VkDevice device, const struct vk_acceleration_structure_build_state *state)
{
uint32_t internal_node_count = MAX2(state->leaf_node_count, 2) - 1;
uint32_t nodes_size = internal_node_count * sizeof(struct lvp_bvh_box_node);
uint32_t ir_leaf_node_size = 0;
uint32_t output_leaf_node_size = 0;
lvp_get_leaf_node_size(vk_get_as_geometry_type(state->build_info), &ir_leaf_node_size, &output_leaf_node_size);
nodes_size += state->leaf_node_count * output_leaf_node_size;
nodes_size = util_align_npot(nodes_size, LVP_BVH_NODE_PREFETCH_SIZE);
return sizeof(struct lvp_bvh_header) + nodes_size;
}
static VkResult
lvp_encode_bind_pipeline(VkCommandBuffer cmd_buffer, const struct vk_acceleration_structure_build_state *state)
{


@@ -72,11 +72,10 @@ struct lvp_bvh_box_node {
struct lvp_bvh_header {
vk_aabb bounds;
uint32_t compacted_size;
uint32_t serialization_size;
uint32_t instance_count;
uint32_t leaf_nodes_offset;
uint32_t padding;
};
struct lvp_accel_struct_serialization_header {


@@ -4480,17 +4480,12 @@ handle_copy_acceleration_structure(struct vk_cmd_queue_entry *cmd, struct render
{
struct vk_cmd_copy_acceleration_structure_khr *copy = &cmd->u.copy_acceleration_structure_khr;
VK_FROM_HANDLE(vk_acceleration_structure, src, copy->info->src);
VK_FROM_HANDLE(vk_acceleration_structure, dst, copy->info->dst);
VK_FROM_HANDLE(vk_acceleration_structure, src_accel_struct, copy->info->src);
VK_FROM_HANDLE(vk_acceleration_structure, dst_accel_struct, copy->info->dst);
struct pipe_box box = { 0 };
u_box_1d(src->offset, MIN2(src->size, dst->size), &box);
state->pctx->resource_copy_region(state->pctx,
lvp_buffer_from_handle(
vk_buffer_to_handle(dst->buffer))->bo, 0,
dst->offset, 0, 0,
lvp_buffer_from_handle(
vk_buffer_to_handle(src->buffer))->bo, 0, &box);
struct lvp_bvh_header *src = (void *)(uintptr_t)vk_acceleration_structure_get_va(src_accel_struct);
struct lvp_bvh_header *dst = (void *)(uintptr_t)vk_acceleration_structure_get_va(dst_accel_struct);
memcpy(dst, src, src->compacted_size);
}
static void
@@ -4526,7 +4521,7 @@ handle_copy_acceleration_structure_to_memory(struct vk_cmd_queue_entry *cmd, str
lvp_device_get_cache_uuid(dst->driver_uuid);
lvp_device_get_cache_uuid(dst->accel_struct_compat);
dst->serialization_size = src->serialization_size;
dst->compacted_size = accel_struct->size;
dst->compacted_size = src->compacted_size;
dst->instance_count = src->instance_count;
for (uint32_t i = 0; i < src->instance_count; i++) {
@@ -4536,7 +4531,7 @@ handle_copy_acceleration_structure_to_memory(struct vk_cmd_queue_entry *cmd, str
dst->instances[i] = node[i].bvh_ptr;
}
memcpy(&dst->instances[dst->instance_count], src, accel_struct->size);
memcpy(&dst->instances[dst->instance_count], src, src->compacted_size);
}
static void
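The serialized blob written by handle_copy_acceleration_structure_to_memory above is laid out as a serialization header, then one 64-bit BLAS pointer per instance, then the compacted BVH data, matching the serialization_size computed in lvp_encode_as. Below is a sketch of that layout, reconstructed from the fields referenced in the hunks; the field order and exact types are assumptions, not the real lvp_accel_struct_serialization_header definition (see the lavapipe headers for that).

#include <stdint.h>

/* Illustrative reconstruction only. */
struct accel_struct_serialization_header_sketch {
   uint8_t  driver_uuid[16];         /* assumed VK_UUID_SIZE bytes */
   uint8_t  accel_struct_compat[16]; /* assumed VK_UUID_SIZE bytes */
   uint64_t serialization_size;      /* size of the entire serialized blob */
   uint64_t compacted_size;          /* size of the BVH copied after the pointers */
   uint64_t instance_count;
   uint64_t instances[];             /* one BLAS address per instance... */
   /* ...followed by compacted_size bytes of BVH data */
};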
@@ -4552,20 +4547,20 @@ handle_write_acceleration_structures_properties(struct vk_cmd_queue_entry *cmd,
for (uint32_t i = 0; i < write->acceleration_structure_count; i++) {
VK_FROM_HANDLE(vk_acceleration_structure, accel_struct, write->acceleration_structures[i]);
struct lvp_bvh_header *header = (void *)(uintptr_t)vk_acceleration_structure_get_va(accel_struct);
switch ((uint32_t)pool->base_type) {
case LVP_QUERY_ACCELERATION_STRUCTURE_COMPACTED_SIZE:
dst[i] = accel_struct->size;
dst[i] = header->compacted_size;
break;
case LVP_QUERY_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE: {
struct lvp_bvh_header *header = (void *)(uintptr_t)vk_acceleration_structure_get_va(accel_struct);
dst[i] = header->serialization_size;
break;
}
case LVP_QUERY_ACCELERATION_STRUCTURE_SIZE:
dst[i] = accel_struct->size;
dst[i] = header->compacted_size;
break;
case LVP_QUERY_ACCELERATION_STRUCTURE_INSTANCE_COUNT: {
struct lvp_bvh_header *header = (void *)(uintptr_t)vk_acceleration_structure_get_va(accel_struct);
dst[i] = header->instance_count;
break;
}