vulkan: Init all update scratch at once
Some checks are pending
macOS-CI / macOS-CI (dri) (push) Waiting to run
macOS-CI / macOS-CI (xlib) (push) Waiting to run

Reviewed-by: Natalie Vock <natalie.vock@gmx.de>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/39752>
This commit is contained in:
Konstantin Seurer 2026-02-07 11:58:17 +01:00 committed by Marge Bot
parent 411e23d389
commit 01156c0697
3 changed files with 54 additions and 35 deletions

View file

@ -733,46 +733,64 @@ radv_init_geometry_infos(VkCommandBuffer commandBuffer, const struct vk_accelera
}
static void
radv_init_update_scratch(VkCommandBuffer commandBuffer, const struct vk_acceleration_structure_build_state *states,
                         uint32_t build_count)
{
   VK_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   const struct radv_physical_device *pdev = radv_device_physical(device);

   /* Initialize the update scratch memory for every UPDATE build in the batch at once.
    * Two passes are used so that all radv_fill_memory calls are emitted back-to-back
    * before any radv_update_memory uploads; non-update builds are skipped in both passes.
    */
   for (uint32_t i = 0; i < build_count; i++) {
      const struct vk_acceleration_structure_build_state *state = &states[i];

      if (state->config.internal_type != VK_INTERNAL_BUILD_TYPE_UPDATE)
         continue;

      uint64_t scratch = state->build_info->scratchData.deviceAddress;

      struct update_scratch_layout layout;
      radv_get_update_scratch_layout(device, state, &layout);

      /* Prepare ready counts for internal nodes */
      radv_fill_memory(cmd_buffer, scratch + layout.internal_ready_count_offset,
                       layout.size - layout.internal_ready_count_offset, 0x0, VK_ADDRESS_COPY_DEVICE_LOCAL_BIT_KHR);
   }

   for (uint32_t i = 0; i < build_count; i++) {
      const struct vk_acceleration_structure_build_state *state = &states[i];

      if (state->config.internal_type != VK_INTERNAL_BUILD_TYPE_UPDATE)
         continue;

      /* geometryCount == 1 passes the data as push constant. */
      if (radv_use_bvh8(pdev) && !(state->config.update_key[1] & RADV_BUILD_FLAG_UPDATE_SINGLE_GEOMETRY)) {
         uint64_t scratch = state->build_info->scratchData.deviceAddress;

         struct update_scratch_layout layout;
         radv_get_update_scratch_layout(device, state, &layout);

         uint32_t data_size = sizeof(struct vk_bvh_geometry_data) * state->build_info->geometryCount;
         struct vk_bvh_geometry_data *data = malloc(data_size);
         if (!data) {
            vk_command_buffer_set_error(&cmd_buffer->vk, VK_ERROR_OUT_OF_HOST_MEMORY);
            return;
         }

         /* Gather per-geometry data; first_id accumulates primitive counts so each
          * geometry knows the index of its first primitive.
          */
         uint32_t first_id = 0;
         for (uint32_t j = 0; j < state->build_info->geometryCount; j++) {
            const VkAccelerationStructureGeometryKHR *geom =
               state->build_info->pGeometries ? &state->build_info->pGeometries[j] : state->build_info->ppGeometries[j];
            const VkAccelerationStructureBuildRangeInfoKHR *build_range_info = &state->build_range_infos[j];

            data[j] = vk_fill_geometry_data(state->build_info->type, first_id, j, geom, build_range_info);
            first_id += build_range_info->primitiveCount;
         }

         radv_update_memory(cmd_buffer, scratch + layout.geometry_data_offset, data_size, data,
                            VK_ADDRESS_COPY_DEVICE_LOCAL_BIT_KHR);
         free(data);
      }
   }
}

View file

@ -1271,11 +1271,12 @@ vk_cmd_build_acceleration_structures(VkCommandBuffer commandBuffer,
device->write_buffer_cp(commandBuffer, pInfos[i].scratchData.deviceAddress + states[i].scratch.header_offset,
&header, sizeof(header));
} else {
ops->init_update_scratch(commandBuffer, &states[i]);
}
}
if (batch_state.any_update)
ops->init_update_scratch(commandBuffer, states, infoCount);
bool flushed_compute_after_init_update_scratch = false;
bool flushed_cp_after_init_update_scratch = true;

View file

@ -164,9 +164,9 @@ struct vk_acceleration_structure_build_ops {
VkResult (*encode_prepare[MAX_ENCODE_PASSES])(VkCommandBuffer cmd_buffer, const struct vk_acceleration_structure_build_state *state);
void (*encode_as[MAX_ENCODE_PASSES])(VkCommandBuffer cmd_buffer, const struct vk_acceleration_structure_build_state *state);
void (*init_update_scratch)(VkCommandBuffer cmd_buffer, const struct vk_acceleration_structure_build_state *state);
void (*init_update_scratch)(VkCommandBuffer cmd_buffer, const struct vk_acceleration_structure_build_state *states, uint32_t build_count);
void (*update_prepare[MAX_ENCODE_PASSES])(VkCommandBuffer cmd_buffer, const struct vk_acceleration_structure_build_state *state,
bool flushed_cp_after_init_update_scratch, bool flushed_compute_after_init_update_scratch);
bool flushed_cp_after_init_update_scratch, bool flushed_compute_after_init_update_scratch);
void (*update_as[MAX_ENCODE_PASSES])(VkCommandBuffer cmd_buffer, const struct vk_acceleration_structure_build_state *state);
const uint32_t *leaf_spirv_override;