diff --git a/src/amd/vulkan/radv_pipeline_rt.c b/src/amd/vulkan/radv_pipeline_rt.c
index 67c95ad2240..246d8d61350 100644
--- a/src/amd/vulkan/radv_pipeline_rt.c
+++ b/src/amd/vulkan/radv_pipeline_rt.c
@@ -156,11 +156,63 @@ radv_create_group_handles(struct radv_device *device, const VkRayTracingPipeline
    return VK_SUCCESS;
 }
 
+static VkResult
+radv_rt_init_capture_replay(struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
+                            const struct radv_ray_tracing_stage *stages, const struct radv_ray_tracing_group *groups,
+                            struct radv_serialized_shader_arena_block *capture_replay_blocks)
+{
+   VkResult result = VK_SUCCESS;
+   uint32_t idx;
+
+   for (idx = 0; idx < pCreateInfo->groupCount; idx++) {
+      if (!pCreateInfo->pGroups[idx].pShaderGroupCaptureReplayHandle)
+         continue;
+
+      const struct radv_rt_capture_replay_handle *handle =
+         (const struct radv_rt_capture_replay_handle *)pCreateInfo->pGroups[idx].pShaderGroupCaptureReplayHandle;
+
+      if (groups[idx].recursive_shader < pCreateInfo->stageCount) {
+         capture_replay_blocks[groups[idx].recursive_shader] = handle->recursive_shader_alloc;
+      } else if (groups[idx].recursive_shader != VK_SHADER_UNUSED_KHR) {
+         struct radv_shader *library_shader = stages[groups[idx].recursive_shader].shader;
+         simple_mtx_lock(&library_shader->replay_mtx);
+         /* If arena_va is 0, the pipeline is monolithic and the shader was inlined into raygen */
+         if (!library_shader->has_replay_alloc && handle->recursive_shader_alloc.arena_va) {
+            union radv_shader_arena_block *new_block =
+               radv_replay_shader_arena_block(device, &handle->recursive_shader_alloc, library_shader);
+            if (!new_block) {
+               result = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS;
+               goto reloc_out;
+            }
+
+            radv_shader_wait_for_upload(device, library_shader->upload_seq);
+            radv_free_shader_memory(device, library_shader->alloc);
+
+            library_shader->alloc = new_block;
+            library_shader->has_replay_alloc = true;
+
+            library_shader->bo = library_shader->alloc->arena->bo;
+            library_shader->va = radv_buffer_get_va(library_shader->bo) + library_shader->alloc->offset;
+
+            if (!radv_shader_reupload(device, library_shader)) {
+               result = VK_ERROR_UNKNOWN;
+               goto reloc_out;
+            }
+         }
+
+      reloc_out:
+         simple_mtx_unlock(&library_shader->replay_mtx);
+         if (result != VK_SUCCESS)
+            return result;
+      }
+   }
+
+   return result;
+}
+
 static VkResult
 radv_rt_fill_group_info(struct radv_device *device, const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
-                        const struct radv_ray_tracing_stage *stages,
-                        struct radv_serialized_shader_arena_block *capture_replay_blocks,
-                        struct radv_ray_tracing_group *groups)
+                        const struct radv_ray_tracing_stage *stages, struct radv_ray_tracing_group *groups)
 {
    VkResult result = radv_create_group_handles(device, pCreateInfo, stages, groups);
 
@@ -173,46 +225,6 @@ radv_rt_fill_group_info(struct radv_device *device, const VkRayTracingPipelineCr
       groups[idx].recursive_shader = pCreateInfo->pGroups[idx].closestHitShader;
       groups[idx].any_hit_shader = pCreateInfo->pGroups[idx].anyHitShader;
       groups[idx].intersection_shader = pCreateInfo->pGroups[idx].intersectionShader;
-
-      if (pCreateInfo->pGroups[idx].pShaderGroupCaptureReplayHandle) {
-         const struct radv_rt_capture_replay_handle *handle =
-            (const struct radv_rt_capture_replay_handle *)pCreateInfo->pGroups[idx].pShaderGroupCaptureReplayHandle;
-
-         if (groups[idx].recursive_shader < pCreateInfo->stageCount) {
-            capture_replay_blocks[groups[idx].recursive_shader] = handle->recursive_shader_alloc;
-         } else if (groups[idx].recursive_shader != VK_SHADER_UNUSED_KHR) {
-            struct radv_shader *library_shader = stages[groups[idx].recursive_shader].shader;
-            simple_mtx_lock(&library_shader->replay_mtx);
-            /* If arena_va is 0, the pipeline is monolithic and the shader was inlined into raygen */
-            if (!library_shader->has_replay_alloc && handle->recursive_shader_alloc.arena_va) {
-               union radv_shader_arena_block *new_block =
-                  radv_replay_shader_arena_block(device, &handle->recursive_shader_alloc, library_shader);
-               if (!new_block) {
-                  result = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS;
-                  goto reloc_out;
-               }
-
-               radv_shader_wait_for_upload(device, library_shader->upload_seq);
-               radv_free_shader_memory(device, library_shader->alloc);
-
-               library_shader->alloc = new_block;
-               library_shader->has_replay_alloc = true;
-
-               library_shader->bo = library_shader->alloc->arena->bo;
-               library_shader->va = radv_buffer_get_va(library_shader->bo) + library_shader->alloc->offset;
-
-               if (!radv_shader_reupload(device, library_shader)) {
-                  result = VK_ERROR_UNKNOWN;
-                  goto reloc_out;
-               }
-            }
-
-         reloc_out:
-            simple_mtx_unlock(&library_shader->replay_mtx);
-            if (result != VK_SUCCESS)
-               return result;
-         }
-      }
    }
 
    /* copy and adjust library groups (incl. handles) */
@@ -872,7 +884,11 @@ radv_rt_pipeline_create(VkDevice _device, VkPipelineCache _cache, const VkRayTra
    pipeline->traversal_uniform_robustness2 = true;
 
    radv_init_rt_stage_hashes(device, pCreateInfo, stages, stage_keys);
-   result = radv_rt_fill_group_info(device, pCreateInfo, stages, capture_replay_blocks, pipeline->groups);
+   result = radv_rt_fill_group_info(device, pCreateInfo, stages, pipeline->groups);
+   if (result != VK_SUCCESS)
+      goto fail;
+
+   result = radv_rt_init_capture_replay(device, pCreateInfo, stages, pipeline->groups, capture_replay_blocks);
    if (result != VK_SUCCESS)
       goto fail;
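
For context: this patch splits RADV's half of VK_KHR_ray_tracing_pipeline shader group handle capture/replay out of radv_rt_fill_group_info() into its own pass. Below is a minimal sketch of the application-side flow that produces the pShaderGroupCaptureReplayHandle pointers consumed above, assuming the rayTracingPipelineShaderGroupHandleCaptureReplay feature is enabled and the pipeline was created with VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR. The helper functions and variable names are illustrative, not part of this patch.

#include <stdlib.h>
#include <vulkan/vulkan.h>

/* Capture: save one opaque replay handle per shader group of `pipeline`.
 * `handle_size` comes from VkPhysicalDeviceRayTracingPipelinePropertiesKHR::
 * shaderGroupHandleCaptureReplaySize. Returns a buffer of
 * group_count * handle_size bytes that the caller frees. */
static uint8_t *
save_group_replay_handles(VkDevice device, VkPipeline pipeline, uint32_t group_count, size_t handle_size)
{
   /* Extension entry points are not exported by the loader; fetch via vkGetDeviceProcAddr. */
   PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR get_handles =
      (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)vkGetDeviceProcAddr(
         device, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR");

   uint8_t *capture_data = malloc(group_count * handle_size);
   get_handles(device, pipeline, 0, group_count, group_count * handle_size, capture_data);
   return capture_data;
}

/* Replay: point each group create info at its saved handle before calling
 * vkCreateRayTracingPipelinesKHR, so the driver (radv_rt_init_capture_replay
 * above) can re-place the shaders at their captured virtual addresses and
 * keep recorded SBT contents valid. */
static void
restore_group_replay_handles(VkRayTracingShaderGroupCreateInfoKHR *groups, uint32_t group_count,
                             const uint8_t *capture_data, size_t handle_size)
{
   for (uint32_t i = 0; i < group_count; i++)
      groups[i].pShaderGroupCaptureReplayHandle = capture_data + i * handle_size;
}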