diff --git a/src/amd/vulkan/radv_pipeline_rt.c b/src/amd/vulkan/radv_pipeline_rt.c
index ca3868bd2e4..2cb3c1fac9b 100644
--- a/src/amd/vulkan/radv_pipeline_rt.c
+++ b/src/amd/vulkan/radv_pipeline_rt.c
@@ -196,7 +196,8 @@ radv_rt_fill_group_info(struct radv_device *device, const struct radv_ray_tracin
       } else if (groups[idx].recursive_shader != VK_SHADER_UNUSED_KHR) {
          struct radv_shader *library_shader = stages[groups[idx].recursive_shader].shader;
          simple_mtx_lock(&library_shader->replay_mtx);
-         if (!library_shader->has_replay_alloc) {
+         /* If arena_va is 0, the pipeline is monolithic and the shader was inlined into raygen */
+         if (!library_shader->has_replay_alloc && handle->recursive_shader_alloc.arena_va) {
             union radv_shader_arena_block *new_block =
                radv_replay_shader_arena_block(device, &handle->recursive_shader_alloc, library_shader);
             if (!new_block) {
@@ -956,7 +957,8 @@ radv_GetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline
       uint32_t recursive_shader = rt_pipeline->groups[firstGroup + i].recursive_shader;
       if (recursive_shader != VK_SHADER_UNUSED_KHR) {
          struct radv_shader *shader = rt_pipeline->stages[recursive_shader].shader;
-         data[i].recursive_shader_alloc = radv_serialize_shader_arena_block(shader->alloc);
+         if (shader)
+            data[i].recursive_shader_alloc = radv_serialize_shader_arena_block(shader->alloc);
       }
       data[i].non_recursive_idx = rt_pipeline->groups[firstGroup + i].handle.any_hit_index;
    }