mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-01-03 18:00:10 +01:00
radv: add support for VkPipelineCreateFlags2CreateInfoKHR
If a VkPipelineCreateFlags2CreateInfoKHR structure is present in the pNext chain, its 64-bit flags are used instead of the legacy 32-bit flags field.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24392>
This commit is contained in:
parent
f4b9c5b1d0
commit
9089b091b2
8 changed files with 99 additions and 59 deletions
|
|
@ -1250,7 +1250,8 @@ sqtt_CreateGraphicsPipelines(VkDevice _device, VkPipelineCache pipelineCache, ui
|
|||
if (!pipeline)
|
||||
continue;
|
||||
|
||||
if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)
|
||||
const VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(&pCreateInfos[i]);
|
||||
if (create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR)
|
||||
continue;
|
||||
|
||||
result = radv_sqtt_reloc_graphics_shaders(device, radv_pipeline_to_graphics(pipeline));
|
||||
|
|
@ -1326,7 +1327,8 @@ sqtt_CreateRayTracingPipelinesKHR(VkDevice _device, VkDeferredOperationKHR defer
|
|||
if (!pipeline)
|
||||
continue;
|
||||
|
||||
if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)
|
||||
const VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(&pCreateInfos[i]);
|
||||
if (create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR)
|
||||
continue;
|
||||
|
||||
result = radv_register_pipeline(device, pipeline);
|
||||
|
|
|
|||
|
|
@ -61,16 +61,16 @@ radv_shader_need_indirect_descriptor_sets(const struct radv_shader *shader)
|
|||
}
|
||||
|
||||
bool
|
||||
radv_pipeline_capture_shaders(const struct radv_device *device, VkPipelineCreateFlags flags)
|
||||
radv_pipeline_capture_shaders(const struct radv_device *device, VkPipelineCreateFlags2KHR flags)
|
||||
{
|
||||
return (flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR) ||
|
||||
return (flags & VK_PIPELINE_CREATE_2_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR) ||
|
||||
(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS) || device->keep_shader_info;
|
||||
}
|
||||
|
||||
bool
|
||||
radv_pipeline_capture_shader_stats(const struct radv_device *device, VkPipelineCreateFlags flags)
|
||||
radv_pipeline_capture_shader_stats(const struct radv_device *device, VkPipelineCreateFlags2KHR flags)
|
||||
{
|
||||
return (flags & VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR) ||
|
||||
return (flags & VK_PIPELINE_CREATE_2_CAPTURE_STATISTICS_BIT_KHR) ||
|
||||
(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS) || device->keep_shader_info;
|
||||
}
|
||||
|
||||
|
|
@ -145,13 +145,13 @@ radv_convert_buffer_robustness(const struct radv_device *device, VkPipelineRobus
|
|||
|
||||
struct radv_pipeline_key
|
||||
radv_generate_pipeline_key(const struct radv_device *device, const VkPipelineShaderStageCreateInfo *stages,
|
||||
const unsigned num_stages, VkPipelineCreateFlags flags, const void *pNext)
|
||||
const unsigned num_stages, VkPipelineCreateFlags2KHR flags, const void *pNext)
|
||||
{
|
||||
struct radv_pipeline_key key;
|
||||
|
||||
memset(&key, 0, sizeof(key));
|
||||
|
||||
if (flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
|
||||
if (flags & VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT_KHR)
|
||||
key.optimisations_disabled = 1;
|
||||
|
||||
key.disable_aniso_single_level =
|
||||
|
|
|
|||
|
|
@ -111,13 +111,13 @@ radv_hash_rt_shaders(unsigned char *hash, const VkRayTracingPipelineCreateInfoKH
|
|||
}
|
||||
}
|
||||
|
||||
const uint32_t pipeline_flags =
|
||||
pCreateInfo->flags &
|
||||
(VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR | VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR |
|
||||
VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR |
|
||||
VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR |
|
||||
VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR |
|
||||
VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR | VK_PIPELINE_CREATE_LIBRARY_BIT_KHR);
|
||||
const uint64_t pipeline_flags =
|
||||
radv_get_pipeline_create_flags(pCreateInfo) &
|
||||
(VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR | VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_AABBS_BIT_KHR |
|
||||
VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR |
|
||||
VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR |
|
||||
VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR |
|
||||
VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR | VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR);
|
||||
_mesa_sha1_update(&ctx, &pipeline_flags, 4);
|
||||
|
||||
_mesa_sha1_update(&ctx, &flags, 4);
|
||||
|
|
@ -473,7 +473,7 @@ radv_ray_tracing_pipeline_cache_search(struct radv_device *device, struct vk_pip
|
|||
|
||||
struct radv_pipeline_cache_object *pipeline_obj = container_of(object, struct radv_pipeline_cache_object, base);
|
||||
|
||||
bool is_library = pipeline->base.base.create_flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR;
|
||||
bool is_library = pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR;
|
||||
bool complete = true;
|
||||
unsigned idx = 0;
|
||||
|
||||
|
|
|
|||
|
|
@ -213,7 +213,7 @@ radv_compute_pipeline_compile(struct radv_compute_pipeline *pipeline, struct rad
|
|||
goto done;
|
||||
}
|
||||
|
||||
if (pipeline->base.create_flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
|
||||
if (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR)
|
||||
return VK_PIPELINE_COMPILE_REQUIRED;
|
||||
|
||||
int64_t stage_start = os_time_get_nano();
|
||||
|
|
@ -266,7 +266,7 @@ radv_compute_pipeline_create(VkDevice _device, VkPipelineCache _cache, const VkC
|
|||
}
|
||||
|
||||
radv_pipeline_init(device, &pipeline->base, RADV_PIPELINE_COMPUTE);
|
||||
pipeline->base.create_flags = pCreateInfo->flags;
|
||||
pipeline->base.create_flags = radv_get_pipeline_create_flags(pCreateInfo);
|
||||
pipeline->base.is_internal = _cache == device->meta_state.cache;
|
||||
|
||||
const VkPipelineCreationFeedbackCreateInfo *creation_feedback =
|
||||
|
|
@ -303,7 +303,8 @@ radv_create_compute_pipelines(VkDevice _device, VkPipelineCache pipelineCache, u
|
|||
result = r;
|
||||
pPipelines[i] = VK_NULL_HANDLE;
|
||||
|
||||
if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT)
|
||||
VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(&pCreateInfos[i]);
|
||||
if (create_flags & VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -305,7 +305,7 @@ radv_pipeline_uses_vrs_attachment(const struct radv_graphics_pipeline *pipeline,
|
|||
return !!subpass->fragment_shading_rate_attachment;
|
||||
}
|
||||
|
||||
return (pipeline->base.create_flags & VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR) != 0;
|
||||
return (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR) != 0;
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
@ -727,7 +727,7 @@ radv_pipeline_import_graphics_info(struct radv_device *device, struct radv_graph
|
|||
*
|
||||
* "However, in the specific case that a final link is being
|
||||
* performed between stages and
|
||||
* `VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT` is specified,
|
||||
* `VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT` is specified,
|
||||
* the application can override the pipeline layout with one that is
|
||||
* compatible with that union but does not have the
|
||||
* `VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT` flag set,
|
||||
|
|
@ -736,7 +736,7 @@ radv_pipeline_import_graphics_info(struct radv_device *device, struct radv_graph
|
|||
*
|
||||
* In that case discard whatever was imported before.
|
||||
*/
|
||||
if (pipeline->base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT &&
|
||||
if (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT &&
|
||||
!pipeline_layout->independent_sets) {
|
||||
radv_pipeline_layout_finish(device, layout);
|
||||
radv_pipeline_layout_init(device, layout, false /* independent_sets */);
|
||||
|
|
@ -841,7 +841,7 @@ radv_pipeline_uses_ds_feedback_loop(const struct radv_graphics_pipeline *pipelin
|
|||
}
|
||||
}
|
||||
|
||||
return (pipeline->base.create_flags & VK_PIPELINE_CREATE_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT) != 0;
|
||||
return (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT) != 0;
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
@ -2371,7 +2371,7 @@ radv_pipeline_load_retained_shaders(const struct radv_device *device, struct rad
|
|||
{
|
||||
const VkPipelineLibraryCreateInfoKHR *libs_info =
|
||||
vk_find_struct_const(pCreateInfo->pNext, PIPELINE_LIBRARY_CREATE_INFO_KHR);
|
||||
const bool link_optimize = (pipeline->base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
|
||||
const bool link_optimize = (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
|
||||
|
||||
/* Nothing to load if no libs are imported. */
|
||||
if (!libs_info)
|
||||
|
|
@ -2644,7 +2644,7 @@ radv_graphics_pipeline_compile(struct radv_graphics_pipeline *pipeline, const Vk
|
|||
bool skip_shaders_cache = false;
|
||||
VkResult result = VK_SUCCESS;
|
||||
const bool retain_shaders =
|
||||
!!(pipeline->base.create_flags & VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT);
|
||||
!!(pipeline->base.create_flags & VK_PIPELINE_CREATE_2_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT);
|
||||
struct radv_retained_shaders *retained_shaders = NULL;
|
||||
|
||||
int64_t pipeline_start = os_time_get_nano();
|
||||
|
|
@ -2683,7 +2683,7 @@ radv_graphics_pipeline_compile(struct radv_graphics_pipeline *pipeline, const Vk
|
|||
*/
|
||||
if (fast_linking_enabled || keep_executable_info) {
|
||||
skip_shaders_cache = true;
|
||||
} else if ((pipeline->base.create_flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) && retain_shaders) {
|
||||
} else if ((pipeline->base.create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR) && retain_shaders) {
|
||||
for (uint32_t i = 0; i < MESA_VULKAN_SHADER_STAGES; i++) {
|
||||
if (stages[i].entrypoint && !stages[i].spirv.size) {
|
||||
skip_shaders_cache = true;
|
||||
|
|
@ -2717,7 +2717,7 @@ radv_graphics_pipeline_compile(struct radv_graphics_pipeline *pipeline, const Vk
|
|||
goto done;
|
||||
}
|
||||
|
||||
if (pipeline->base.create_flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
|
||||
if (pipeline->base.create_flags & VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR)
|
||||
return VK_PIPELINE_COMPILE_REQUIRED;
|
||||
|
||||
if (retain_shaders) {
|
||||
|
|
@ -3851,7 +3851,7 @@ radv_is_fast_linking_enabled(const struct radv_graphics_pipeline *pipeline,
|
|||
if (!libs_info)
|
||||
return false;
|
||||
|
||||
return !(pipeline->base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT);
|
||||
return !(pipeline->base.create_flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT);
|
||||
}
|
||||
|
||||
bool
|
||||
|
|
@ -3908,7 +3908,8 @@ radv_graphics_pipeline_init(struct radv_graphics_pipeline *pipeline, struct radv
|
|||
|
||||
/* If we have libraries, import them first. */
|
||||
if (libs_info) {
|
||||
const bool link_optimize = (pipeline->base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
|
||||
const bool link_optimize =
|
||||
(pipeline->base.create_flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
|
||||
|
||||
for (uint32_t i = 0; i < libs_info->libraryCount; i++) {
|
||||
RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
|
||||
|
|
@ -3917,7 +3918,7 @@ radv_graphics_pipeline_init(struct radv_graphics_pipeline *pipeline, struct radv
|
|||
assert(pipeline_lib->type == RADV_PIPELINE_GRAPHICS_LIB);
|
||||
|
||||
/* If we have link time optimization, all libraries must be created with
|
||||
* VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT.
|
||||
* VK_PIPELINE_CREATE_2_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT.
|
||||
*/
|
||||
assert(!link_optimize || gfx_pipeline_lib->base.retain_shaders);
|
||||
|
||||
|
|
@ -4038,7 +4039,7 @@ radv_graphics_pipeline_create(VkDevice _device, VkPipelineCache _cache, const Vk
|
|||
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
radv_pipeline_init(device, &pipeline->base, RADV_PIPELINE_GRAPHICS);
|
||||
pipeline->base.create_flags = pCreateInfo->flags;
|
||||
pipeline->base.create_flags = radv_get_pipeline_create_flags(pCreateInfo);
|
||||
pipeline->base.is_internal = _cache == device->meta_state.cache;
|
||||
|
||||
result = radv_graphics_pipeline_init(pipeline, device, cache, pCreateInfo, extra);
|
||||
|
|
@ -4086,7 +4087,7 @@ radv_graphics_lib_pipeline_init(struct radv_graphics_lib_pipeline *pipeline, str
|
|||
|
||||
pipeline->base.last_vgt_api_stage = MESA_SHADER_NONE;
|
||||
pipeline->base.retain_shaders =
|
||||
(pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT) != 0;
|
||||
(pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT) != 0;
|
||||
pipeline->lib_flags = needed_lib_flags;
|
||||
|
||||
radv_pipeline_layout_init(device, pipeline_layout, false);
|
||||
|
|
@ -4094,7 +4095,7 @@ radv_graphics_lib_pipeline_init(struct radv_graphics_lib_pipeline *pipeline, str
|
|||
/* If we have libraries, import them first. */
|
||||
if (libs_info) {
|
||||
const bool link_optimize =
|
||||
(pipeline->base.base.create_flags & VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
|
||||
(pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT) != 0;
|
||||
|
||||
for (uint32_t i = 0; i < libs_info->libraryCount; i++) {
|
||||
RADV_FROM_HANDLE(radv_pipeline, pipeline_lib, libs_info->pLibraries[i]);
|
||||
|
|
@ -4139,7 +4140,7 @@ radv_graphics_lib_pipeline_create(VkDevice _device, VkPipelineCache _cache,
|
|||
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
radv_pipeline_init(device, &pipeline->base.base, RADV_PIPELINE_GRAPHICS_LIB);
|
||||
pipeline->base.base.create_flags = pCreateInfo->flags;
|
||||
pipeline->base.base.create_flags = radv_get_pipeline_create_flags(pCreateInfo);
|
||||
|
||||
pipeline->mem_ctx = ralloc_context(NULL);
|
||||
|
||||
|
|
@ -4179,8 +4180,9 @@ radv_CreateGraphicsPipelines(VkDevice _device, VkPipelineCache pipelineCache, ui
|
|||
unsigned i = 0;
|
||||
|
||||
for (; i < count; i++) {
|
||||
const VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(&pCreateInfos[i]);
|
||||
VkResult r;
|
||||
if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) {
|
||||
if (create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR) {
|
||||
r = radv_graphics_lib_pipeline_create(_device, pipelineCache, &pCreateInfos[i], pAllocator, &pPipelines[i]);
|
||||
} else {
|
||||
r = radv_graphics_pipeline_create(_device, pipelineCache, &pCreateInfos[i], NULL, pAllocator, &pPipelines[i]);
|
||||
|
|
@ -4189,7 +4191,7 @@ radv_CreateGraphicsPipelines(VkDevice _device, VkPipelineCache pipelineCache, ui
|
|||
result = r;
|
||||
pPipelines[i] = VK_NULL_HANDLE;
|
||||
|
||||
if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT)
|
||||
if (create_flags & VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -111,7 +111,7 @@ radv_create_group_handles(struct radv_device *device, const struct radv_ray_trac
|
|||
struct radv_ray_tracing_group *groups)
|
||||
{
|
||||
bool capture_replay =
|
||||
pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
|
||||
pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
|
||||
for (unsigned i = 0; i < pCreateInfo->groupCount; ++i) {
|
||||
const VkRayTracingShaderGroupCreateInfoKHR *group_info = &pCreateInfo->pGroups[i];
|
||||
switch (group_info->type) {
|
||||
|
|
@ -403,7 +403,7 @@ radv_rt_nir_to_asm(struct radv_device *device, struct vk_pipeline_cache *cache,
|
|||
|
||||
bool dump_shader = radv_can_dump_shader(device, shaders[0], false);
|
||||
bool replayable =
|
||||
pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
|
||||
pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR;
|
||||
|
||||
/* Compile NIR shader to AMD assembly. */
|
||||
binary = radv_shader_nir_to_asm(device, stage, shaders, num_shaders, pipeline_key, keep_executable_info,
|
||||
|
|
@ -441,7 +441,7 @@ radv_rt_compile_shaders(struct radv_device *device, struct vk_pipeline_cache *ca
|
|||
const struct radv_pipeline_key *key, struct radv_ray_tracing_pipeline *pipeline,
|
||||
struct radv_serialized_shader_arena_block *capture_replay_handles)
|
||||
{
|
||||
if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT)
|
||||
if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR)
|
||||
return VK_PIPELINE_COMPILE_REQUIRED;
|
||||
VkResult result = VK_SUCCESS;
|
||||
|
||||
|
|
@ -487,7 +487,7 @@ radv_rt_compile_shaders(struct radv_device *device, struct vk_pipeline_cache *ca
|
|||
}
|
||||
}
|
||||
|
||||
if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)
|
||||
if (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR)
|
||||
return VK_SUCCESS;
|
||||
|
||||
/* create traversal shader */
|
||||
|
|
@ -638,7 +638,7 @@ radv_rt_pipeline_create(VkDevice _device, VkPipelineCache _cache, const VkRayTra
|
|||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
|
||||
radv_pipeline_init(device, &pipeline->base.base, RADV_PIPELINE_RAY_TRACING);
|
||||
pipeline->base.base.create_flags = pCreateInfo->flags;
|
||||
pipeline->base.base.create_flags = radv_get_pipeline_create_flags(pCreateInfo);
|
||||
pipeline->stage_count = local_create_info.stageCount;
|
||||
pipeline->group_count = local_create_info.groupCount;
|
||||
pipeline->stages = stages;
|
||||
|
|
@ -677,7 +677,7 @@ radv_rt_pipeline_create(VkDevice _device, VkPipelineCache _cache, const VkRayTra
|
|||
goto fail;
|
||||
}
|
||||
|
||||
if (!(pipeline->base.base.create_flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)) {
|
||||
if (!(pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR)) {
|
||||
compute_rt_stack_size(pCreateInfo, pipeline);
|
||||
compile_rt_prolog(device, pipeline);
|
||||
|
||||
|
|
@ -738,7 +738,8 @@ radv_CreateRayTracingPipelinesKHR(VkDevice _device, VkDeferredOperationKHR defer
|
|||
result = r;
|
||||
pPipelines[i] = VK_NULL_HANDLE;
|
||||
|
||||
if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT)
|
||||
const VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(&pCreateInfos[i]);
|
||||
if (create_flags & VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1911,6 +1911,36 @@ struct radv_dispatch_info {
|
|||
|
||||
void radv_compute_dispatch(struct radv_cmd_buffer *cmd_buffer, const struct radv_dispatch_info *info);
|
||||
|
||||
static VkPipelineCreateFlagBits2KHR
|
||||
radv_get_pipeline_create_flags(const void *pCreateInfo)
|
||||
{
|
||||
const VkBaseInStructure *base = pCreateInfo;
|
||||
const VkPipelineCreateFlags2CreateInfoKHR *flags2 =
|
||||
vk_find_struct_const(base->pNext, PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR);
|
||||
|
||||
if (flags2)
|
||||
return flags2->flags;
|
||||
|
||||
switch (((VkBaseInStructure *)pCreateInfo)->sType) {
|
||||
case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO: {
|
||||
const VkGraphicsPipelineCreateInfo *create_info = (VkGraphicsPipelineCreateInfo *)pCreateInfo;
|
||||
return create_info->flags;
|
||||
}
|
||||
case VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO: {
|
||||
const VkComputePipelineCreateInfo *create_info = (VkComputePipelineCreateInfo *)pCreateInfo;
|
||||
return create_info->flags;
|
||||
}
|
||||
case VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR: {
|
||||
const VkRayTracingPipelineCreateInfoKHR *create_info = (VkRayTracingPipelineCreateInfoKHR *)pCreateInfo;
|
||||
return create_info->flags;
|
||||
}
|
||||
default:
|
||||
unreachable("invalid pCreateInfo pipeline struct");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct radv_image;
|
||||
struct radv_image_view;
|
||||
|
||||
|
|
@ -2225,7 +2255,7 @@ struct radv_pipeline {
|
|||
struct vk_object_base base;
|
||||
enum radv_pipeline_type type;
|
||||
|
||||
VkPipelineCreateFlags create_flags;
|
||||
VkPipelineCreateFlags2KHR create_flags;
|
||||
|
||||
struct vk_pipeline_cache_object *cache_object;
|
||||
|
||||
|
|
@ -2470,7 +2500,7 @@ struct radv_graphics_pipeline_create_info {
|
|||
|
||||
struct radv_pipeline_key radv_generate_pipeline_key(const struct radv_device *device,
|
||||
const VkPipelineShaderStageCreateInfo *stages,
|
||||
const unsigned num_stages, VkPipelineCreateFlags flags,
|
||||
const unsigned num_stages, VkPipelineCreateFlags2KHR flags,
|
||||
const void *pNext);
|
||||
|
||||
void radv_pipeline_init(struct radv_device *device, struct radv_pipeline *pipeline, enum radv_pipeline_type type);
|
||||
|
|
@ -2484,8 +2514,8 @@ VkResult radv_compute_pipeline_create(VkDevice _device, VkPipelineCache _cache,
|
|||
const VkComputePipelineCreateInfo *pCreateInfo,
|
||||
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipeline);
|
||||
|
||||
bool radv_pipeline_capture_shaders(const struct radv_device *device, VkPipelineCreateFlags flags);
|
||||
bool radv_pipeline_capture_shader_stats(const struct radv_device *device, VkPipelineCreateFlags flags);
|
||||
bool radv_pipeline_capture_shaders(const struct radv_device *device, VkPipelineCreateFlags2KHR flags);
|
||||
bool radv_pipeline_capture_shader_stats(const struct radv_device *device, VkPipelineCreateFlags2KHR flags);
|
||||
|
||||
VkPipelineShaderStageCreateInfo *radv_copy_shader_stage_create_info(struct radv_device *device, uint32_t stageCount,
|
||||
const VkPipelineShaderStageCreateInfo *pStages,
|
||||
|
|
|
|||
|
|
@ -80,7 +80,7 @@ lower_rt_derefs(nir_shader *shader)
|
|||
* Global variables for an RT pipeline
|
||||
*/
|
||||
struct rt_variables {
|
||||
const VkPipelineCreateFlags flags;
|
||||
const VkPipelineCreateFlags2KHR flags;
|
||||
|
||||
/* idx of the next shader to run in the next iteration of the main loop.
|
||||
* During traversal, idx is used to store the SBT index and will contain
|
||||
|
|
@ -124,7 +124,7 @@ struct rt_variables {
|
|||
};
|
||||
|
||||
static struct rt_variables
|
||||
create_rt_variables(nir_shader *shader, const VkPipelineCreateFlags flags)
|
||||
create_rt_variables(nir_shader *shader, const VkPipelineCreateFlags2KHR flags)
|
||||
{
|
||||
struct rt_variables vars = {
|
||||
.flags = flags,
|
||||
|
|
@ -524,7 +524,7 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
|
|||
nir_ssa_def *should_return = nir_test_mask(&b_shader, nir_load_var(&b_shader, vars->cull_mask_and_flags),
|
||||
SpvRayFlagsSkipClosestHitShaderKHRMask);
|
||||
|
||||
if (!(vars->flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR)) {
|
||||
if (!(vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR)) {
|
||||
should_return = nir_ior(&b_shader, should_return,
|
||||
nir_ieq_imm(&b_shader, nir_load_var(&b_shader, vars->shader_va), 0));
|
||||
}
|
||||
|
|
@ -546,7 +546,7 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
|
|||
nir_ssa_def *miss_index = nir_load_var(&b_shader, vars->miss_index);
|
||||
load_sbt_entry(&b_shader, vars, miss_index, SBT_MISS, SBT_RECURSIVE_PTR);
|
||||
|
||||
if (!(vars->flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR)) {
|
||||
if (!(vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR)) {
|
||||
/* In case of a NULL miss shader, do nothing and just return. */
|
||||
nir_push_if(&b_shader, nir_ieq_imm(&b_shader, nir_load_var(&b_shader, vars->shader_va), 0));
|
||||
insert_rt_return(&b_shader, vars);
|
||||
|
|
@ -1126,7 +1126,7 @@ visit_any_hit_shaders(struct radv_device *device, nir_builder *b, struct travers
|
|||
{
|
||||
nir_ssa_def *sbt_idx = nir_load_var(b, vars->idx);
|
||||
|
||||
if (!(vars->flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR))
|
||||
if (!(vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR))
|
||||
nir_push_if(b, nir_ine_imm(b, sbt_idx, 0));
|
||||
|
||||
for (unsigned i = 0; i < data->pipeline->group_count; ++i) {
|
||||
|
|
@ -1160,7 +1160,7 @@ visit_any_hit_shaders(struct radv_device *device, nir_builder *b, struct travers
|
|||
ralloc_free(nir_stage);
|
||||
}
|
||||
|
||||
if (!(vars->flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR))
|
||||
if (!(vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR))
|
||||
nir_pop_if(b, NULL);
|
||||
}
|
||||
|
||||
|
|
@ -1255,7 +1255,7 @@ handle_candidate_aabb(nir_builder *b, struct radv_leaf_intersection *intersectio
|
|||
nir_store_var(b, data->vars->ahit_accept, nir_imm_false(b), 0x1);
|
||||
nir_store_var(b, data->vars->ahit_terminate, nir_imm_false(b), 0x1);
|
||||
|
||||
if (!(data->vars->flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR))
|
||||
if (!(data->vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR))
|
||||
nir_push_if(b, nir_ine_imm(b, nir_load_var(b, inner_vars.idx), 0));
|
||||
|
||||
for (unsigned i = 0; i < data->pipeline->group_count; ++i) {
|
||||
|
|
@ -1304,7 +1304,7 @@ handle_candidate_aabb(nir_builder *b, struct radv_leaf_intersection *intersectio
|
|||
ralloc_free(nir_stage);
|
||||
}
|
||||
|
||||
if (!(data->vars->flags & VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR))
|
||||
if (!(data->vars->flags & VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR))
|
||||
nir_pop_if(b, NULL);
|
||||
|
||||
nir_push_if(b, nir_load_var(b, data->vars->ahit_accept));
|
||||
|
|
@ -1344,6 +1344,8 @@ nir_shader *
|
|||
radv_build_traversal_shader(struct radv_device *device, struct radv_ray_tracing_pipeline *pipeline,
|
||||
const VkRayTracingPipelineCreateInfoKHR *pCreateInfo, const struct radv_pipeline_key *key)
|
||||
{
|
||||
const VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(pCreateInfo);
|
||||
|
||||
/* Create the traversal shader as an intersection shader to prevent validation failures due to
|
||||
* invalid variable modes.*/
|
||||
nir_builder b = radv_meta_init_shader(device, MESA_SHADER_INTERSECTION, "rt_traversal");
|
||||
|
|
@ -1351,7 +1353,7 @@ radv_build_traversal_shader(struct radv_device *device, struct radv_ray_tracing_
|
|||
b.shader->info.workgroup_size[0] = 8;
|
||||
b.shader->info.workgroup_size[1] = device->physical_device->rt_wave_size == 64 ? 8 : 4;
|
||||
b.shader->info.shared_size = device->physical_device->rt_wave_size * MAX_STACK_ENTRY_COUNT * sizeof(uint32_t);
|
||||
struct rt_variables vars = create_rt_variables(b.shader, pCreateInfo->flags);
|
||||
struct rt_variables vars = create_rt_variables(b.shader, create_flags);
|
||||
|
||||
/* Register storage for hit attributes */
|
||||
nir_variable *hit_attribs[RADV_MAX_HIT_ATTRIB_SIZE / sizeof(uint32_t)];
|
||||
|
|
@ -1444,10 +1446,10 @@ radv_build_traversal_shader(struct radv_device *device, struct radv_ray_tracing_
|
|||
.stack_base = 0,
|
||||
.stack_store_cb = store_stack_entry,
|
||||
.stack_load_cb = load_stack_entry,
|
||||
.aabb_cb = (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR)
|
||||
.aabb_cb = (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_AABBS_BIT_KHR)
|
||||
? NULL
|
||||
: handle_candidate_aabb,
|
||||
.triangle_cb = (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR)
|
||||
.triangle_cb = (pipeline->base.base.create_flags & VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR)
|
||||
? NULL
|
||||
: handle_candidate_triangle,
|
||||
.data = &data,
|
||||
|
|
@ -1531,7 +1533,9 @@ radv_nir_lower_rt_abi(nir_shader *shader, const VkRayTracingPipelineCreateInfoKH
|
|||
{
|
||||
nir_function_impl *impl = nir_shader_get_entrypoint(shader);
|
||||
|
||||
struct rt_variables vars = create_rt_variables(shader, pCreateInfo->flags);
|
||||
const VkPipelineCreateFlagBits2KHR create_flags = radv_get_pipeline_create_flags(pCreateInfo);
|
||||
|
||||
struct rt_variables vars = create_rt_variables(shader, create_flags);
|
||||
lower_rt_instructions(shader, &vars, 0);
|
||||
|
||||
if (stack_size) {
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue