mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2025-12-24 00:10:10 +01:00
venus: sync protocol for ray tracing support
Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/33907>
This commit is contained in:
parent
a3f827319f
commit
6bac77b75c
11 changed files with 4721 additions and 1641 deletions
|
|
@ -1,4 +1,4 @@
|
|||
/* This file is generated by venus-protocol git-768a9862. */
|
||||
/* This file is generated by venus-protocol git-d553be88. */
|
||||
|
||||
/*
|
||||
* Copyright 2020 Google LLC
|
||||
|
|
@ -43,5 +43,6 @@
|
|||
#include "vn_protocol_driver_command_buffer.h"
|
||||
#include "vn_protocol_driver_private_data_slot.h"
|
||||
#include "vn_protocol_driver_host_copy.h"
|
||||
#include "vn_protocol_driver_acceleration_structure.h"
|
||||
|
||||
#endif /* VN_PROTOCOL_DRIVER_H */
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
|
@ -378,6 +378,30 @@ typedef enum VkCommandTypeEXT {
|
|||
VK_COMMAND_TYPE_vkGetFenceFdKHR_EXT = 239,
|
||||
VK_COMMAND_TYPE_vkCmdSetSampleLocationsEXT_EXT = 283,
|
||||
VK_COMMAND_TYPE_vkGetPhysicalDeviceMultisamplePropertiesEXT_EXT = 284,
|
||||
VK_COMMAND_TYPE_vkCreateAccelerationStructureKHR_EXT = 304,
|
||||
VK_COMMAND_TYPE_vkDestroyAccelerationStructureKHR_EXT = 305,
|
||||
VK_COMMAND_TYPE_vkCmdBuildAccelerationStructuresKHR_EXT = 306,
|
||||
VK_COMMAND_TYPE_vkCmdBuildAccelerationStructuresIndirectKHR_EXT = 307,
|
||||
VK_COMMAND_TYPE_vkBuildAccelerationStructuresKHR_EXT = 308,
|
||||
VK_COMMAND_TYPE_vkCopyAccelerationStructureKHR_EXT = 309,
|
||||
VK_COMMAND_TYPE_vkCopyAccelerationStructureToMemoryKHR_EXT = 310,
|
||||
VK_COMMAND_TYPE_vkCopyMemoryToAccelerationStructureKHR_EXT = 311,
|
||||
VK_COMMAND_TYPE_vkWriteAccelerationStructuresPropertiesKHR_EXT = 312,
|
||||
VK_COMMAND_TYPE_vkCmdCopyAccelerationStructureKHR_EXT = 313,
|
||||
VK_COMMAND_TYPE_vkCmdCopyAccelerationStructureToMemoryKHR_EXT = 314,
|
||||
VK_COMMAND_TYPE_vkCmdCopyMemoryToAccelerationStructureKHR_EXT = 315,
|
||||
VK_COMMAND_TYPE_vkGetAccelerationStructureDeviceAddressKHR_EXT = 316,
|
||||
VK_COMMAND_TYPE_vkCmdWriteAccelerationStructuresPropertiesKHR_EXT = 317,
|
||||
VK_COMMAND_TYPE_vkGetDeviceAccelerationStructureCompatibilityKHR_EXT = 318,
|
||||
VK_COMMAND_TYPE_vkGetAccelerationStructureBuildSizesKHR_EXT = 319,
|
||||
VK_COMMAND_TYPE_vkCmdTraceRaysKHR_EXT = 320,
|
||||
VK_COMMAND_TYPE_vkCreateRayTracingPipelinesKHR_EXT = 321,
|
||||
VK_COMMAND_TYPE_vkGetRayTracingShaderGroupHandlesKHR_EXT = 322,
|
||||
VK_COMMAND_TYPE_vkGetRayTracingShaderGroupHandlesNV_EXT = 322,
|
||||
VK_COMMAND_TYPE_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_EXT = 323,
|
||||
VK_COMMAND_TYPE_vkCmdTraceRaysIndirectKHR_EXT = 324,
|
||||
VK_COMMAND_TYPE_vkGetRayTracingShaderGroupStackSizeKHR_EXT = 325,
|
||||
VK_COMMAND_TYPE_vkCmdSetRayTracingPipelineStackSizeKHR_EXT = 326,
|
||||
VK_COMMAND_TYPE_vkGetImageDrmFormatModifierPropertiesEXT_EXT = 187,
|
||||
VK_COMMAND_TYPE_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR_EXT = 235,
|
||||
VK_COMMAND_TYPE_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_EXT = 235,
|
||||
|
|
@ -385,10 +409,16 @@ typedef enum VkCommandTypeEXT {
|
|||
VK_COMMAND_TYPE_vkGetCalibratedTimestampsEXT_EXT = 236,
|
||||
VK_COMMAND_TYPE_vkGetPhysicalDeviceFragmentShadingRatesKHR_EXT = 277,
|
||||
VK_COMMAND_TYPE_vkCmdSetFragmentShadingRateKHR_EXT = 278,
|
||||
VK_COMMAND_TYPE_vkCreateDeferredOperationKHR_EXT = 299,
|
||||
VK_COMMAND_TYPE_vkDestroyDeferredOperationKHR_EXT = 300,
|
||||
VK_COMMAND_TYPE_vkGetDeferredOperationMaxConcurrencyKHR_EXT = 301,
|
||||
VK_COMMAND_TYPE_vkGetDeferredOperationResultKHR_EXT = 302,
|
||||
VK_COMMAND_TYPE_vkDeferredOperationJoinKHR_EXT = 303,
|
||||
VK_COMMAND_TYPE_vkCmdSetVertexInputEXT_EXT = 255,
|
||||
VK_COMMAND_TYPE_vkCmdSetPatchControlPointsEXT_EXT = 233,
|
||||
VK_COMMAND_TYPE_vkCmdSetLogicOpEXT_EXT = 234,
|
||||
VK_COMMAND_TYPE_vkCmdSetColorWriteEnableEXT_EXT = 254,
|
||||
VK_COMMAND_TYPE_vkCmdTraceRaysIndirect2KHR_EXT = 327,
|
||||
VK_COMMAND_TYPE_vkCmdDrawMultiEXT_EXT = 247,
|
||||
VK_COMMAND_TYPE_vkCmdDrawMultiIndexedEXT_EXT = 248,
|
||||
VK_COMMAND_TYPE_vkCmdSetDepthClampEnableEXT_EXT = 257,
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load diff
|
|
@ -631,6 +631,52 @@ vn_decode_VkSamplerYcbcrConversion(struct vn_cs_decoder *dec, VkSamplerYcbcrConv
|
|||
vn_cs_handle_store_id((void **)val, id, VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION);
|
||||
}
|
||||
|
||||
/* VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureKHR) */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureKHR(const VkAccelerationStructureKHR *val)
|
||||
{
|
||||
return sizeof(uint64_t);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureKHR(struct vn_cs_encoder *enc, const VkAccelerationStructureKHR *val)
|
||||
{
|
||||
const uint64_t id = vn_cs_handle_load_id((const void **)val, VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR);
|
||||
vn_encode_uint64_t(enc, &id);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkAccelerationStructureKHR(struct vn_cs_decoder *dec, VkAccelerationStructureKHR *val)
|
||||
{
|
||||
uint64_t id;
|
||||
vn_decode_uint64_t(dec, &id);
|
||||
vn_cs_handle_store_id((void **)val, id, VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR);
|
||||
}
|
||||
|
||||
/* VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeferredOperationKHR) */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkDeferredOperationKHR(const VkDeferredOperationKHR *val)
|
||||
{
|
||||
return sizeof(uint64_t);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkDeferredOperationKHR(struct vn_cs_encoder *enc, const VkDeferredOperationKHR *val)
|
||||
{
|
||||
const uint64_t id = vn_cs_handle_load_id((const void **)val, VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR);
|
||||
vn_encode_uint64_t(enc, &id);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkDeferredOperationKHR(struct vn_cs_decoder *dec, VkDeferredOperationKHR *val)
|
||||
{
|
||||
uint64_t id;
|
||||
vn_decode_uint64_t(dec, &id);
|
||||
vn_cs_handle_store_id((void **)val, id, VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR);
|
||||
}
|
||||
|
||||
/* VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPrivateDataSlot) */
|
||||
|
||||
static inline size_t
|
||||
|
|
|
|||
|
|
@ -19,8 +19,8 @@ struct vn_info_extension {
|
|||
};
|
||||
|
||||
/* sorted by extension names for bsearch */
|
||||
static const uint32_t _vn_info_extension_count = 156;
|
||||
static const struct vn_info_extension _vn_info_extensions[156] = {
|
||||
static const uint32_t _vn_info_extension_count = 162;
|
||||
static const struct vn_info_extension _vn_info_extensions[162] = {
|
||||
{ "VK_ARM_rasterization_order_attachment_access", 343, 1 },
|
||||
{ "VK_EXT_4444_formats", 341, 1 },
|
||||
{ "VK_EXT_attachment_feedback_loop_layout", 340, 2 },
|
||||
|
|
@ -99,6 +99,7 @@ static const struct vn_info_extension _vn_info_extensions[156] = {
|
|||
{ "VK_EXT_ycbcr_image_arrays", 253, 1 },
|
||||
{ "VK_KHR_16bit_storage", 84, 1 },
|
||||
{ "VK_KHR_8bit_storage", 178, 1 },
|
||||
{ "VK_KHR_acceleration_structure", 151, 13 },
|
||||
{ "VK_KHR_bind_memory2", 158, 1 },
|
||||
{ "VK_KHR_buffer_device_address", 258, 1 },
|
||||
{ "VK_KHR_calibrated_timestamps", 544, 1 },
|
||||
|
|
@ -106,6 +107,7 @@ static const struct vn_info_extension _vn_info_extensions[156] = {
|
|||
{ "VK_KHR_copy_commands2", 338, 1 },
|
||||
{ "VK_KHR_create_renderpass2", 110, 1 },
|
||||
{ "VK_KHR_dedicated_allocation", 128, 3 },
|
||||
{ "VK_KHR_deferred_host_operations", 269, 4 },
|
||||
{ "VK_KHR_depth_clamp_zero_one", 605, 1 },
|
||||
{ "VK_KHR_depth_stencil_resolve", 200, 1 },
|
||||
{ "VK_KHR_descriptor_update_template", 86, 1 },
|
||||
|
|
@ -145,6 +147,10 @@ static const struct vn_info_extension _vn_info_extensions[156] = {
|
|||
{ "VK_KHR_multiview", 54, 1 },
|
||||
{ "VK_KHR_pipeline_library", 291, 1 },
|
||||
{ "VK_KHR_push_descriptor", 81, 2 },
|
||||
{ "VK_KHR_ray_query", 349, 1 },
|
||||
{ "VK_KHR_ray_tracing_maintenance1", 387, 1 },
|
||||
{ "VK_KHR_ray_tracing_pipeline", 348, 1 },
|
||||
{ "VK_KHR_ray_tracing_position_fetch", 482, 1 },
|
||||
{ "VK_KHR_relaxed_block_layout", 145, 1 },
|
||||
{ "VK_KHR_sampler_mirror_clamp_to_edge", 15, 3 },
|
||||
{ "VK_KHR_sampler_ycbcr_conversion", 157, 14 },
|
||||
|
|
|
|||
|
|
@ -2828,6 +2828,304 @@ vn_encode_VkComputePipelineCreateInfo(struct vn_cs_encoder *enc, const VkCompute
|
|||
vn_encode_VkComputePipelineCreateInfo_self(enc, val);
|
||||
}
|
||||
|
||||
/* struct VkRayTracingShaderGroupCreateInfoKHR chain */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkRayTracingShaderGroupCreateInfoKHR_pnext(const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
return vn_sizeof_simple_pointer(NULL);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkRayTracingShaderGroupCreateInfoKHR_self(const VkRayTracingShaderGroupCreateInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
/* skip val->{sType,pNext} */
|
||||
size += vn_sizeof_VkRayTracingShaderGroupTypeKHR(&val->type);
|
||||
size += vn_sizeof_uint32_t(&val->generalShader);
|
||||
size += vn_sizeof_uint32_t(&val->closestHitShader);
|
||||
size += vn_sizeof_uint32_t(&val->anyHitShader);
|
||||
size += vn_sizeof_uint32_t(&val->intersectionShader);
|
||||
size += vn_sizeof_simple_pointer(val->pShaderGroupCaptureReplayHandle);
|
||||
if (val->pShaderGroupCaptureReplayHandle)
|
||||
assert(false);
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkRayTracingShaderGroupCreateInfoKHR(const VkRayTracingShaderGroupCreateInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
|
||||
size += vn_sizeof_VkStructureType(&val->sType);
|
||||
size += vn_sizeof_VkRayTracingShaderGroupCreateInfoKHR_pnext(val->pNext);
|
||||
size += vn_sizeof_VkRayTracingShaderGroupCreateInfoKHR_self(val);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkRayTracingShaderGroupCreateInfoKHR_pnext(struct vn_cs_encoder *enc, const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
vn_encode_simple_pointer(enc, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkRayTracingShaderGroupCreateInfoKHR_self(struct vn_cs_encoder *enc, const VkRayTracingShaderGroupCreateInfoKHR *val)
|
||||
{
|
||||
/* skip val->{sType,pNext} */
|
||||
vn_encode_VkRayTracingShaderGroupTypeKHR(enc, &val->type);
|
||||
vn_encode_uint32_t(enc, &val->generalShader);
|
||||
vn_encode_uint32_t(enc, &val->closestHitShader);
|
||||
vn_encode_uint32_t(enc, &val->anyHitShader);
|
||||
vn_encode_uint32_t(enc, &val->intersectionShader);
|
||||
if (vn_encode_simple_pointer(enc, val->pShaderGroupCaptureReplayHandle))
|
||||
assert(false);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkRayTracingShaderGroupCreateInfoKHR(struct vn_cs_encoder *enc, const VkRayTracingShaderGroupCreateInfoKHR *val)
|
||||
{
|
||||
assert(val->sType == VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR);
|
||||
vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR });
|
||||
vn_encode_VkRayTracingShaderGroupCreateInfoKHR_pnext(enc, val->pNext);
|
||||
vn_encode_VkRayTracingShaderGroupCreateInfoKHR_self(enc, val);
|
||||
}
|
||||
|
||||
/* struct VkRayTracingPipelineInterfaceCreateInfoKHR chain */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkRayTracingPipelineInterfaceCreateInfoKHR_pnext(const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
return vn_sizeof_simple_pointer(NULL);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkRayTracingPipelineInterfaceCreateInfoKHR_self(const VkRayTracingPipelineInterfaceCreateInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
/* skip val->{sType,pNext} */
|
||||
size += vn_sizeof_uint32_t(&val->maxPipelineRayPayloadSize);
|
||||
size += vn_sizeof_uint32_t(&val->maxPipelineRayHitAttributeSize);
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkRayTracingPipelineInterfaceCreateInfoKHR(const VkRayTracingPipelineInterfaceCreateInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
|
||||
size += vn_sizeof_VkStructureType(&val->sType);
|
||||
size += vn_sizeof_VkRayTracingPipelineInterfaceCreateInfoKHR_pnext(val->pNext);
|
||||
size += vn_sizeof_VkRayTracingPipelineInterfaceCreateInfoKHR_self(val);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkRayTracingPipelineInterfaceCreateInfoKHR_pnext(struct vn_cs_encoder *enc, const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
vn_encode_simple_pointer(enc, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkRayTracingPipelineInterfaceCreateInfoKHR_self(struct vn_cs_encoder *enc, const VkRayTracingPipelineInterfaceCreateInfoKHR *val)
|
||||
{
|
||||
/* skip val->{sType,pNext} */
|
||||
vn_encode_uint32_t(enc, &val->maxPipelineRayPayloadSize);
|
||||
vn_encode_uint32_t(enc, &val->maxPipelineRayHitAttributeSize);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkRayTracingPipelineInterfaceCreateInfoKHR(struct vn_cs_encoder *enc, const VkRayTracingPipelineInterfaceCreateInfoKHR *val)
|
||||
{
|
||||
assert(val->sType == VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR);
|
||||
vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR });
|
||||
vn_encode_VkRayTracingPipelineInterfaceCreateInfoKHR_pnext(enc, val->pNext);
|
||||
vn_encode_VkRayTracingPipelineInterfaceCreateInfoKHR_self(enc, val);
|
||||
}
|
||||
|
||||
/* struct VkRayTracingPipelineCreateInfoKHR chain */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkRayTracingPipelineCreateInfoKHR_pnext(const void *val)
|
||||
{
|
||||
const VkBaseInStructure *pnext = val;
|
||||
size_t size = 0;
|
||||
|
||||
while (pnext) {
|
||||
switch ((int32_t)pnext->sType) {
|
||||
case VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO:
|
||||
if (!vn_cs_renderer_protocol_has_extension(471 /* VK_KHR_maintenance5 */))
|
||||
break;
|
||||
size += vn_sizeof_simple_pointer(pnext);
|
||||
size += vn_sizeof_VkStructureType(&pnext->sType);
|
||||
size += vn_sizeof_VkRayTracingPipelineCreateInfoKHR_pnext(pnext->pNext);
|
||||
size += vn_sizeof_VkPipelineCreateFlags2CreateInfo_self((const VkPipelineCreateFlags2CreateInfo *)pnext);
|
||||
return size;
|
||||
case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO:
|
||||
if (!vn_cs_renderer_protocol_has_extension(193 /* VK_EXT_pipeline_creation_feedback */))
|
||||
break;
|
||||
size += vn_sizeof_simple_pointer(pnext);
|
||||
size += vn_sizeof_VkStructureType(&pnext->sType);
|
||||
size += vn_sizeof_VkRayTracingPipelineCreateInfoKHR_pnext(pnext->pNext);
|
||||
size += vn_sizeof_VkPipelineCreationFeedbackCreateInfo_self((const VkPipelineCreationFeedbackCreateInfo *)pnext);
|
||||
return size;
|
||||
case VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO:
|
||||
if (!vn_cs_renderer_protocol_has_extension(69 /* VK_EXT_pipeline_robustness */))
|
||||
break;
|
||||
size += vn_sizeof_simple_pointer(pnext);
|
||||
size += vn_sizeof_VkStructureType(&pnext->sType);
|
||||
size += vn_sizeof_VkRayTracingPipelineCreateInfoKHR_pnext(pnext->pNext);
|
||||
size += vn_sizeof_VkPipelineRobustnessCreateInfo_self((const VkPipelineRobustnessCreateInfo *)pnext);
|
||||
return size;
|
||||
default:
|
||||
/* ignore unknown/unsupported struct */
|
||||
break;
|
||||
}
|
||||
pnext = pnext->pNext;
|
||||
}
|
||||
|
||||
return vn_sizeof_simple_pointer(NULL);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkRayTracingPipelineCreateInfoKHR_self(const VkRayTracingPipelineCreateInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
/* skip val->{sType,pNext} */
|
||||
size += vn_sizeof_VkFlags(&val->flags);
|
||||
size += vn_sizeof_uint32_t(&val->stageCount);
|
||||
if (val->pStages) {
|
||||
size += vn_sizeof_array_size(val->stageCount);
|
||||
for (uint32_t i = 0; i < val->stageCount; i++)
|
||||
size += vn_sizeof_VkPipelineShaderStageCreateInfo(&val->pStages[i]);
|
||||
} else {
|
||||
size += vn_sizeof_array_size(0);
|
||||
}
|
||||
size += vn_sizeof_uint32_t(&val->groupCount);
|
||||
if (val->pGroups) {
|
||||
size += vn_sizeof_array_size(val->groupCount);
|
||||
for (uint32_t i = 0; i < val->groupCount; i++)
|
||||
size += vn_sizeof_VkRayTracingShaderGroupCreateInfoKHR(&val->pGroups[i]);
|
||||
} else {
|
||||
size += vn_sizeof_array_size(0);
|
||||
}
|
||||
size += vn_sizeof_uint32_t(&val->maxPipelineRayRecursionDepth);
|
||||
size += vn_sizeof_simple_pointer(val->pLibraryInfo);
|
||||
if (val->pLibraryInfo)
|
||||
size += vn_sizeof_VkPipelineLibraryCreateInfoKHR(val->pLibraryInfo);
|
||||
size += vn_sizeof_simple_pointer(val->pLibraryInterface);
|
||||
if (val->pLibraryInterface)
|
||||
size += vn_sizeof_VkRayTracingPipelineInterfaceCreateInfoKHR(val->pLibraryInterface);
|
||||
size += vn_sizeof_simple_pointer(val->pDynamicState);
|
||||
if (val->pDynamicState)
|
||||
size += vn_sizeof_VkPipelineDynamicStateCreateInfo(val->pDynamicState);
|
||||
size += vn_sizeof_VkPipelineLayout(&val->layout);
|
||||
size += vn_sizeof_VkPipeline(&val->basePipelineHandle);
|
||||
size += vn_sizeof_int32_t(&val->basePipelineIndex);
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkRayTracingPipelineCreateInfoKHR(const VkRayTracingPipelineCreateInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
|
||||
size += vn_sizeof_VkStructureType(&val->sType);
|
||||
size += vn_sizeof_VkRayTracingPipelineCreateInfoKHR_pnext(val->pNext);
|
||||
size += vn_sizeof_VkRayTracingPipelineCreateInfoKHR_self(val);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkRayTracingPipelineCreateInfoKHR_pnext(struct vn_cs_encoder *enc, const void *val)
|
||||
{
|
||||
const VkBaseInStructure *pnext = val;
|
||||
|
||||
while (pnext) {
|
||||
switch ((int32_t)pnext->sType) {
|
||||
case VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO:
|
||||
if (!vn_cs_renderer_protocol_has_extension(471 /* VK_KHR_maintenance5 */))
|
||||
break;
|
||||
vn_encode_simple_pointer(enc, pnext);
|
||||
vn_encode_VkStructureType(enc, &pnext->sType);
|
||||
vn_encode_VkRayTracingPipelineCreateInfoKHR_pnext(enc, pnext->pNext);
|
||||
vn_encode_VkPipelineCreateFlags2CreateInfo_self(enc, (const VkPipelineCreateFlags2CreateInfo *)pnext);
|
||||
return;
|
||||
case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO:
|
||||
if (!vn_cs_renderer_protocol_has_extension(193 /* VK_EXT_pipeline_creation_feedback */))
|
||||
break;
|
||||
vn_encode_simple_pointer(enc, pnext);
|
||||
vn_encode_VkStructureType(enc, &pnext->sType);
|
||||
vn_encode_VkRayTracingPipelineCreateInfoKHR_pnext(enc, pnext->pNext);
|
||||
vn_encode_VkPipelineCreationFeedbackCreateInfo_self(enc, (const VkPipelineCreationFeedbackCreateInfo *)pnext);
|
||||
return;
|
||||
case VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO:
|
||||
if (!vn_cs_renderer_protocol_has_extension(69 /* VK_EXT_pipeline_robustness */))
|
||||
break;
|
||||
vn_encode_simple_pointer(enc, pnext);
|
||||
vn_encode_VkStructureType(enc, &pnext->sType);
|
||||
vn_encode_VkRayTracingPipelineCreateInfoKHR_pnext(enc, pnext->pNext);
|
||||
vn_encode_VkPipelineRobustnessCreateInfo_self(enc, (const VkPipelineRobustnessCreateInfo *)pnext);
|
||||
return;
|
||||
default:
|
||||
/* ignore unknown/unsupported struct */
|
||||
break;
|
||||
}
|
||||
pnext = pnext->pNext;
|
||||
}
|
||||
|
||||
vn_encode_simple_pointer(enc, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkRayTracingPipelineCreateInfoKHR_self(struct vn_cs_encoder *enc, const VkRayTracingPipelineCreateInfoKHR *val)
|
||||
{
|
||||
/* skip val->{sType,pNext} */
|
||||
vn_encode_VkFlags(enc, &val->flags);
|
||||
vn_encode_uint32_t(enc, &val->stageCount);
|
||||
if (val->pStages) {
|
||||
vn_encode_array_size(enc, val->stageCount);
|
||||
for (uint32_t i = 0; i < val->stageCount; i++)
|
||||
vn_encode_VkPipelineShaderStageCreateInfo(enc, &val->pStages[i]);
|
||||
} else {
|
||||
vn_encode_array_size(enc, 0);
|
||||
}
|
||||
vn_encode_uint32_t(enc, &val->groupCount);
|
||||
if (val->pGroups) {
|
||||
vn_encode_array_size(enc, val->groupCount);
|
||||
for (uint32_t i = 0; i < val->groupCount; i++)
|
||||
vn_encode_VkRayTracingShaderGroupCreateInfoKHR(enc, &val->pGroups[i]);
|
||||
} else {
|
||||
vn_encode_array_size(enc, 0);
|
||||
}
|
||||
vn_encode_uint32_t(enc, &val->maxPipelineRayRecursionDepth);
|
||||
if (vn_encode_simple_pointer(enc, val->pLibraryInfo))
|
||||
vn_encode_VkPipelineLibraryCreateInfoKHR(enc, val->pLibraryInfo);
|
||||
if (vn_encode_simple_pointer(enc, val->pLibraryInterface))
|
||||
vn_encode_VkRayTracingPipelineInterfaceCreateInfoKHR(enc, val->pLibraryInterface);
|
||||
if (vn_encode_simple_pointer(enc, val->pDynamicState))
|
||||
vn_encode_VkPipelineDynamicStateCreateInfo(enc, val->pDynamicState);
|
||||
vn_encode_VkPipelineLayout(enc, &val->layout);
|
||||
vn_encode_VkPipeline(enc, &val->basePipelineHandle);
|
||||
vn_encode_int32_t(enc, &val->basePipelineIndex);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkRayTracingPipelineCreateInfoKHR(struct vn_cs_encoder *enc, const VkRayTracingPipelineCreateInfoKHR *val)
|
||||
{
|
||||
assert(val->sType == VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR);
|
||||
vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR });
|
||||
vn_encode_VkRayTracingPipelineCreateInfoKHR_pnext(enc, val->pNext);
|
||||
vn_encode_VkRayTracingPipelineCreateInfoKHR_self(enc, val);
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkCreateGraphicsPipelines_EXT;
|
||||
|
|
@ -3091,6 +3389,328 @@ static inline void vn_decode_vkDestroyPipeline_reply(struct vn_cs_decoder *dec,
|
|||
/* skip pAllocator */
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkGetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetRayTracingShaderGroupHandlesKHR_EXT;
|
||||
const VkFlags cmd_flags = 0;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type) + vn_sizeof_VkFlags(&cmd_flags);
|
||||
|
||||
cmd_size += vn_sizeof_VkDevice(&device);
|
||||
cmd_size += vn_sizeof_VkPipeline(&pipeline);
|
||||
cmd_size += vn_sizeof_uint32_t(&firstGroup);
|
||||
cmd_size += vn_sizeof_uint32_t(&groupCount);
|
||||
cmd_size += vn_sizeof_size_t(&dataSize);
|
||||
cmd_size += vn_sizeof_simple_pointer(pData); /* out */
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline void vn_encode_vkGetRayTracingShaderGroupHandlesKHR(struct vn_cs_encoder *enc, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetRayTracingShaderGroupHandlesKHR_EXT;
|
||||
|
||||
vn_encode_VkCommandTypeEXT(enc, &cmd_type);
|
||||
vn_encode_VkFlags(enc, &cmd_flags);
|
||||
|
||||
vn_encode_VkDevice(enc, &device);
|
||||
vn_encode_VkPipeline(enc, &pipeline);
|
||||
vn_encode_uint32_t(enc, &firstGroup);
|
||||
vn_encode_uint32_t(enc, &groupCount);
|
||||
vn_encode_size_t(enc, &dataSize);
|
||||
vn_encode_array_size(enc, pData ? dataSize : 0); /* out */
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkGetRayTracingShaderGroupHandlesKHR_reply(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetRayTracingShaderGroupHandlesKHR_EXT;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type);
|
||||
|
||||
VkResult ret;
|
||||
cmd_size += vn_sizeof_VkResult(&ret);
|
||||
/* skip device */
|
||||
/* skip pipeline */
|
||||
/* skip firstGroup */
|
||||
/* skip groupCount */
|
||||
/* skip dataSize */
|
||||
if (pData) {
|
||||
cmd_size += vn_sizeof_array_size(dataSize);
|
||||
cmd_size += vn_sizeof_blob_array(pData, dataSize);
|
||||
} else {
|
||||
cmd_size += vn_sizeof_array_size(0);
|
||||
}
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline VkResult vn_decode_vkGetRayTracingShaderGroupHandlesKHR_reply(struct vn_cs_decoder *dec, VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData)
|
||||
{
|
||||
VkCommandTypeEXT command_type;
|
||||
vn_decode_VkCommandTypeEXT(dec, &command_type);
|
||||
assert(command_type == VK_COMMAND_TYPE_vkGetRayTracingShaderGroupHandlesKHR_EXT);
|
||||
|
||||
VkResult ret;
|
||||
vn_decode_VkResult(dec, &ret);
|
||||
/* skip device */
|
||||
/* skip pipeline */
|
||||
/* skip firstGroup */
|
||||
/* skip groupCount */
|
||||
/* skip dataSize */
|
||||
if (vn_peek_array_size(dec)) {
|
||||
const size_t array_size = vn_decode_array_size(dec, dataSize);
|
||||
vn_decode_blob_array(dec, pData, array_size);
|
||||
} else {
|
||||
vn_decode_array_size_unchecked(dec);
|
||||
pData = NULL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_EXT;
|
||||
const VkFlags cmd_flags = 0;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type) + vn_sizeof_VkFlags(&cmd_flags);
|
||||
|
||||
cmd_size += vn_sizeof_VkDevice(&device);
|
||||
cmd_size += vn_sizeof_VkPipeline(&pipeline);
|
||||
cmd_size += vn_sizeof_uint32_t(&firstGroup);
|
||||
cmd_size += vn_sizeof_uint32_t(&groupCount);
|
||||
cmd_size += vn_sizeof_size_t(&dataSize);
|
||||
cmd_size += vn_sizeof_simple_pointer(pData); /* out */
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline void vn_encode_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(struct vn_cs_encoder *enc, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_EXT;
|
||||
|
||||
vn_encode_VkCommandTypeEXT(enc, &cmd_type);
|
||||
vn_encode_VkFlags(enc, &cmd_flags);
|
||||
|
||||
vn_encode_VkDevice(enc, &device);
|
||||
vn_encode_VkPipeline(enc, &pipeline);
|
||||
vn_encode_uint32_t(enc, &firstGroup);
|
||||
vn_encode_uint32_t(enc, &groupCount);
|
||||
vn_encode_size_t(enc, &dataSize);
|
||||
vn_encode_array_size(enc, pData ? dataSize : 0); /* out */
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_reply(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_EXT;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type);
|
||||
|
||||
VkResult ret;
|
||||
cmd_size += vn_sizeof_VkResult(&ret);
|
||||
/* skip device */
|
||||
/* skip pipeline */
|
||||
/* skip firstGroup */
|
||||
/* skip groupCount */
|
||||
/* skip dataSize */
|
||||
if (pData) {
|
||||
cmd_size += vn_sizeof_array_size(dataSize);
|
||||
cmd_size += vn_sizeof_blob_array(pData, dataSize);
|
||||
} else {
|
||||
cmd_size += vn_sizeof_array_size(0);
|
||||
}
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline VkResult vn_decode_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_reply(struct vn_cs_decoder *dec, VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData)
|
||||
{
|
||||
VkCommandTypeEXT command_type;
|
||||
vn_decode_VkCommandTypeEXT(dec, &command_type);
|
||||
assert(command_type == VK_COMMAND_TYPE_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_EXT);
|
||||
|
||||
VkResult ret;
|
||||
vn_decode_VkResult(dec, &ret);
|
||||
/* skip device */
|
||||
/* skip pipeline */
|
||||
/* skip firstGroup */
|
||||
/* skip groupCount */
|
||||
/* skip dataSize */
|
||||
if (vn_peek_array_size(dec)) {
|
||||
const size_t array_size = vn_decode_array_size(dec, dataSize);
|
||||
vn_decode_blob_array(dec, pData, array_size);
|
||||
} else {
|
||||
vn_decode_array_size_unchecked(dec);
|
||||
pData = NULL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkCreateRayTracingPipelinesKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkCreateRayTracingPipelinesKHR_EXT;
|
||||
const VkFlags cmd_flags = 0;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type) + vn_sizeof_VkFlags(&cmd_flags);
|
||||
|
||||
cmd_size += vn_sizeof_VkDevice(&device);
|
||||
cmd_size += vn_sizeof_VkDeferredOperationKHR(&deferredOperation);
|
||||
cmd_size += vn_sizeof_VkPipelineCache(&pipelineCache);
|
||||
cmd_size += vn_sizeof_uint32_t(&createInfoCount);
|
||||
if (pCreateInfos) {
|
||||
cmd_size += vn_sizeof_array_size(createInfoCount);
|
||||
for (uint32_t i = 0; i < createInfoCount; i++)
|
||||
cmd_size += vn_sizeof_VkRayTracingPipelineCreateInfoKHR(&pCreateInfos[i]);
|
||||
} else {
|
||||
cmd_size += vn_sizeof_array_size(0);
|
||||
}
|
||||
cmd_size += vn_sizeof_simple_pointer(pAllocator);
|
||||
if (pAllocator)
|
||||
assert(false);
|
||||
if (pPipelines) {
|
||||
cmd_size += vn_sizeof_array_size(createInfoCount);
|
||||
for (uint32_t i = 0; i < createInfoCount; i++)
|
||||
cmd_size += vn_sizeof_VkPipeline(&pPipelines[i]);
|
||||
} else {
|
||||
cmd_size += vn_sizeof_array_size(0);
|
||||
}
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline void vn_encode_vkCreateRayTracingPipelinesKHR(struct vn_cs_encoder *enc, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkCreateRayTracingPipelinesKHR_EXT;
|
||||
|
||||
vn_encode_VkCommandTypeEXT(enc, &cmd_type);
|
||||
vn_encode_VkFlags(enc, &cmd_flags);
|
||||
|
||||
vn_encode_VkDevice(enc, &device);
|
||||
vn_encode_VkDeferredOperationKHR(enc, &deferredOperation);
|
||||
vn_encode_VkPipelineCache(enc, &pipelineCache);
|
||||
vn_encode_uint32_t(enc, &createInfoCount);
|
||||
if (pCreateInfos) {
|
||||
vn_encode_array_size(enc, createInfoCount);
|
||||
for (uint32_t i = 0; i < createInfoCount; i++)
|
||||
vn_encode_VkRayTracingPipelineCreateInfoKHR(enc, &pCreateInfos[i]);
|
||||
} else {
|
||||
vn_encode_array_size(enc, 0);
|
||||
}
|
||||
if (vn_encode_simple_pointer(enc, pAllocator))
|
||||
assert(false);
|
||||
if (pPipelines) {
|
||||
vn_encode_array_size(enc, createInfoCount);
|
||||
for (uint32_t i = 0; i < createInfoCount; i++)
|
||||
vn_encode_VkPipeline(enc, &pPipelines[i]);
|
||||
} else {
|
||||
vn_encode_array_size(enc, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkCreateRayTracingPipelinesKHR_reply(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkCreateRayTracingPipelinesKHR_EXT;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type);
|
||||
|
||||
VkResult ret;
|
||||
cmd_size += vn_sizeof_VkResult(&ret);
|
||||
/* skip device */
|
||||
/* skip deferredOperation */
|
||||
/* skip pipelineCache */
|
||||
/* skip createInfoCount */
|
||||
/* skip pCreateInfos */
|
||||
/* skip pAllocator */
|
||||
if (pPipelines) {
|
||||
cmd_size += vn_sizeof_array_size(createInfoCount);
|
||||
for (uint32_t i = 0; i < createInfoCount; i++)
|
||||
cmd_size += vn_sizeof_VkPipeline(&pPipelines[i]);
|
||||
} else {
|
||||
cmd_size += vn_sizeof_array_size(0);
|
||||
}
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline VkResult vn_decode_vkCreateRayTracingPipelinesKHR_reply(struct vn_cs_decoder *dec, VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
|
||||
{
|
||||
VkCommandTypeEXT command_type;
|
||||
vn_decode_VkCommandTypeEXT(dec, &command_type);
|
||||
assert(command_type == VK_COMMAND_TYPE_vkCreateRayTracingPipelinesKHR_EXT);
|
||||
|
||||
VkResult ret;
|
||||
vn_decode_VkResult(dec, &ret);
|
||||
/* skip device */
|
||||
/* skip deferredOperation */
|
||||
/* skip pipelineCache */
|
||||
/* skip createInfoCount */
|
||||
/* skip pCreateInfos */
|
||||
/* skip pAllocator */
|
||||
if (vn_peek_array_size(dec)) {
|
||||
const uint32_t iter_count = vn_decode_array_size(dec, createInfoCount);
|
||||
for (uint32_t i = 0; i < iter_count; i++)
|
||||
vn_decode_VkPipeline(dec, &pPipelines[i]);
|
||||
} else {
|
||||
vn_decode_array_size_unchecked(dec);
|
||||
pPipelines = NULL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkGetRayTracingShaderGroupStackSizeKHR(VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetRayTracingShaderGroupStackSizeKHR_EXT;
|
||||
const VkFlags cmd_flags = 0;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type) + vn_sizeof_VkFlags(&cmd_flags);
|
||||
|
||||
cmd_size += vn_sizeof_VkDevice(&device);
|
||||
cmd_size += vn_sizeof_VkPipeline(&pipeline);
|
||||
cmd_size += vn_sizeof_uint32_t(&group);
|
||||
cmd_size += vn_sizeof_VkShaderGroupShaderKHR(&groupShader);
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline void vn_encode_vkGetRayTracingShaderGroupStackSizeKHR(struct vn_cs_encoder *enc, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetRayTracingShaderGroupStackSizeKHR_EXT;
|
||||
|
||||
vn_encode_VkCommandTypeEXT(enc, &cmd_type);
|
||||
vn_encode_VkFlags(enc, &cmd_flags);
|
||||
|
||||
vn_encode_VkDevice(enc, &device);
|
||||
vn_encode_VkPipeline(enc, &pipeline);
|
||||
vn_encode_uint32_t(enc, &group);
|
||||
vn_encode_VkShaderGroupShaderKHR(enc, &groupShader);
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkGetRayTracingShaderGroupStackSizeKHR_reply(VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetRayTracingShaderGroupStackSizeKHR_EXT;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type);
|
||||
|
||||
VkDeviceSize ret;
|
||||
cmd_size += vn_sizeof_VkDeviceSize(&ret);
|
||||
/* skip device */
|
||||
/* skip pipeline */
|
||||
/* skip group */
|
||||
/* skip groupShader */
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline VkDeviceSize vn_decode_vkGetRayTracingShaderGroupStackSizeKHR_reply(struct vn_cs_decoder *dec, VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader)
|
||||
{
|
||||
VkCommandTypeEXT command_type;
|
||||
vn_decode_VkCommandTypeEXT(dec, &command_type);
|
||||
assert(command_type == VK_COMMAND_TYPE_vkGetRayTracingShaderGroupStackSizeKHR_EXT);
|
||||
|
||||
VkDeviceSize ret;
|
||||
vn_decode_VkDeviceSize(dec, &ret);
|
||||
/* skip device */
|
||||
/* skip pipeline */
|
||||
/* skip group */
|
||||
/* skip groupShader */
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void vn_submit_vkCreateGraphicsPipelines(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, struct vn_ring_submit_command *submit)
|
||||
{
|
||||
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
|
||||
|
|
@ -3154,6 +3774,90 @@ static inline void vn_submit_vkDestroyPipeline(struct vn_ring *vn_ring, VkComman
|
|||
}
|
||||
}
|
||||
|
||||
static inline void vn_submit_vkGetRayTracingShaderGroupHandlesKHR(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, struct vn_ring_submit_command *submit)
|
||||
{
|
||||
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
|
||||
void *cmd_data = local_cmd_data;
|
||||
size_t cmd_size = vn_sizeof_vkGetRayTracingShaderGroupHandlesKHR(device, pipeline, firstGroup, groupCount, dataSize, pData);
|
||||
if (cmd_size > sizeof(local_cmd_data)) {
|
||||
cmd_data = malloc(cmd_size);
|
||||
if (!cmd_data)
|
||||
cmd_size = 0;
|
||||
}
|
||||
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetRayTracingShaderGroupHandlesKHR_reply(device, pipeline, firstGroup, groupCount, dataSize, pData) : 0;
|
||||
|
||||
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
|
||||
if (cmd_size) {
|
||||
vn_encode_vkGetRayTracingShaderGroupHandlesKHR(enc, cmd_flags, device, pipeline, firstGroup, groupCount, dataSize, pData);
|
||||
vn_ring_submit_command(vn_ring, submit);
|
||||
if (cmd_data != local_cmd_data)
|
||||
free(cmd_data);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void vn_submit_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, struct vn_ring_submit_command *submit)
|
||||
{
|
||||
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
|
||||
void *cmd_data = local_cmd_data;
|
||||
size_t cmd_size = vn_sizeof_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(device, pipeline, firstGroup, groupCount, dataSize, pData);
|
||||
if (cmd_size > sizeof(local_cmd_data)) {
|
||||
cmd_data = malloc(cmd_size);
|
||||
if (!cmd_data)
|
||||
cmd_size = 0;
|
||||
}
|
||||
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_reply(device, pipeline, firstGroup, groupCount, dataSize, pData) : 0;
|
||||
|
||||
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
|
||||
if (cmd_size) {
|
||||
vn_encode_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(enc, cmd_flags, device, pipeline, firstGroup, groupCount, dataSize, pData);
|
||||
vn_ring_submit_command(vn_ring, submit);
|
||||
if (cmd_data != local_cmd_data)
|
||||
free(cmd_data);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void vn_submit_vkCreateRayTracingPipelinesKHR(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, struct vn_ring_submit_command *submit)
|
||||
{
|
||||
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
|
||||
void *cmd_data = local_cmd_data;
|
||||
size_t cmd_size = vn_sizeof_vkCreateRayTracingPipelinesKHR(device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
|
||||
if (cmd_size > sizeof(local_cmd_data)) {
|
||||
cmd_data = malloc(cmd_size);
|
||||
if (!cmd_data)
|
||||
cmd_size = 0;
|
||||
}
|
||||
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateRayTracingPipelinesKHR_reply(device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines) : 0;
|
||||
|
||||
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
|
||||
if (cmd_size) {
|
||||
vn_encode_vkCreateRayTracingPipelinesKHR(enc, cmd_flags, device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
|
||||
vn_ring_submit_command(vn_ring, submit);
|
||||
if (cmd_data != local_cmd_data)
|
||||
free(cmd_data);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void vn_submit_vkGetRayTracingShaderGroupStackSizeKHR(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader, struct vn_ring_submit_command *submit)
|
||||
{
|
||||
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
|
||||
void *cmd_data = local_cmd_data;
|
||||
size_t cmd_size = vn_sizeof_vkGetRayTracingShaderGroupStackSizeKHR(device, pipeline, group, groupShader);
|
||||
if (cmd_size > sizeof(local_cmd_data)) {
|
||||
cmd_data = malloc(cmd_size);
|
||||
if (!cmd_data)
|
||||
cmd_size = 0;
|
||||
}
|
||||
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetRayTracingShaderGroupStackSizeKHR_reply(device, pipeline, group, groupShader) : 0;
|
||||
|
||||
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
|
||||
if (cmd_size) {
|
||||
vn_encode_vkGetRayTracingShaderGroupStackSizeKHR(enc, cmd_flags, device, pipeline, group, groupShader);
|
||||
vn_ring_submit_command(vn_ring, submit);
|
||||
if (cmd_data != local_cmd_data)
|
||||
free(cmd_data);
|
||||
}
|
||||
}
|
||||
|
||||
static inline VkResult vn_call_vkCreateGraphicsPipelines(struct vn_ring *vn_ring, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
|
||||
{
|
||||
VN_TRACE_FUNC();
|
||||
|
|
@ -3217,4 +3921,92 @@ static inline void vn_async_vkDestroyPipeline(struct vn_ring *vn_ring, VkDevice
|
|||
vn_submit_vkDestroyPipeline(vn_ring, 0, device, pipeline, pAllocator, &submit);
|
||||
}
|
||||
|
||||
static inline VkResult vn_call_vkGetRayTracingShaderGroupHandlesKHR(struct vn_ring *vn_ring, VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData)
|
||||
{
|
||||
VN_TRACE_FUNC();
|
||||
|
||||
struct vn_ring_submit_command submit;
|
||||
vn_submit_vkGetRayTracingShaderGroupHandlesKHR(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipeline, firstGroup, groupCount, dataSize, pData, &submit);
|
||||
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
|
||||
if (dec) {
|
||||
const VkResult ret = vn_decode_vkGetRayTracingShaderGroupHandlesKHR_reply(dec, device, pipeline, firstGroup, groupCount, dataSize, pData);
|
||||
vn_ring_free_command_reply(vn_ring, &submit);
|
||||
return ret;
|
||||
} else {
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void vn_async_vkGetRayTracingShaderGroupHandlesKHR(struct vn_ring *vn_ring, VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData)
|
||||
{
|
||||
struct vn_ring_submit_command submit;
|
||||
vn_submit_vkGetRayTracingShaderGroupHandlesKHR(vn_ring, 0, device, pipeline, firstGroup, groupCount, dataSize, pData, &submit);
|
||||
}
|
||||
|
||||
static inline VkResult vn_call_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(struct vn_ring *vn_ring, VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData)
|
||||
{
|
||||
VN_TRACE_FUNC();
|
||||
|
||||
struct vn_ring_submit_command submit;
|
||||
vn_submit_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipeline, firstGroup, groupCount, dataSize, pData, &submit);
|
||||
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
|
||||
if (dec) {
|
||||
const VkResult ret = vn_decode_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_reply(dec, device, pipeline, firstGroup, groupCount, dataSize, pData);
|
||||
vn_ring_free_command_reply(vn_ring, &submit);
|
||||
return ret;
|
||||
} else {
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void vn_async_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(struct vn_ring *vn_ring, VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData)
|
||||
{
|
||||
struct vn_ring_submit_command submit;
|
||||
vn_submit_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(vn_ring, 0, device, pipeline, firstGroup, groupCount, dataSize, pData, &submit);
|
||||
}
|
||||
|
||||
static inline VkResult vn_call_vkCreateRayTracingPipelinesKHR(struct vn_ring *vn_ring, VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
|
||||
{
|
||||
VN_TRACE_FUNC();
|
||||
|
||||
struct vn_ring_submit_command submit;
|
||||
vn_submit_vkCreateRayTracingPipelinesKHR(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &submit);
|
||||
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
|
||||
if (dec) {
|
||||
const VkResult ret = vn_decode_vkCreateRayTracingPipelinesKHR_reply(dec, device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
|
||||
vn_ring_free_command_reply(vn_ring, &submit);
|
||||
return ret;
|
||||
} else {
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void vn_async_vkCreateRayTracingPipelinesKHR(struct vn_ring *vn_ring, VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
|
||||
{
|
||||
struct vn_ring_submit_command submit;
|
||||
vn_submit_vkCreateRayTracingPipelinesKHR(vn_ring, 0, device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &submit);
|
||||
}
|
||||
|
||||
static inline VkDeviceSize vn_call_vkGetRayTracingShaderGroupStackSizeKHR(struct vn_ring *vn_ring, VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader)
|
||||
{
|
||||
VN_TRACE_FUNC();
|
||||
|
||||
struct vn_ring_submit_command submit;
|
||||
vn_submit_vkGetRayTracingShaderGroupStackSizeKHR(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipeline, group, groupShader, &submit);
|
||||
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
|
||||
if (dec) {
|
||||
const VkDeviceSize ret = vn_decode_vkGetRayTracingShaderGroupStackSizeKHR_reply(dec, device, pipeline, group, groupShader);
|
||||
vn_ring_free_command_reply(vn_ring, &submit);
|
||||
return ret;
|
||||
} else {
|
||||
return VK_ERROR_OUT_OF_HOST_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void vn_async_vkGetRayTracingShaderGroupStackSizeKHR(struct vn_ring *vn_ring, VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader)
|
||||
{
|
||||
struct vn_ring_submit_command submit;
|
||||
vn_submit_vkGetRayTracingShaderGroupStackSizeKHR(vn_ring, 0, device, pipeline, group, groupShader, &submit);
|
||||
}
|
||||
|
||||
#endif /* VN_PROTOCOL_DRIVER_PIPELINE_H */
|
||||
|
|
|
|||
|
|
@ -1181,8 +1181,9 @@ vn_encode_VkPipelineLayoutCreateInfo(struct vn_cs_encoder *enc, const VkPipeline
|
|||
/* union VkClearColorValue */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkClearColorValue_tag(const VkClearColorValue *val, uint32_t tag)
|
||||
vn_sizeof_VkClearColorValue(const VkClearColorValue *val)
|
||||
{
|
||||
static const uint32_t tag = 2; /* union with default tag */
|
||||
size_t size = vn_sizeof_uint32_t(&tag);
|
||||
switch (tag) {
|
||||
case 0:
|
||||
|
|
@ -1204,15 +1205,10 @@ vn_sizeof_VkClearColorValue_tag(const VkClearColorValue *val, uint32_t tag)
|
|||
return size;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkClearColorValue(const VkClearColorValue *val)
|
||||
{
|
||||
return vn_sizeof_VkClearColorValue_tag(val, 2);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkClearColorValue_tag(struct vn_cs_encoder *enc, const VkClearColorValue *val, uint32_t tag)
|
||||
vn_encode_VkClearColorValue(struct vn_cs_encoder *enc, const VkClearColorValue *val)
|
||||
{
|
||||
static const uint32_t tag = 2; /* union with default tag */
|
||||
vn_encode_uint32_t(enc, &tag);
|
||||
switch (tag) {
|
||||
case 0:
|
||||
|
|
@ -1233,12 +1229,6 @@ vn_encode_VkClearColorValue_tag(struct vn_cs_encoder *enc, const VkClearColorVal
|
|||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkClearColorValue(struct vn_cs_encoder *enc, const VkClearColorValue *val)
|
||||
{
|
||||
vn_encode_VkClearColorValue_tag(enc, val, 2); /* union with default tag */
|
||||
}
|
||||
|
||||
/* struct VkMutableDescriptorTypeListEXT */
|
||||
|
||||
static inline size_t
|
||||
|
|
@ -1439,6 +1429,73 @@ vn_encode_VkWriteDescriptorSetInlineUniformBlock(struct vn_cs_encoder *enc, cons
|
|||
vn_encode_VkWriteDescriptorSetInlineUniformBlock_self(enc, val);
|
||||
}
|
||||
|
||||
/* struct VkWriteDescriptorSetAccelerationStructureKHR chain */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkWriteDescriptorSetAccelerationStructureKHR_pnext(const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
return vn_sizeof_simple_pointer(NULL);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkWriteDescriptorSetAccelerationStructureKHR_self(const VkWriteDescriptorSetAccelerationStructureKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
/* skip val->{sType,pNext} */
|
||||
size += vn_sizeof_uint32_t(&val->accelerationStructureCount);
|
||||
if (val->pAccelerationStructures) {
|
||||
size += vn_sizeof_array_size(val->accelerationStructureCount);
|
||||
for (uint32_t i = 0; i < val->accelerationStructureCount; i++)
|
||||
size += vn_sizeof_VkAccelerationStructureKHR(&val->pAccelerationStructures[i]);
|
||||
} else {
|
||||
size += vn_sizeof_array_size(0);
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkWriteDescriptorSetAccelerationStructureKHR(const VkWriteDescriptorSetAccelerationStructureKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
|
||||
size += vn_sizeof_VkStructureType(&val->sType);
|
||||
size += vn_sizeof_VkWriteDescriptorSetAccelerationStructureKHR_pnext(val->pNext);
|
||||
size += vn_sizeof_VkWriteDescriptorSetAccelerationStructureKHR_self(val);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkWriteDescriptorSetAccelerationStructureKHR_pnext(struct vn_cs_encoder *enc, const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
vn_encode_simple_pointer(enc, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkWriteDescriptorSetAccelerationStructureKHR_self(struct vn_cs_encoder *enc, const VkWriteDescriptorSetAccelerationStructureKHR *val)
|
||||
{
|
||||
/* skip val->{sType,pNext} */
|
||||
vn_encode_uint32_t(enc, &val->accelerationStructureCount);
|
||||
if (val->pAccelerationStructures) {
|
||||
vn_encode_array_size(enc, val->accelerationStructureCount);
|
||||
for (uint32_t i = 0; i < val->accelerationStructureCount; i++)
|
||||
vn_encode_VkAccelerationStructureKHR(enc, &val->pAccelerationStructures[i]);
|
||||
} else {
|
||||
vn_encode_array_size(enc, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkWriteDescriptorSetAccelerationStructureKHR(struct vn_cs_encoder *enc, const VkWriteDescriptorSetAccelerationStructureKHR *val)
|
||||
{
|
||||
assert(val->sType == VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR);
|
||||
vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR });
|
||||
vn_encode_VkWriteDescriptorSetAccelerationStructureKHR_pnext(enc, val->pNext);
|
||||
vn_encode_VkWriteDescriptorSetAccelerationStructureKHR_self(enc, val);
|
||||
}
|
||||
|
||||
/* struct VkWriteDescriptorSet chain */
|
||||
|
||||
static inline size_t
|
||||
|
|
@ -1457,6 +1514,14 @@ vn_sizeof_VkWriteDescriptorSet_pnext(const void *val)
|
|||
size += vn_sizeof_VkWriteDescriptorSet_pnext(pnext->pNext);
|
||||
size += vn_sizeof_VkWriteDescriptorSetInlineUniformBlock_self((const VkWriteDescriptorSetInlineUniformBlock *)pnext);
|
||||
return size;
|
||||
case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR:
|
||||
if (!vn_cs_renderer_protocol_has_extension(151 /* VK_KHR_acceleration_structure */))
|
||||
break;
|
||||
size += vn_sizeof_simple_pointer(pnext);
|
||||
size += vn_sizeof_VkStructureType(&pnext->sType);
|
||||
size += vn_sizeof_VkWriteDescriptorSet_pnext(pnext->pNext);
|
||||
size += vn_sizeof_VkWriteDescriptorSetAccelerationStructureKHR_self((const VkWriteDescriptorSetAccelerationStructureKHR *)pnext);
|
||||
return size;
|
||||
default:
|
||||
/* ignore unknown/unsupported struct */
|
||||
break;
|
||||
|
|
@ -1528,6 +1593,14 @@ vn_encode_VkWriteDescriptorSet_pnext(struct vn_cs_encoder *enc, const void *val)
|
|||
vn_encode_VkWriteDescriptorSet_pnext(enc, pnext->pNext);
|
||||
vn_encode_VkWriteDescriptorSetInlineUniformBlock_self(enc, (const VkWriteDescriptorSetInlineUniformBlock *)pnext);
|
||||
return;
|
||||
case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR:
|
||||
if (!vn_cs_renderer_protocol_has_extension(151 /* VK_KHR_acceleration_structure */))
|
||||
break;
|
||||
vn_encode_simple_pointer(enc, pnext);
|
||||
vn_encode_VkStructureType(enc, &pnext->sType);
|
||||
vn_encode_VkWriteDescriptorSet_pnext(enc, pnext->pNext);
|
||||
vn_encode_VkWriteDescriptorSetAccelerationStructureKHR_self(enc, (const VkWriteDescriptorSetAccelerationStructureKHR *)pnext);
|
||||
return;
|
||||
default:
|
||||
/* ignore unknown/unsupported struct */
|
||||
break;
|
||||
|
|
@ -2007,6 +2080,657 @@ vn_encode_VkMemoryBarrier2(struct vn_cs_encoder *enc, const VkMemoryBarrier2 *va
|
|||
vn_encode_VkMemoryBarrier2_self(enc, val);
|
||||
}
|
||||
|
||||
/* struct VkCopyAccelerationStructureInfoKHR chain */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkCopyAccelerationStructureInfoKHR_pnext(const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
return vn_sizeof_simple_pointer(NULL);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkCopyAccelerationStructureInfoKHR_self(const VkCopyAccelerationStructureInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
/* skip val->{sType,pNext} */
|
||||
size += vn_sizeof_VkAccelerationStructureKHR(&val->src);
|
||||
size += vn_sizeof_VkAccelerationStructureKHR(&val->dst);
|
||||
size += vn_sizeof_VkCopyAccelerationStructureModeKHR(&val->mode);
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkCopyAccelerationStructureInfoKHR(const VkCopyAccelerationStructureInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
|
||||
size += vn_sizeof_VkStructureType(&val->sType);
|
||||
size += vn_sizeof_VkCopyAccelerationStructureInfoKHR_pnext(val->pNext);
|
||||
size += vn_sizeof_VkCopyAccelerationStructureInfoKHR_self(val);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkCopyAccelerationStructureInfoKHR_pnext(struct vn_cs_encoder *enc, const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
vn_encode_simple_pointer(enc, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkCopyAccelerationStructureInfoKHR_self(struct vn_cs_encoder *enc, const VkCopyAccelerationStructureInfoKHR *val)
|
||||
{
|
||||
/* skip val->{sType,pNext} */
|
||||
vn_encode_VkAccelerationStructureKHR(enc, &val->src);
|
||||
vn_encode_VkAccelerationStructureKHR(enc, &val->dst);
|
||||
vn_encode_VkCopyAccelerationStructureModeKHR(enc, &val->mode);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkCopyAccelerationStructureInfoKHR(struct vn_cs_encoder *enc, const VkCopyAccelerationStructureInfoKHR *val)
|
||||
{
|
||||
assert(val->sType == VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR);
|
||||
vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR });
|
||||
vn_encode_VkCopyAccelerationStructureInfoKHR_pnext(enc, val->pNext);
|
||||
vn_encode_VkCopyAccelerationStructureInfoKHR_self(enc, val);
|
||||
}
|
||||
|
||||
/* union VkDeviceOrHostAddressKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkDeviceOrHostAddressKHR(const VkDeviceOrHostAddressKHR *val)
|
||||
{
|
||||
static const uint32_t tag = 0; /* union with default tag */
|
||||
size_t size = vn_sizeof_uint32_t(&tag);
|
||||
switch (tag) {
|
||||
case 0:
|
||||
size += vn_sizeof_VkDeviceAddress(&val->deviceAddress);
|
||||
break;
|
||||
case 1:
|
||||
size += vn_sizeof_simple_pointer(val->hostAddress);
|
||||
if (val->hostAddress)
|
||||
assert(false);
|
||||
break;
|
||||
default:
|
||||
assert(false);
|
||||
break;
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkDeviceOrHostAddressKHR(struct vn_cs_encoder *enc, const VkDeviceOrHostAddressKHR *val)
|
||||
{
|
||||
static const uint32_t tag = 0; /* union with default tag */
|
||||
vn_encode_uint32_t(enc, &tag);
|
||||
switch (tag) {
|
||||
case 0:
|
||||
vn_encode_VkDeviceAddress(enc, &val->deviceAddress);
|
||||
break;
|
||||
case 1:
|
||||
if (vn_encode_simple_pointer(enc, val->hostAddress))
|
||||
assert(false);
|
||||
break;
|
||||
default:
|
||||
assert(false);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* struct VkCopyAccelerationStructureToMemoryInfoKHR chain */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkCopyAccelerationStructureToMemoryInfoKHR_pnext(const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
return vn_sizeof_simple_pointer(NULL);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkCopyAccelerationStructureToMemoryInfoKHR_self(const VkCopyAccelerationStructureToMemoryInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
/* skip val->{sType,pNext} */
|
||||
size += vn_sizeof_VkAccelerationStructureKHR(&val->src);
|
||||
size += vn_sizeof_VkDeviceOrHostAddressKHR(&val->dst);
|
||||
size += vn_sizeof_VkCopyAccelerationStructureModeKHR(&val->mode);
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkCopyAccelerationStructureToMemoryInfoKHR(const VkCopyAccelerationStructureToMemoryInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
|
||||
size += vn_sizeof_VkStructureType(&val->sType);
|
||||
size += vn_sizeof_VkCopyAccelerationStructureToMemoryInfoKHR_pnext(val->pNext);
|
||||
size += vn_sizeof_VkCopyAccelerationStructureToMemoryInfoKHR_self(val);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkCopyAccelerationStructureToMemoryInfoKHR_pnext(struct vn_cs_encoder *enc, const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
vn_encode_simple_pointer(enc, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkCopyAccelerationStructureToMemoryInfoKHR_self(struct vn_cs_encoder *enc, const VkCopyAccelerationStructureToMemoryInfoKHR *val)
|
||||
{
|
||||
/* skip val->{sType,pNext} */
|
||||
vn_encode_VkAccelerationStructureKHR(enc, &val->src);
|
||||
vn_encode_VkDeviceOrHostAddressKHR(enc, &val->dst);
|
||||
vn_encode_VkCopyAccelerationStructureModeKHR(enc, &val->mode);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkCopyAccelerationStructureToMemoryInfoKHR(struct vn_cs_encoder *enc, const VkCopyAccelerationStructureToMemoryInfoKHR *val)
|
||||
{
|
||||
assert(val->sType == VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR);
|
||||
vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR });
|
||||
vn_encode_VkCopyAccelerationStructureToMemoryInfoKHR_pnext(enc, val->pNext);
|
||||
vn_encode_VkCopyAccelerationStructureToMemoryInfoKHR_self(enc, val);
|
||||
}
|
||||
|
||||
/* union VkDeviceOrHostAddressConstKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkDeviceOrHostAddressConstKHR(const VkDeviceOrHostAddressConstKHR *val)
|
||||
{
|
||||
static const uint32_t tag = 0; /* union with default tag */
|
||||
size_t size = vn_sizeof_uint32_t(&tag);
|
||||
switch (tag) {
|
||||
case 0:
|
||||
size += vn_sizeof_VkDeviceAddress(&val->deviceAddress);
|
||||
break;
|
||||
case 1:
|
||||
size += vn_sizeof_simple_pointer(val->hostAddress);
|
||||
if (val->hostAddress)
|
||||
assert(false);
|
||||
break;
|
||||
default:
|
||||
assert(false);
|
||||
break;
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkDeviceOrHostAddressConstKHR(struct vn_cs_encoder *enc, const VkDeviceOrHostAddressConstKHR *val)
|
||||
{
|
||||
static const uint32_t tag = 0; /* union with default tag */
|
||||
vn_encode_uint32_t(enc, &tag);
|
||||
switch (tag) {
|
||||
case 0:
|
||||
vn_encode_VkDeviceAddress(enc, &val->deviceAddress);
|
||||
break;
|
||||
case 1:
|
||||
if (vn_encode_simple_pointer(enc, val->hostAddress))
|
||||
assert(false);
|
||||
break;
|
||||
default:
|
||||
assert(false);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* struct VkCopyMemoryToAccelerationStructureInfoKHR chain */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkCopyMemoryToAccelerationStructureInfoKHR_pnext(const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
return vn_sizeof_simple_pointer(NULL);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkCopyMemoryToAccelerationStructureInfoKHR_self(const VkCopyMemoryToAccelerationStructureInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
/* skip val->{sType,pNext} */
|
||||
size += vn_sizeof_VkDeviceOrHostAddressConstKHR(&val->src);
|
||||
size += vn_sizeof_VkAccelerationStructureKHR(&val->dst);
|
||||
size += vn_sizeof_VkCopyAccelerationStructureModeKHR(&val->mode);
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkCopyMemoryToAccelerationStructureInfoKHR(const VkCopyMemoryToAccelerationStructureInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
|
||||
size += vn_sizeof_VkStructureType(&val->sType);
|
||||
size += vn_sizeof_VkCopyMemoryToAccelerationStructureInfoKHR_pnext(val->pNext);
|
||||
size += vn_sizeof_VkCopyMemoryToAccelerationStructureInfoKHR_self(val);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkCopyMemoryToAccelerationStructureInfoKHR_pnext(struct vn_cs_encoder *enc, const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
vn_encode_simple_pointer(enc, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkCopyMemoryToAccelerationStructureInfoKHR_self(struct vn_cs_encoder *enc, const VkCopyMemoryToAccelerationStructureInfoKHR *val)
|
||||
{
|
||||
/* skip val->{sType,pNext} */
|
||||
vn_encode_VkDeviceOrHostAddressConstKHR(enc, &val->src);
|
||||
vn_encode_VkAccelerationStructureKHR(enc, &val->dst);
|
||||
vn_encode_VkCopyAccelerationStructureModeKHR(enc, &val->mode);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkCopyMemoryToAccelerationStructureInfoKHR(struct vn_cs_encoder *enc, const VkCopyMemoryToAccelerationStructureInfoKHR *val)
|
||||
{
|
||||
assert(val->sType == VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR);
|
||||
vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR });
|
||||
vn_encode_VkCopyMemoryToAccelerationStructureInfoKHR_pnext(enc, val->pNext);
|
||||
vn_encode_VkCopyMemoryToAccelerationStructureInfoKHR_self(enc, val);
|
||||
}
|
||||
|
||||
/* struct VkAccelerationStructureGeometryTrianglesDataKHR chain */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureGeometryTrianglesDataKHR_pnext(const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
return vn_sizeof_simple_pointer(NULL);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureGeometryTrianglesDataKHR_self(const VkAccelerationStructureGeometryTrianglesDataKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
/* skip val->{sType,pNext} */
|
||||
size += vn_sizeof_VkFormat(&val->vertexFormat);
|
||||
size += vn_sizeof_VkDeviceOrHostAddressConstKHR(&val->vertexData);
|
||||
size += vn_sizeof_VkDeviceSize(&val->vertexStride);
|
||||
size += vn_sizeof_uint32_t(&val->maxVertex);
|
||||
size += vn_sizeof_VkIndexType(&val->indexType);
|
||||
size += vn_sizeof_VkDeviceOrHostAddressConstKHR(&val->indexData);
|
||||
size += vn_sizeof_VkDeviceOrHostAddressConstKHR(&val->transformData);
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureGeometryTrianglesDataKHR(const VkAccelerationStructureGeometryTrianglesDataKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
|
||||
size += vn_sizeof_VkStructureType(&val->sType);
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryTrianglesDataKHR_pnext(val->pNext);
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryTrianglesDataKHR_self(val);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureGeometryTrianglesDataKHR_pnext(struct vn_cs_encoder *enc, const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
vn_encode_simple_pointer(enc, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureGeometryTrianglesDataKHR_self(struct vn_cs_encoder *enc, const VkAccelerationStructureGeometryTrianglesDataKHR *val)
|
||||
{
|
||||
/* skip val->{sType,pNext} */
|
||||
vn_encode_VkFormat(enc, &val->vertexFormat);
|
||||
vn_encode_VkDeviceOrHostAddressConstKHR(enc, &val->vertexData);
|
||||
vn_encode_VkDeviceSize(enc, &val->vertexStride);
|
||||
vn_encode_uint32_t(enc, &val->maxVertex);
|
||||
vn_encode_VkIndexType(enc, &val->indexType);
|
||||
vn_encode_VkDeviceOrHostAddressConstKHR(enc, &val->indexData);
|
||||
vn_encode_VkDeviceOrHostAddressConstKHR(enc, &val->transformData);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureGeometryTrianglesDataKHR(struct vn_cs_encoder *enc, const VkAccelerationStructureGeometryTrianglesDataKHR *val)
|
||||
{
|
||||
assert(val->sType == VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR);
|
||||
vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR });
|
||||
vn_encode_VkAccelerationStructureGeometryTrianglesDataKHR_pnext(enc, val->pNext);
|
||||
vn_encode_VkAccelerationStructureGeometryTrianglesDataKHR_self(enc, val);
|
||||
}
|
||||
|
||||
/* struct VkAccelerationStructureGeometryAabbsDataKHR chain */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureGeometryAabbsDataKHR_pnext(const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
return vn_sizeof_simple_pointer(NULL);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureGeometryAabbsDataKHR_self(const VkAccelerationStructureGeometryAabbsDataKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
/* skip val->{sType,pNext} */
|
||||
size += vn_sizeof_VkDeviceOrHostAddressConstKHR(&val->data);
|
||||
size += vn_sizeof_VkDeviceSize(&val->stride);
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureGeometryAabbsDataKHR(const VkAccelerationStructureGeometryAabbsDataKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
|
||||
size += vn_sizeof_VkStructureType(&val->sType);
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryAabbsDataKHR_pnext(val->pNext);
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryAabbsDataKHR_self(val);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureGeometryAabbsDataKHR_pnext(struct vn_cs_encoder *enc, const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
vn_encode_simple_pointer(enc, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureGeometryAabbsDataKHR_self(struct vn_cs_encoder *enc, const VkAccelerationStructureGeometryAabbsDataKHR *val)
|
||||
{
|
||||
/* skip val->{sType,pNext} */
|
||||
vn_encode_VkDeviceOrHostAddressConstKHR(enc, &val->data);
|
||||
vn_encode_VkDeviceSize(enc, &val->stride);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureGeometryAabbsDataKHR(struct vn_cs_encoder *enc, const VkAccelerationStructureGeometryAabbsDataKHR *val)
|
||||
{
|
||||
assert(val->sType == VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR);
|
||||
vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR });
|
||||
vn_encode_VkAccelerationStructureGeometryAabbsDataKHR_pnext(enc, val->pNext);
|
||||
vn_encode_VkAccelerationStructureGeometryAabbsDataKHR_self(enc, val);
|
||||
}
|
||||
|
||||
/* struct VkAccelerationStructureGeometryInstancesDataKHR chain */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureGeometryInstancesDataKHR_pnext(const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
return vn_sizeof_simple_pointer(NULL);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureGeometryInstancesDataKHR_self(const VkAccelerationStructureGeometryInstancesDataKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
/* skip val->{sType,pNext} */
|
||||
size += vn_sizeof_VkBool32(&val->arrayOfPointers);
|
||||
size += vn_sizeof_VkDeviceOrHostAddressConstKHR(&val->data);
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureGeometryInstancesDataKHR(const VkAccelerationStructureGeometryInstancesDataKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
|
||||
size += vn_sizeof_VkStructureType(&val->sType);
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryInstancesDataKHR_pnext(val->pNext);
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryInstancesDataKHR_self(val);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureGeometryInstancesDataKHR_pnext(struct vn_cs_encoder *enc, const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
vn_encode_simple_pointer(enc, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureGeometryInstancesDataKHR_self(struct vn_cs_encoder *enc, const VkAccelerationStructureGeometryInstancesDataKHR *val)
|
||||
{
|
||||
/* skip val->{sType,pNext} */
|
||||
vn_encode_VkBool32(enc, &val->arrayOfPointers);
|
||||
vn_encode_VkDeviceOrHostAddressConstKHR(enc, &val->data);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureGeometryInstancesDataKHR(struct vn_cs_encoder *enc, const VkAccelerationStructureGeometryInstancesDataKHR *val)
|
||||
{
|
||||
assert(val->sType == VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR);
|
||||
vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR });
|
||||
vn_encode_VkAccelerationStructureGeometryInstancesDataKHR_pnext(enc, val->pNext);
|
||||
vn_encode_VkAccelerationStructureGeometryInstancesDataKHR_self(enc, val);
|
||||
}
|
||||
|
||||
/* union VkAccelerationStructureGeometryDataKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureGeometryDataKHR(const VkAccelerationStructureGeometryDataKHR *val, VkGeometryTypeKHR tag)
|
||||
{
|
||||
size_t size = vn_sizeof_VkGeometryTypeKHR(&tag);
|
||||
switch (tag) {
|
||||
case VK_GEOMETRY_TYPE_TRIANGLES_KHR:
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryTrianglesDataKHR(&val->triangles);
|
||||
break;
|
||||
case VK_GEOMETRY_TYPE_AABBS_KHR:
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryAabbsDataKHR(&val->aabbs);
|
||||
break;
|
||||
case VK_GEOMETRY_TYPE_INSTANCES_KHR:
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryInstancesDataKHR(&val->instances);
|
||||
break;
|
||||
default:
|
||||
assert(false);
|
||||
break;
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureGeometryDataKHR(struct vn_cs_encoder *enc, const VkAccelerationStructureGeometryDataKHR *val, VkGeometryTypeKHR tag)
|
||||
{
|
||||
vn_encode_VkGeometryTypeKHR(enc, &tag);
|
||||
switch (tag) {
|
||||
case VK_GEOMETRY_TYPE_TRIANGLES_KHR:
|
||||
vn_encode_VkAccelerationStructureGeometryTrianglesDataKHR(enc, &val->triangles);
|
||||
break;
|
||||
case VK_GEOMETRY_TYPE_AABBS_KHR:
|
||||
vn_encode_VkAccelerationStructureGeometryAabbsDataKHR(enc, &val->aabbs);
|
||||
break;
|
||||
case VK_GEOMETRY_TYPE_INSTANCES_KHR:
|
||||
vn_encode_VkAccelerationStructureGeometryInstancesDataKHR(enc, &val->instances);
|
||||
break;
|
||||
default:
|
||||
assert(false);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* struct VkAccelerationStructureGeometryKHR chain */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureGeometryKHR_pnext(const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
return vn_sizeof_simple_pointer(NULL);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureGeometryKHR_self(const VkAccelerationStructureGeometryKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
/* skip val->{sType,pNext} */
|
||||
size += vn_sizeof_VkGeometryTypeKHR(&val->geometryType);
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryDataKHR(&val->geometry, val->geometryType);
|
||||
size += vn_sizeof_VkFlags(&val->flags);
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureGeometryKHR(const VkAccelerationStructureGeometryKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
|
||||
size += vn_sizeof_VkStructureType(&val->sType);
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryKHR_pnext(val->pNext);
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryKHR_self(val);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureGeometryKHR_pnext(struct vn_cs_encoder *enc, const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
vn_encode_simple_pointer(enc, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureGeometryKHR_self(struct vn_cs_encoder *enc, const VkAccelerationStructureGeometryKHR *val)
|
||||
{
|
||||
/* skip val->{sType,pNext} */
|
||||
vn_encode_VkGeometryTypeKHR(enc, &val->geometryType);
|
||||
vn_encode_VkAccelerationStructureGeometryDataKHR(enc, &val->geometry, val->geometryType);
|
||||
vn_encode_VkFlags(enc, &val->flags);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureGeometryKHR(struct vn_cs_encoder *enc, const VkAccelerationStructureGeometryKHR *val)
|
||||
{
|
||||
assert(val->sType == VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR);
|
||||
vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR });
|
||||
vn_encode_VkAccelerationStructureGeometryKHR_pnext(enc, val->pNext);
|
||||
vn_encode_VkAccelerationStructureGeometryKHR_self(enc, val);
|
||||
}
|
||||
|
||||
/* struct VkAccelerationStructureBuildGeometryInfoKHR chain */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureBuildGeometryInfoKHR_pnext(const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
return vn_sizeof_simple_pointer(NULL);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureBuildGeometryInfoKHR_self(const VkAccelerationStructureBuildGeometryInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
/* skip val->{sType,pNext} */
|
||||
size += vn_sizeof_VkAccelerationStructureTypeKHR(&val->type);
|
||||
size += vn_sizeof_VkFlags(&val->flags);
|
||||
size += vn_sizeof_VkBuildAccelerationStructureModeKHR(&val->mode);
|
||||
size += vn_sizeof_VkAccelerationStructureKHR(&val->srcAccelerationStructure);
|
||||
size += vn_sizeof_VkAccelerationStructureKHR(&val->dstAccelerationStructure);
|
||||
size += vn_sizeof_uint32_t(&val->geometryCount);
|
||||
if (val->pGeometries) {
|
||||
size += vn_sizeof_array_size(val->geometryCount);
|
||||
for (uint32_t i = 0; i < val->geometryCount; i++)
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryKHR(&val->pGeometries[i]);
|
||||
} else {
|
||||
size += vn_sizeof_array_size(0);
|
||||
}
|
||||
if (val->ppGeometries) {
|
||||
size += vn_sizeof_array_size(val->geometryCount);
|
||||
for (uint32_t i = 0; i < val->geometryCount; i++) {
|
||||
size += vn_sizeof_array_size(1);
|
||||
for (uint32_t j = 0; j < 1; j++)
|
||||
size += vn_sizeof_VkAccelerationStructureGeometryKHR(&val->ppGeometries[i][j]);
|
||||
}
|
||||
} else {
|
||||
size += vn_sizeof_array_size(0);
|
||||
}
|
||||
size += vn_sizeof_VkDeviceOrHostAddressKHR(&val->scratchData);
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureBuildGeometryInfoKHR(const VkAccelerationStructureBuildGeometryInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
|
||||
size += vn_sizeof_VkStructureType(&val->sType);
|
||||
size += vn_sizeof_VkAccelerationStructureBuildGeometryInfoKHR_pnext(val->pNext);
|
||||
size += vn_sizeof_VkAccelerationStructureBuildGeometryInfoKHR_self(val);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureBuildGeometryInfoKHR_pnext(struct vn_cs_encoder *enc, const void *val)
|
||||
{
|
||||
/* no known/supported struct */
|
||||
vn_encode_simple_pointer(enc, NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureBuildGeometryInfoKHR_self(struct vn_cs_encoder *enc, const VkAccelerationStructureBuildGeometryInfoKHR *val)
|
||||
{
|
||||
/* skip val->{sType,pNext} */
|
||||
vn_encode_VkAccelerationStructureTypeKHR(enc, &val->type);
|
||||
vn_encode_VkFlags(enc, &val->flags);
|
||||
vn_encode_VkBuildAccelerationStructureModeKHR(enc, &val->mode);
|
||||
vn_encode_VkAccelerationStructureKHR(enc, &val->srcAccelerationStructure);
|
||||
vn_encode_VkAccelerationStructureKHR(enc, &val->dstAccelerationStructure);
|
||||
vn_encode_uint32_t(enc, &val->geometryCount);
|
||||
if (val->pGeometries) {
|
||||
vn_encode_array_size(enc, val->geometryCount);
|
||||
for (uint32_t i = 0; i < val->geometryCount; i++)
|
||||
vn_encode_VkAccelerationStructureGeometryKHR(enc, &val->pGeometries[i]);
|
||||
} else {
|
||||
vn_encode_array_size(enc, 0);
|
||||
}
|
||||
if (val->ppGeometries) {
|
||||
vn_encode_array_size(enc, val->geometryCount);
|
||||
for (uint32_t i = 0; i < val->geometryCount; i++) {
|
||||
vn_encode_array_size(enc, 1);
|
||||
for (uint32_t j = 0; j < 1; j++)
|
||||
vn_encode_VkAccelerationStructureGeometryKHR(enc, &val->ppGeometries[i][j]);
|
||||
}
|
||||
} else {
|
||||
vn_encode_array_size(enc, 0);
|
||||
}
|
||||
vn_encode_VkDeviceOrHostAddressKHR(enc, &val->scratchData);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureBuildGeometryInfoKHR(struct vn_cs_encoder *enc, const VkAccelerationStructureBuildGeometryInfoKHR *val)
|
||||
{
|
||||
assert(val->sType == VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR);
|
||||
vn_encode_VkStructureType(enc, &(VkStructureType){ VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR });
|
||||
vn_encode_VkAccelerationStructureBuildGeometryInfoKHR_pnext(enc, val->pNext);
|
||||
vn_encode_VkAccelerationStructureBuildGeometryInfoKHR_self(enc, val);
|
||||
}
|
||||
|
||||
/* struct VkAccelerationStructureBuildRangeInfoKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureBuildRangeInfoKHR(const VkAccelerationStructureBuildRangeInfoKHR *val)
|
||||
{
|
||||
size_t size = 0;
|
||||
size += vn_sizeof_uint32_t(&val->primitiveCount);
|
||||
size += vn_sizeof_uint32_t(&val->primitiveOffset);
|
||||
size += vn_sizeof_uint32_t(&val->firstVertex);
|
||||
size += vn_sizeof_uint32_t(&val->transformOffset);
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureBuildRangeInfoKHR(struct vn_cs_encoder *enc, const VkAccelerationStructureBuildRangeInfoKHR *val)
|
||||
{
|
||||
vn_encode_uint32_t(enc, &val->primitiveCount);
|
||||
vn_encode_uint32_t(enc, &val->primitiveOffset);
|
||||
vn_encode_uint32_t(enc, &val->firstVertex);
|
||||
vn_encode_uint32_t(enc, &val->transformOffset);
|
||||
}
|
||||
|
||||
/* struct VkImageCopy2 chain */
|
||||
|
||||
static inline size_t
|
||||
|
|
|
|||
|
|
@ -275,6 +275,273 @@ vn_encode_VkRingCreateInfoMESA(struct vn_cs_encoder *enc, const VkRingCreateInfo
|
|||
vn_encode_VkRingCreateInfoMESA_self(enc, val);
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkCreateDeferredOperationKHR(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkCreateDeferredOperationKHR_EXT;
|
||||
const VkFlags cmd_flags = 0;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type) + vn_sizeof_VkFlags(&cmd_flags);
|
||||
|
||||
cmd_size += vn_sizeof_VkDevice(&device);
|
||||
cmd_size += vn_sizeof_simple_pointer(pAllocator);
|
||||
if (pAllocator)
|
||||
assert(false);
|
||||
cmd_size += vn_sizeof_simple_pointer(pDeferredOperation);
|
||||
if (pDeferredOperation)
|
||||
cmd_size += vn_sizeof_VkDeferredOperationKHR(pDeferredOperation);
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline void vn_encode_vkCreateDeferredOperationKHR(struct vn_cs_encoder *enc, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkCreateDeferredOperationKHR_EXT;
|
||||
|
||||
vn_encode_VkCommandTypeEXT(enc, &cmd_type);
|
||||
vn_encode_VkFlags(enc, &cmd_flags);
|
||||
|
||||
vn_encode_VkDevice(enc, &device);
|
||||
if (vn_encode_simple_pointer(enc, pAllocator))
|
||||
assert(false);
|
||||
if (vn_encode_simple_pointer(enc, pDeferredOperation))
|
||||
vn_encode_VkDeferredOperationKHR(enc, pDeferredOperation);
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkCreateDeferredOperationKHR_reply(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkCreateDeferredOperationKHR_EXT;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type);
|
||||
|
||||
VkResult ret;
|
||||
cmd_size += vn_sizeof_VkResult(&ret);
|
||||
/* skip device */
|
||||
/* skip pAllocator */
|
||||
cmd_size += vn_sizeof_simple_pointer(pDeferredOperation);
|
||||
if (pDeferredOperation)
|
||||
cmd_size += vn_sizeof_VkDeferredOperationKHR(pDeferredOperation);
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline VkResult vn_decode_vkCreateDeferredOperationKHR_reply(struct vn_cs_decoder *dec, VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation)
|
||||
{
|
||||
VkCommandTypeEXT command_type;
|
||||
vn_decode_VkCommandTypeEXT(dec, &command_type);
|
||||
assert(command_type == VK_COMMAND_TYPE_vkCreateDeferredOperationKHR_EXT);
|
||||
|
||||
VkResult ret;
|
||||
vn_decode_VkResult(dec, &ret);
|
||||
/* skip device */
|
||||
/* skip pAllocator */
|
||||
if (vn_decode_simple_pointer(dec)) {
|
||||
vn_decode_VkDeferredOperationKHR(dec, pDeferredOperation);
|
||||
} else {
|
||||
pDeferredOperation = NULL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkDestroyDeferredOperationKHR(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkDestroyDeferredOperationKHR_EXT;
|
||||
const VkFlags cmd_flags = 0;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type) + vn_sizeof_VkFlags(&cmd_flags);
|
||||
|
||||
cmd_size += vn_sizeof_VkDevice(&device);
|
||||
cmd_size += vn_sizeof_VkDeferredOperationKHR(&operation);
|
||||
cmd_size += vn_sizeof_simple_pointer(pAllocator);
|
||||
if (pAllocator)
|
||||
assert(false);
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline void vn_encode_vkDestroyDeferredOperationKHR(struct vn_cs_encoder *enc, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkDestroyDeferredOperationKHR_EXT;
|
||||
|
||||
vn_encode_VkCommandTypeEXT(enc, &cmd_type);
|
||||
vn_encode_VkFlags(enc, &cmd_flags);
|
||||
|
||||
vn_encode_VkDevice(enc, &device);
|
||||
vn_encode_VkDeferredOperationKHR(enc, &operation);
|
||||
if (vn_encode_simple_pointer(enc, pAllocator))
|
||||
assert(false);
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkDestroyDeferredOperationKHR_reply(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkDestroyDeferredOperationKHR_EXT;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type);
|
||||
|
||||
/* skip device */
|
||||
/* skip operation */
|
||||
/* skip pAllocator */
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline void vn_decode_vkDestroyDeferredOperationKHR_reply(struct vn_cs_decoder *dec, VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator)
|
||||
{
|
||||
VkCommandTypeEXT command_type;
|
||||
vn_decode_VkCommandTypeEXT(dec, &command_type);
|
||||
assert(command_type == VK_COMMAND_TYPE_vkDestroyDeferredOperationKHR_EXT);
|
||||
|
||||
/* skip device */
|
||||
/* skip operation */
|
||||
/* skip pAllocator */
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkGetDeferredOperationMaxConcurrencyKHR(VkDevice device, VkDeferredOperationKHR operation)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetDeferredOperationMaxConcurrencyKHR_EXT;
|
||||
const VkFlags cmd_flags = 0;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type) + vn_sizeof_VkFlags(&cmd_flags);
|
||||
|
||||
cmd_size += vn_sizeof_VkDevice(&device);
|
||||
cmd_size += vn_sizeof_VkDeferredOperationKHR(&operation);
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline void vn_encode_vkGetDeferredOperationMaxConcurrencyKHR(struct vn_cs_encoder *enc, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeferredOperationKHR operation)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetDeferredOperationMaxConcurrencyKHR_EXT;
|
||||
|
||||
vn_encode_VkCommandTypeEXT(enc, &cmd_type);
|
||||
vn_encode_VkFlags(enc, &cmd_flags);
|
||||
|
||||
vn_encode_VkDevice(enc, &device);
|
||||
vn_encode_VkDeferredOperationKHR(enc, &operation);
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkGetDeferredOperationMaxConcurrencyKHR_reply(VkDevice device, VkDeferredOperationKHR operation)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetDeferredOperationMaxConcurrencyKHR_EXT;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type);
|
||||
|
||||
uint32_t ret;
|
||||
cmd_size += vn_sizeof_uint32_t(&ret);
|
||||
/* skip device */
|
||||
/* skip operation */
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline uint32_t vn_decode_vkGetDeferredOperationMaxConcurrencyKHR_reply(struct vn_cs_decoder *dec, VkDevice device, VkDeferredOperationKHR operation)
|
||||
{
|
||||
VkCommandTypeEXT command_type;
|
||||
vn_decode_VkCommandTypeEXT(dec, &command_type);
|
||||
assert(command_type == VK_COMMAND_TYPE_vkGetDeferredOperationMaxConcurrencyKHR_EXT);
|
||||
|
||||
uint32_t ret;
|
||||
vn_decode_uint32_t(dec, &ret);
|
||||
/* skip device */
|
||||
/* skip operation */
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkGetDeferredOperationResultKHR(VkDevice device, VkDeferredOperationKHR operation)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetDeferredOperationResultKHR_EXT;
|
||||
const VkFlags cmd_flags = 0;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type) + vn_sizeof_VkFlags(&cmd_flags);
|
||||
|
||||
cmd_size += vn_sizeof_VkDevice(&device);
|
||||
cmd_size += vn_sizeof_VkDeferredOperationKHR(&operation);
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline void vn_encode_vkGetDeferredOperationResultKHR(struct vn_cs_encoder *enc, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeferredOperationKHR operation)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetDeferredOperationResultKHR_EXT;
|
||||
|
||||
vn_encode_VkCommandTypeEXT(enc, &cmd_type);
|
||||
vn_encode_VkFlags(enc, &cmd_flags);
|
||||
|
||||
vn_encode_VkDevice(enc, &device);
|
||||
vn_encode_VkDeferredOperationKHR(enc, &operation);
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkGetDeferredOperationResultKHR_reply(VkDevice device, VkDeferredOperationKHR operation)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkGetDeferredOperationResultKHR_EXT;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type);
|
||||
|
||||
VkResult ret;
|
||||
cmd_size += vn_sizeof_VkResult(&ret);
|
||||
/* skip device */
|
||||
/* skip operation */
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline VkResult vn_decode_vkGetDeferredOperationResultKHR_reply(struct vn_cs_decoder *dec, VkDevice device, VkDeferredOperationKHR operation)
|
||||
{
|
||||
VkCommandTypeEXT command_type;
|
||||
vn_decode_VkCommandTypeEXT(dec, &command_type);
|
||||
assert(command_type == VK_COMMAND_TYPE_vkGetDeferredOperationResultKHR_EXT);
|
||||
|
||||
VkResult ret;
|
||||
vn_decode_VkResult(dec, &ret);
|
||||
/* skip device */
|
||||
/* skip operation */
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkDeferredOperationJoinKHR(VkDevice device, VkDeferredOperationKHR operation)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkDeferredOperationJoinKHR_EXT;
|
||||
const VkFlags cmd_flags = 0;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type) + vn_sizeof_VkFlags(&cmd_flags);
|
||||
|
||||
cmd_size += vn_sizeof_VkDevice(&device);
|
||||
cmd_size += vn_sizeof_VkDeferredOperationKHR(&operation);
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline void vn_encode_vkDeferredOperationJoinKHR(struct vn_cs_encoder *enc, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeferredOperationKHR operation)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkDeferredOperationJoinKHR_EXT;
|
||||
|
||||
vn_encode_VkCommandTypeEXT(enc, &cmd_type);
|
||||
vn_encode_VkFlags(enc, &cmd_flags);
|
||||
|
||||
vn_encode_VkDevice(enc, &device);
|
||||
vn_encode_VkDeferredOperationKHR(enc, &operation);
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkDeferredOperationJoinKHR_reply(VkDevice device, VkDeferredOperationKHR operation)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkDeferredOperationJoinKHR_EXT;
|
||||
size_t cmd_size = vn_sizeof_VkCommandTypeEXT(&cmd_type);
|
||||
|
||||
VkResult ret;
|
||||
cmd_size += vn_sizeof_VkResult(&ret);
|
||||
/* skip device */
|
||||
/* skip operation */
|
||||
|
||||
return cmd_size;
|
||||
}
|
||||
|
||||
static inline VkResult vn_decode_vkDeferredOperationJoinKHR_reply(struct vn_cs_decoder *dec, VkDevice device, VkDeferredOperationKHR operation)
|
||||
{
|
||||
VkCommandTypeEXT command_type;
|
||||
vn_decode_VkCommandTypeEXT(dec, &command_type);
|
||||
assert(command_type == VK_COMMAND_TYPE_vkDeferredOperationJoinKHR_EXT);
|
||||
|
||||
VkResult ret;
|
||||
vn_decode_VkResult(dec, &ret);
|
||||
/* skip device */
|
||||
/* skip operation */
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline size_t vn_sizeof_vkSetReplyCommandStreamMESA(const VkCommandStreamDescriptionMESA* pStream)
|
||||
{
|
||||
const VkCommandTypeEXT cmd_type = VK_COMMAND_TYPE_vkSetReplyCommandStreamMESA_EXT;
|
||||
|
|
@ -763,6 +1030,111 @@ static inline void vn_decode_vkWaitRingSeqnoMESA_reply(struct vn_cs_decoder *dec
|
|||
/* skip seqno */
|
||||
}
|
||||
|
||||
static inline void vn_submit_vkCreateDeferredOperationKHR(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation, struct vn_ring_submit_command *submit)
|
||||
{
|
||||
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
|
||||
void *cmd_data = local_cmd_data;
|
||||
size_t cmd_size = vn_sizeof_vkCreateDeferredOperationKHR(device, pAllocator, pDeferredOperation);
|
||||
if (cmd_size > sizeof(local_cmd_data)) {
|
||||
cmd_data = malloc(cmd_size);
|
||||
if (!cmd_data)
|
||||
cmd_size = 0;
|
||||
}
|
||||
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateDeferredOperationKHR_reply(device, pAllocator, pDeferredOperation) : 0;
|
||||
|
||||
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
|
||||
if (cmd_size) {
|
||||
vn_encode_vkCreateDeferredOperationKHR(enc, cmd_flags, device, pAllocator, pDeferredOperation);
|
||||
vn_ring_submit_command(vn_ring, submit);
|
||||
if (cmd_data != local_cmd_data)
|
||||
free(cmd_data);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void vn_submit_vkDestroyDeferredOperationKHR(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
|
||||
{
|
||||
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
|
||||
void *cmd_data = local_cmd_data;
|
||||
size_t cmd_size = vn_sizeof_vkDestroyDeferredOperationKHR(device, operation, pAllocator);
|
||||
if (cmd_size > sizeof(local_cmd_data)) {
|
||||
cmd_data = malloc(cmd_size);
|
||||
if (!cmd_data)
|
||||
cmd_size = 0;
|
||||
}
|
||||
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyDeferredOperationKHR_reply(device, operation, pAllocator) : 0;
|
||||
|
||||
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
|
||||
if (cmd_size) {
|
||||
vn_encode_vkDestroyDeferredOperationKHR(enc, cmd_flags, device, operation, pAllocator);
|
||||
vn_ring_submit_command(vn_ring, submit);
|
||||
if (cmd_data != local_cmd_data)
|
||||
free(cmd_data);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void vn_submit_vkGetDeferredOperationMaxConcurrencyKHR(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeferredOperationKHR operation, struct vn_ring_submit_command *submit)
{
    /* Size, encode and submit vkGetDeferredOperationMaxConcurrencyKHR over the
     * venus ring. A reply is sized only when VK_COMMAND_GENERATE_REPLY_BIT_EXT
     * is set.
     */
    uint8_t stack_buf[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *buf = stack_buf;

    const size_t reply_size =
        (cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT)
            ? vn_sizeof_vkGetDeferredOperationMaxConcurrencyKHR_reply(device, operation)
            : 0;

    size_t size = vn_sizeof_vkGetDeferredOperationMaxConcurrencyKHR(device, operation);
    if (size > sizeof(stack_buf)) {
        /* command too large for the stack buffer: fall back to the heap */
        buf = malloc(size);
        if (!buf)
            size = 0; /* OOM: submit is still initialized but nothing is encoded */
    }

    struct vn_cs_encoder *encoder =
        vn_ring_submit_command_init(vn_ring, submit, buf, size, reply_size);
    if (size) {
        vn_encode_vkGetDeferredOperationMaxConcurrencyKHR(encoder, cmd_flags, device, operation);
        vn_ring_submit_command(vn_ring, submit);
        if (buf != stack_buf)
            free(buf);
    }
}
|
||||
|
||||
static inline void vn_submit_vkGetDeferredOperationResultKHR(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeferredOperationKHR operation, struct vn_ring_submit_command *submit)
{
    /* Size, encode and submit vkGetDeferredOperationResultKHR over the venus
     * ring. A reply is sized only when VK_COMMAND_GENERATE_REPLY_BIT_EXT is set.
     */
    uint8_t stack_buf[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *buf = stack_buf;

    const size_t reply_size =
        (cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT)
            ? vn_sizeof_vkGetDeferredOperationResultKHR_reply(device, operation)
            : 0;

    size_t size = vn_sizeof_vkGetDeferredOperationResultKHR(device, operation);
    if (size > sizeof(stack_buf)) {
        /* command too large for the stack buffer: fall back to the heap */
        buf = malloc(size);
        if (!buf)
            size = 0; /* OOM: submit is still initialized but nothing is encoded */
    }

    struct vn_cs_encoder *encoder =
        vn_ring_submit_command_init(vn_ring, submit, buf, size, reply_size);
    if (size) {
        vn_encode_vkGetDeferredOperationResultKHR(encoder, cmd_flags, device, operation);
        vn_ring_submit_command(vn_ring, submit);
        if (buf != stack_buf)
            free(buf);
    }
}
|
||||
|
||||
static inline void vn_submit_vkDeferredOperationJoinKHR(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeferredOperationKHR operation, struct vn_ring_submit_command *submit)
{
    /* Size, encode and submit vkDeferredOperationJoinKHR over the venus ring.
     * A reply is sized only when VK_COMMAND_GENERATE_REPLY_BIT_EXT is set.
     */
    uint8_t stack_buf[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *buf = stack_buf;

    const size_t reply_size =
        (cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT)
            ? vn_sizeof_vkDeferredOperationJoinKHR_reply(device, operation)
            : 0;

    size_t size = vn_sizeof_vkDeferredOperationJoinKHR(device, operation);
    if (size > sizeof(stack_buf)) {
        /* command too large for the stack buffer: fall back to the heap */
        buf = malloc(size);
        if (!buf)
            size = 0; /* OOM: submit is still initialized but nothing is encoded */
    }

    struct vn_cs_encoder *encoder =
        vn_ring_submit_command_init(vn_ring, submit, buf, size, reply_size);
    if (size) {
        vn_encode_vkDeferredOperationJoinKHR(encoder, cmd_flags, device, operation);
        vn_ring_submit_command(vn_ring, submit);
        if (buf != stack_buf)
            free(buf);
    }
}
|
||||
|
||||
static inline void vn_submit_vkSetReplyCommandStreamMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, const VkCommandStreamDescriptionMESA* pStream, struct vn_ring_submit_command *submit)
|
||||
{
|
||||
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
|
||||
|
|
@ -973,6 +1345,113 @@ static inline void vn_submit_vkWaitRingSeqnoMESA(struct vn_ring *vn_ring, VkComm
|
|||
}
|
||||
}
|
||||
|
||||
static inline VkResult vn_call_vkCreateDeferredOperationKHR(struct vn_ring *vn_ring, VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation)
{
    /* Synchronous variant: submit with a reply requested and decode the result. */
    VN_TRACE_FUNC();

    struct vn_ring_submit_command submit;
    vn_submit_vkCreateDeferredOperationKHR(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pAllocator, pDeferredOperation, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (!dec)
        return VK_ERROR_OUT_OF_HOST_MEMORY; /* no reply could be obtained */

    const VkResult result = vn_decode_vkCreateDeferredOperationKHR_reply(dec, device, pAllocator, pDeferredOperation);
    vn_ring_free_command_reply(vn_ring, &submit);
    return result;
}

static inline void vn_async_vkCreateDeferredOperationKHR(struct vn_ring *vn_ring, VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation)
{
    /* Fire-and-forget variant: no reply is requested or decoded. */
    struct vn_ring_submit_command submit;
    vn_submit_vkCreateDeferredOperationKHR(vn_ring, 0, device, pAllocator, pDeferredOperation, &submit);
}
|
||||
|
||||
static inline void vn_call_vkDestroyDeferredOperationKHR(struct vn_ring *vn_ring, VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator)
{
    /* Synchronous variant: submit with a reply requested and decode it. */
    VN_TRACE_FUNC();

    struct vn_ring_submit_command submit;
    vn_submit_vkDestroyDeferredOperationKHR(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, operation, pAllocator, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (!dec)
        return; /* no reply could be obtained; nothing to decode */

    vn_decode_vkDestroyDeferredOperationKHR_reply(dec, device, operation, pAllocator);
    vn_ring_free_command_reply(vn_ring, &submit);
}

static inline void vn_async_vkDestroyDeferredOperationKHR(struct vn_ring *vn_ring, VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator)
{
    /* Fire-and-forget variant: no reply is requested or decoded. */
    struct vn_ring_submit_command submit;
    vn_submit_vkDestroyDeferredOperationKHR(vn_ring, 0, device, operation, pAllocator, &submit);
}
|
||||
|
||||
static inline uint32_t vn_call_vkGetDeferredOperationMaxConcurrencyKHR(struct vn_ring *vn_ring, VkDevice device, VkDeferredOperationKHR operation)
{
    /* Synchronous variant: submit with a reply requested and decode the count. */
    VN_TRACE_FUNC();

    struct vn_ring_submit_command submit;
    vn_submit_vkGetDeferredOperationMaxConcurrencyKHR(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, operation, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (!dec) {
        /* NOTE(review): generator quirk — a VkResult error code is returned
         * through a uint32_t; preserved as-is from the generated code. */
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    const uint32_t result = vn_decode_vkGetDeferredOperationMaxConcurrencyKHR_reply(dec, device, operation);
    vn_ring_free_command_reply(vn_ring, &submit);
    return result;
}

static inline void vn_async_vkGetDeferredOperationMaxConcurrencyKHR(struct vn_ring *vn_ring, VkDevice device, VkDeferredOperationKHR operation)
{
    /* Fire-and-forget variant: no reply is requested or decoded. */
    struct vn_ring_submit_command submit;
    vn_submit_vkGetDeferredOperationMaxConcurrencyKHR(vn_ring, 0, device, operation, &submit);
}
|
||||
|
||||
static inline VkResult vn_call_vkGetDeferredOperationResultKHR(struct vn_ring *vn_ring, VkDevice device, VkDeferredOperationKHR operation)
{
    /* Synchronous variant: submit with a reply requested and decode the result. */
    VN_TRACE_FUNC();

    struct vn_ring_submit_command submit;
    vn_submit_vkGetDeferredOperationResultKHR(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, operation, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (!dec)
        return VK_ERROR_OUT_OF_HOST_MEMORY; /* no reply could be obtained */

    const VkResult result = vn_decode_vkGetDeferredOperationResultKHR_reply(dec, device, operation);
    vn_ring_free_command_reply(vn_ring, &submit);
    return result;
}

static inline void vn_async_vkGetDeferredOperationResultKHR(struct vn_ring *vn_ring, VkDevice device, VkDeferredOperationKHR operation)
{
    /* Fire-and-forget variant: no reply is requested or decoded. */
    struct vn_ring_submit_command submit;
    vn_submit_vkGetDeferredOperationResultKHR(vn_ring, 0, device, operation, &submit);
}
|
||||
|
||||
static inline VkResult vn_call_vkDeferredOperationJoinKHR(struct vn_ring *vn_ring, VkDevice device, VkDeferredOperationKHR operation)
{
    /* Synchronous variant: submit with a reply requested and decode the result. */
    VN_TRACE_FUNC();

    struct vn_ring_submit_command submit;
    vn_submit_vkDeferredOperationJoinKHR(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, operation, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (!dec)
        return VK_ERROR_OUT_OF_HOST_MEMORY; /* no reply could be obtained */

    const VkResult result = vn_decode_vkDeferredOperationJoinKHR_reply(dec, device, operation);
    vn_ring_free_command_reply(vn_ring, &submit);
    return result;
}

static inline void vn_async_vkDeferredOperationJoinKHR(struct vn_ring *vn_ring, VkDevice device, VkDeferredOperationKHR operation)
{
    /* Fire-and-forget variant: no reply is requested or decoded. */
    struct vn_ring_submit_command submit;
    vn_submit_vkDeferredOperationJoinKHR(vn_ring, 0, device, operation, &submit);
}
|
||||
|
||||
static inline void vn_call_vkSetReplyCommandStreamMESA(struct vn_ring *vn_ring, const VkCommandStreamDescriptionMESA* pStream)
|
||||
{
|
||||
VN_TRACE_FUNC();
|
||||
|
|
|
|||
|
|
@ -1675,6 +1675,90 @@ vn_decode_VkSubgroupFeatureFlagBits(struct vn_cs_decoder *dec, VkSubgroupFeature
|
|||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
/* enum VkGeometryFlagBitsKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkGeometryFlagBitsKHR(const VkGeometryFlagBitsKHR *val)
|
||||
{
|
||||
assert(sizeof(*val) == sizeof(int32_t));
|
||||
return vn_sizeof_int32_t((const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkGeometryFlagBitsKHR(struct vn_cs_encoder *enc, const VkGeometryFlagBitsKHR *val)
|
||||
{
|
||||
vn_encode_int32_t(enc, (const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkGeometryFlagBitsKHR(struct vn_cs_decoder *dec, VkGeometryFlagBitsKHR *val)
|
||||
{
|
||||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
/* enum VkGeometryInstanceFlagBitsKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkGeometryInstanceFlagBitsKHR(const VkGeometryInstanceFlagBitsKHR *val)
|
||||
{
|
||||
assert(sizeof(*val) == sizeof(int32_t));
|
||||
return vn_sizeof_int32_t((const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkGeometryInstanceFlagBitsKHR(struct vn_cs_encoder *enc, const VkGeometryInstanceFlagBitsKHR *val)
|
||||
{
|
||||
vn_encode_int32_t(enc, (const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkGeometryInstanceFlagBitsKHR(struct vn_cs_decoder *dec, VkGeometryInstanceFlagBitsKHR *val)
|
||||
{
|
||||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
/* enum VkBuildAccelerationStructureFlagBitsKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkBuildAccelerationStructureFlagBitsKHR(const VkBuildAccelerationStructureFlagBitsKHR *val)
|
||||
{
|
||||
assert(sizeof(*val) == sizeof(int32_t));
|
||||
return vn_sizeof_int32_t((const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkBuildAccelerationStructureFlagBitsKHR(struct vn_cs_encoder *enc, const VkBuildAccelerationStructureFlagBitsKHR *val)
|
||||
{
|
||||
vn_encode_int32_t(enc, (const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkBuildAccelerationStructureFlagBitsKHR(struct vn_cs_decoder *dec, VkBuildAccelerationStructureFlagBitsKHR *val)
|
||||
{
|
||||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
/* enum VkAccelerationStructureCreateFlagBitsKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureCreateFlagBitsKHR(const VkAccelerationStructureCreateFlagBitsKHR *val)
|
||||
{
|
||||
assert(sizeof(*val) == sizeof(int32_t));
|
||||
return vn_sizeof_int32_t((const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureCreateFlagBitsKHR(struct vn_cs_encoder *enc, const VkAccelerationStructureCreateFlagBitsKHR *val)
|
||||
{
|
||||
vn_encode_int32_t(enc, (const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkAccelerationStructureCreateFlagBitsKHR(struct vn_cs_decoder *dec, VkAccelerationStructureCreateFlagBitsKHR *val)
|
||||
{
|
||||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
/* enum VkPipelineCreationFeedbackFlagBits */
|
||||
|
||||
static inline size_t
|
||||
|
|
@ -3223,6 +3307,192 @@ vn_decode_VkSemaphoreType(struct vn_cs_decoder *dec, VkSemaphoreType *val)
|
|||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
/* enum VkBuildAccelerationStructureModeKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkBuildAccelerationStructureModeKHR(const VkBuildAccelerationStructureModeKHR *val)
|
||||
{
|
||||
assert(sizeof(*val) == sizeof(int32_t));
|
||||
return vn_sizeof_int32_t((const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkBuildAccelerationStructureModeKHR(struct vn_cs_encoder *enc, const VkBuildAccelerationStructureModeKHR *val)
|
||||
{
|
||||
vn_encode_int32_t(enc, (const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkBuildAccelerationStructureModeKHR(struct vn_cs_decoder *dec, VkBuildAccelerationStructureModeKHR *val)
|
||||
{
|
||||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
/* enum VkCopyAccelerationStructureModeKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkCopyAccelerationStructureModeKHR(const VkCopyAccelerationStructureModeKHR *val)
|
||||
{
|
||||
assert(sizeof(*val) == sizeof(int32_t));
|
||||
return vn_sizeof_int32_t((const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkCopyAccelerationStructureModeKHR(struct vn_cs_encoder *enc, const VkCopyAccelerationStructureModeKHR *val)
|
||||
{
|
||||
vn_encode_int32_t(enc, (const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkCopyAccelerationStructureModeKHR(struct vn_cs_decoder *dec, VkCopyAccelerationStructureModeKHR *val)
|
||||
{
|
||||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
/* enum VkAccelerationStructureTypeKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureTypeKHR(const VkAccelerationStructureTypeKHR *val)
|
||||
{
|
||||
assert(sizeof(*val) == sizeof(int32_t));
|
||||
return vn_sizeof_int32_t((const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureTypeKHR(struct vn_cs_encoder *enc, const VkAccelerationStructureTypeKHR *val)
|
||||
{
|
||||
vn_encode_int32_t(enc, (const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkAccelerationStructureTypeKHR(struct vn_cs_decoder *dec, VkAccelerationStructureTypeKHR *val)
|
||||
{
|
||||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
/* enum VkGeometryTypeKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkGeometryTypeKHR(const VkGeometryTypeKHR *val)
|
||||
{
|
||||
assert(sizeof(*val) == sizeof(int32_t));
|
||||
return vn_sizeof_int32_t((const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkGeometryTypeKHR(struct vn_cs_encoder *enc, const VkGeometryTypeKHR *val)
|
||||
{
|
||||
vn_encode_int32_t(enc, (const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkGeometryTypeKHR(struct vn_cs_decoder *dec, VkGeometryTypeKHR *val)
|
||||
{
|
||||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
/* enum VkRayTracingShaderGroupTypeKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkRayTracingShaderGroupTypeKHR(const VkRayTracingShaderGroupTypeKHR *val)
|
||||
{
|
||||
assert(sizeof(*val) == sizeof(int32_t));
|
||||
return vn_sizeof_int32_t((const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkRayTracingShaderGroupTypeKHR(struct vn_cs_encoder *enc, const VkRayTracingShaderGroupTypeKHR *val)
|
||||
{
|
||||
vn_encode_int32_t(enc, (const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkRayTracingShaderGroupTypeKHR(struct vn_cs_decoder *dec, VkRayTracingShaderGroupTypeKHR *val)
|
||||
{
|
||||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
/* enum VkAccelerationStructureBuildTypeKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureBuildTypeKHR(const VkAccelerationStructureBuildTypeKHR *val)
|
||||
{
|
||||
assert(sizeof(*val) == sizeof(int32_t));
|
||||
return vn_sizeof_int32_t((const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureBuildTypeKHR(struct vn_cs_encoder *enc, const VkAccelerationStructureBuildTypeKHR *val)
|
||||
{
|
||||
vn_encode_int32_t(enc, (const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkAccelerationStructureBuildTypeKHR(struct vn_cs_decoder *dec, VkAccelerationStructureBuildTypeKHR *val)
|
||||
{
|
||||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
/* enum VkAccelerationStructureCompatibilityKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureCompatibilityKHR(const VkAccelerationStructureCompatibilityKHR *val)
|
||||
{
|
||||
assert(sizeof(*val) == sizeof(int32_t));
|
||||
return vn_sizeof_int32_t((const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureCompatibilityKHR(struct vn_cs_encoder *enc, const VkAccelerationStructureCompatibilityKHR *val)
|
||||
{
|
||||
vn_encode_int32_t(enc, (const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkAccelerationStructureCompatibilityKHR(struct vn_cs_decoder *dec, VkAccelerationStructureCompatibilityKHR *val)
|
||||
{
|
||||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkAccelerationStructureCompatibilityKHR_array(const VkAccelerationStructureCompatibilityKHR *val, uint32_t count)
|
||||
{
|
||||
return vn_sizeof_int32_t_array((const int32_t *)val, count);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkAccelerationStructureCompatibilityKHR_array(struct vn_cs_encoder *enc, const VkAccelerationStructureCompatibilityKHR *val, uint32_t count)
|
||||
{
|
||||
vn_encode_int32_t_array(enc, (const int32_t *)val, count);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkAccelerationStructureCompatibilityKHR_array(struct vn_cs_decoder *dec, VkAccelerationStructureCompatibilityKHR *val, uint32_t count)
|
||||
{
|
||||
vn_decode_int32_t_array(dec, (int32_t *)val, count);
|
||||
}
|
||||
|
||||
/* enum VkShaderGroupShaderKHR */
|
||||
|
||||
static inline size_t
|
||||
vn_sizeof_VkShaderGroupShaderKHR(const VkShaderGroupShaderKHR *val)
|
||||
{
|
||||
assert(sizeof(*val) == sizeof(int32_t));
|
||||
return vn_sizeof_int32_t((const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_encode_VkShaderGroupShaderKHR(struct vn_cs_encoder *enc, const VkShaderGroupShaderKHR *val)
|
||||
{
|
||||
vn_encode_int32_t(enc, (const int32_t *)val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
vn_decode_VkShaderGroupShaderKHR(struct vn_cs_decoder *dec, VkShaderGroupShaderKHR *val)
|
||||
{
|
||||
vn_decode_int32_t(dec, (int32_t *)val);
|
||||
}
|
||||
|
||||
/* enum VkLineRasterizationMode */
|
||||
|
||||
static inline size_t
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue