venus: switch to vn_ring as the protocol interface - part 3

Sync protocol and fix all the interfaces, otherwise we have to generate
two sets of headers with both interfaces to separate protocol sync and
the driver side adaptation.

Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26179>
Authored by:
Yiwei Zhang, 2023-11-09 20:18:37 -08:00 (committed by Marge Bot)
parent 5943f70c7a
commit b170c1a391
42 changed files with 3140 additions and 3187 deletions

View file

@@ -1,4 +1,4 @@
/* This file is generated by venus-protocol git-625e3d92. */
/* This file is generated by venus-protocol git-bfa3ebfb. */
/*
* Copyright 2020 Google LLC

View file

@@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_BUFFER_H
#define VN_PROTOCOL_DRIVER_BUFFER_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkExternalMemoryBufferCreateInfo chain */
@@ -1083,7 +1083,7 @@ static inline VkDeviceAddress vn_decode_vkGetBufferDeviceAddress_reply(struct vn
return ret;
}
static inline void vn_submit_vkGetBufferMemoryRequirements(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetBufferMemoryRequirements(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -1095,16 +1095,16 @@ static inline void vn_submit_vkGetBufferMemoryRequirements(struct vn_instance *v
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetBufferMemoryRequirements_reply(device, buffer, pMemoryRequirements) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetBufferMemoryRequirements(enc, cmd_flags, device, buffer, pMemoryRequirements);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkBindBufferMemory(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkBindBufferMemory(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -1116,16 +1116,16 @@ static inline void vn_submit_vkBindBufferMemory(struct vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkBindBufferMemory_reply(device, buffer, memory, memoryOffset) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkBindBufferMemory(enc, cmd_flags, device, buffer, memory, memoryOffset);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkCreateBuffer(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateBuffer(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -1137,16 +1137,16 @@ static inline void vn_submit_vkCreateBuffer(struct vn_instance *vn_instance, VkC
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateBuffer_reply(device, pCreateInfo, pAllocator, pBuffer) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateBuffer(enc, cmd_flags, device, pCreateInfo, pAllocator, pBuffer);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyBuffer(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyBuffer(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -1158,16 +1158,16 @@ static inline void vn_submit_vkDestroyBuffer(struct vn_instance *vn_instance, Vk
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyBuffer_reply(device, buffer, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyBuffer(enc, cmd_flags, device, buffer, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkBindBufferMemory2(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkBindBufferMemory2(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -1179,16 +1179,16 @@ static inline void vn_submit_vkBindBufferMemory2(struct vn_instance
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkBindBufferMemory2_reply(device, bindInfoCount, pBindInfos) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkBindBufferMemory2(enc, cmd_flags, device, bindInfoCount, pBindInfos);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetBufferMemoryRequirements2(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetBufferMemoryRequirements2(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -1200,16 +1200,16 @@ static inline void vn_submit_vkGetBufferMemoryRequirements2(struct vn_instance *
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetBufferMemoryRequirements2_reply(device, pInfo, pMemoryRequirements) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetBufferMemoryRequirements2(enc, cmd_flags, device, pInfo, pMemoryRequirements);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetDeviceBufferMemoryRequirements(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetDeviceBufferMemoryRequirements(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -1221,16 +1221,16 @@ static inline void vn_submit_vkGetDeviceBufferMemoryRequirements(struct vn_insta
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetDeviceBufferMemoryRequirements_reply(device, pInfo, pMemoryRequirements) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetDeviceBufferMemoryRequirements(enc, cmd_flags, device, pInfo, pMemoryRequirements);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetBufferOpaqueCaptureAddress(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkBufferDeviceAddressInfo* pInfo, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetBufferOpaqueCaptureAddress(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkBufferDeviceAddressInfo* pInfo, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -1242,16 +1242,16 @@ static inline void vn_submit_vkGetBufferOpaqueCaptureAddress(struct vn_instance
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetBufferOpaqueCaptureAddress_reply(device, pInfo) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetBufferOpaqueCaptureAddress(enc, cmd_flags, device, pInfo);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetBufferDeviceAddress(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkBufferDeviceAddressInfo* pInfo, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetBufferDeviceAddress(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkBufferDeviceAddressInfo* pInfo, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -1263,199 +1263,199 @@ static inline void vn_submit_vkGetBufferDeviceAddress(struct vn_instance *vn_ins
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetBufferDeviceAddress_reply(device, pInfo) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetBufferDeviceAddress(enc, cmd_flags, device, pInfo);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_call_vkGetBufferMemoryRequirements(struct vn_instance *vn_instance, VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements)
static inline void vn_call_vkGetBufferMemoryRequirements(struct vn_ring *vn_ring, VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetBufferMemoryRequirements(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, buffer, pMemoryRequirements, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetBufferMemoryRequirements(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, buffer, pMemoryRequirements, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkGetBufferMemoryRequirements_reply(dec, device, buffer, pMemoryRequirements);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkGetBufferMemoryRequirements(struct vn_instance *vn_instance, VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements)
static inline void vn_async_vkGetBufferMemoryRequirements(struct vn_ring *vn_ring, VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetBufferMemoryRequirements(vn_instance, 0, device, buffer, pMemoryRequirements, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetBufferMemoryRequirements(vn_ring, 0, device, buffer, pMemoryRequirements, &submit);
}
static inline VkResult vn_call_vkBindBufferMemory(struct vn_instance *vn_instance, VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset)
static inline VkResult vn_call_vkBindBufferMemory(struct vn_ring *vn_ring, VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkBindBufferMemory(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, buffer, memory, memoryOffset, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkBindBufferMemory(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, buffer, memory, memoryOffset, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkBindBufferMemory_reply(dec, device, buffer, memory, memoryOffset);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkBindBufferMemory(struct vn_instance *vn_instance, VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset)
static inline void vn_async_vkBindBufferMemory(struct vn_ring *vn_ring, VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset)
{
struct vn_instance_submit_command submit;
vn_submit_vkBindBufferMemory(vn_instance, 0, device, buffer, memory, memoryOffset, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkBindBufferMemory(vn_ring, 0, device, buffer, memory, memoryOffset, &submit);
}
static inline VkResult vn_call_vkCreateBuffer(struct vn_instance *vn_instance, VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer)
static inline VkResult vn_call_vkCreateBuffer(struct vn_ring *vn_ring, VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateBuffer(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pBuffer, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateBuffer(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pBuffer, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateBuffer_reply(dec, device, pCreateInfo, pAllocator, pBuffer);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateBuffer(struct vn_instance *vn_instance, VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer)
static inline void vn_async_vkCreateBuffer(struct vn_ring *vn_ring, VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateBuffer(vn_instance, 0, device, pCreateInfo, pAllocator, pBuffer, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateBuffer(vn_ring, 0, device, pCreateInfo, pAllocator, pBuffer, &submit);
}
static inline void vn_call_vkDestroyBuffer(struct vn_instance *vn_instance, VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyBuffer(struct vn_ring *vn_ring, VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyBuffer(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, buffer, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyBuffer(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, buffer, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyBuffer_reply(dec, device, buffer, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyBuffer(struct vn_instance *vn_instance, VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyBuffer(struct vn_ring *vn_ring, VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyBuffer(vn_instance, 0, device, buffer, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyBuffer(vn_ring, 0, device, buffer, pAllocator, &submit);
}
static inline VkResult vn_call_vkBindBufferMemory2(struct vn_instance *vn_instance, VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos)
static inline VkResult vn_call_vkBindBufferMemory2(struct vn_ring *vn_ring, VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkBindBufferMemory2(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, bindInfoCount, pBindInfos, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkBindBufferMemory2(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, bindInfoCount, pBindInfos, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkBindBufferMemory2_reply(dec, device, bindInfoCount, pBindInfos);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkBindBufferMemory2(struct vn_instance *vn_instance, VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos)
static inline void vn_async_vkBindBufferMemory2(struct vn_ring *vn_ring, VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos)
{
struct vn_instance_submit_command submit;
vn_submit_vkBindBufferMemory2(vn_instance, 0, device, bindInfoCount, pBindInfos, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkBindBufferMemory2(vn_ring, 0, device, bindInfoCount, pBindInfos, &submit);
}
static inline void vn_call_vkGetBufferMemoryRequirements2(struct vn_instance *vn_instance, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements)
static inline void vn_call_vkGetBufferMemoryRequirements2(struct vn_ring *vn_ring, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetBufferMemoryRequirements2(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, pMemoryRequirements, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetBufferMemoryRequirements2(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, pMemoryRequirements, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkGetBufferMemoryRequirements2_reply(dec, device, pInfo, pMemoryRequirements);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkGetBufferMemoryRequirements2(struct vn_instance *vn_instance, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements)
static inline void vn_async_vkGetBufferMemoryRequirements2(struct vn_ring *vn_ring, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetBufferMemoryRequirements2(vn_instance, 0, device, pInfo, pMemoryRequirements, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetBufferMemoryRequirements2(vn_ring, 0, device, pInfo, pMemoryRequirements, &submit);
}
static inline void vn_call_vkGetDeviceBufferMemoryRequirements(struct vn_instance *vn_instance, VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements)
static inline void vn_call_vkGetDeviceBufferMemoryRequirements(struct vn_ring *vn_ring, VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetDeviceBufferMemoryRequirements(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, pMemoryRequirements, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetDeviceBufferMemoryRequirements(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, pMemoryRequirements, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkGetDeviceBufferMemoryRequirements_reply(dec, device, pInfo, pMemoryRequirements);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkGetDeviceBufferMemoryRequirements(struct vn_instance *vn_instance, VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements)
static inline void vn_async_vkGetDeviceBufferMemoryRequirements(struct vn_ring *vn_ring, VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetDeviceBufferMemoryRequirements(vn_instance, 0, device, pInfo, pMemoryRequirements, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetDeviceBufferMemoryRequirements(vn_ring, 0, device, pInfo, pMemoryRequirements, &submit);
}
static inline uint64_t vn_call_vkGetBufferOpaqueCaptureAddress(struct vn_instance *vn_instance, VkDevice device, const VkBufferDeviceAddressInfo* pInfo)
static inline uint64_t vn_call_vkGetBufferOpaqueCaptureAddress(struct vn_ring *vn_ring, VkDevice device, const VkBufferDeviceAddressInfo* pInfo)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetBufferOpaqueCaptureAddress(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetBufferOpaqueCaptureAddress(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const uint64_t ret = vn_decode_vkGetBufferOpaqueCaptureAddress_reply(dec, device, pInfo);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkGetBufferOpaqueCaptureAddress(struct vn_instance *vn_instance, VkDevice device, const VkBufferDeviceAddressInfo* pInfo)
static inline void vn_async_vkGetBufferOpaqueCaptureAddress(struct vn_ring *vn_ring, VkDevice device, const VkBufferDeviceAddressInfo* pInfo)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetBufferOpaqueCaptureAddress(vn_instance, 0, device, pInfo, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetBufferOpaqueCaptureAddress(vn_ring, 0, device, pInfo, &submit);
}
static inline VkDeviceAddress vn_call_vkGetBufferDeviceAddress(struct vn_instance *vn_instance, VkDevice device, const VkBufferDeviceAddressInfo* pInfo)
static inline VkDeviceAddress vn_call_vkGetBufferDeviceAddress(struct vn_ring *vn_ring, VkDevice device, const VkBufferDeviceAddressInfo* pInfo)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetBufferDeviceAddress(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetBufferDeviceAddress(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkDeviceAddress ret = vn_decode_vkGetBufferDeviceAddress_reply(dec, device, pInfo);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkGetBufferDeviceAddress(struct vn_instance *vn_instance, VkDevice device, const VkBufferDeviceAddressInfo* pInfo)
static inline void vn_async_vkGetBufferDeviceAddress(struct vn_ring *vn_ring, VkDevice device, const VkBufferDeviceAddressInfo* pInfo)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetBufferDeviceAddress(vn_instance, 0, device, pInfo, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetBufferDeviceAddress(vn_ring, 0, device, pInfo, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_BUFFER_H */

View file

@@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_BUFFER_VIEW_H
#define VN_PROTOCOL_DRIVER_BUFFER_VIEW_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkBufferViewCreateInfo chain */
@@ -196,7 +196,7 @@ static inline void vn_decode_vkDestroyBufferView_reply(struct vn_cs_decoder *dec
/* skip pAllocator */
}
static inline void vn_submit_vkCreateBufferView(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateBufferView(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -208,16 +208,16 @@ static inline void vn_submit_vkCreateBufferView(struct vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateBufferView_reply(device, pCreateInfo, pAllocator, pView) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateBufferView(enc, cmd_flags, device, pCreateInfo, pAllocator, pView);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyBufferView(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyBufferView(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -229,54 +229,54 @@ static inline void vn_submit_vkDestroyBufferView(struct vn_instance
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyBufferView_reply(device, bufferView, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyBufferView(enc, cmd_flags, device, bufferView, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateBufferView(struct vn_instance *vn_instance, VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView)
static inline VkResult vn_call_vkCreateBufferView(struct vn_ring *vn_ring, VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateBufferView(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pView, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateBufferView(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pView, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateBufferView_reply(dec, device, pCreateInfo, pAllocator, pView);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateBufferView(struct vn_instance *vn_instance, VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView)
static inline void vn_async_vkCreateBufferView(struct vn_ring *vn_ring, VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateBufferView(vn_instance, 0, device, pCreateInfo, pAllocator, pView, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateBufferView(vn_ring, 0, device, pCreateInfo, pAllocator, pView, &submit);
}
static inline void vn_call_vkDestroyBufferView(struct vn_instance *vn_instance, VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyBufferView(struct vn_ring *vn_ring, VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyBufferView(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, bufferView, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyBufferView(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, bufferView, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyBufferView_reply(dec, device, bufferView, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyBufferView(struct vn_instance *vn_instance, VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyBufferView(struct vn_ring *vn_ring, VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyBufferView(vn_instance, 0, device, bufferView, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyBufferView(vn_ring, 0, device, bufferView, pAllocator, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_BUFFER_VIEW_H */

File diff suppressed because it is too large Load diff

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_COMMAND_POOL_H
#define VN_PROTOCOL_DRIVER_COMMAND_POOL_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkCommandPoolCreateInfo chain */
@ -292,7 +292,7 @@ static inline void vn_decode_vkTrimCommandPool_reply(struct vn_cs_decoder *dec,
/* skip flags */
}
static inline void vn_submit_vkCreateCommandPool(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateCommandPool(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -304,16 +304,16 @@ static inline void vn_submit_vkCreateCommandPool(struct vn_instance *vn_instance
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateCommandPool_reply(device, pCreateInfo, pAllocator, pCommandPool) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateCommandPool(enc, cmd_flags, device, pCreateInfo, pAllocator, pCommandPool);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyCommandPool(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyCommandPool(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -325,16 +325,16 @@ static inline void vn_submit_vkDestroyCommandPool(struct vn_instance *vn_instanc
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyCommandPool_reply(device, commandPool, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyCommandPool(enc, cmd_flags, device, commandPool, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkResetCommandPool(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkResetCommandPool(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -346,16 +346,16 @@ static inline void vn_submit_vkResetCommandPool(struct vn_instance *vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkResetCommandPool_reply(device, commandPool, flags) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkResetCommandPool(enc, cmd_flags, device, commandPool, flags);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkTrimCommandPool(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkTrimCommandPool(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -367,95 +367,95 @@ static inline void vn_submit_vkTrimCommandPool(struct vn_instance *vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkTrimCommandPool_reply(device, commandPool, flags) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkTrimCommandPool(enc, cmd_flags, device, commandPool, flags);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateCommandPool(struct vn_instance *vn_instance, VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool)
static inline VkResult vn_call_vkCreateCommandPool(struct vn_ring *vn_ring, VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateCommandPool(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pCommandPool, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateCommandPool(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pCommandPool, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateCommandPool_reply(dec, device, pCreateInfo, pAllocator, pCommandPool);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateCommandPool(struct vn_instance *vn_instance, VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool)
static inline void vn_async_vkCreateCommandPool(struct vn_ring *vn_ring, VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateCommandPool(vn_instance, 0, device, pCreateInfo, pAllocator, pCommandPool, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateCommandPool(vn_ring, 0, device, pCreateInfo, pAllocator, pCommandPool, &submit);
}
static inline void vn_call_vkDestroyCommandPool(struct vn_instance *vn_instance, VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyCommandPool(struct vn_ring *vn_ring, VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyCommandPool(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, commandPool, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyCommandPool(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, commandPool, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyCommandPool_reply(dec, device, commandPool, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyCommandPool(struct vn_instance *vn_instance, VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyCommandPool(struct vn_ring *vn_ring, VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyCommandPool(vn_instance, 0, device, commandPool, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyCommandPool(vn_ring, 0, device, commandPool, pAllocator, &submit);
}
static inline VkResult vn_call_vkResetCommandPool(struct vn_instance *vn_instance, VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags)
static inline VkResult vn_call_vkResetCommandPool(struct vn_ring *vn_ring, VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkResetCommandPool(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, commandPool, flags, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkResetCommandPool(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, commandPool, flags, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkResetCommandPool_reply(dec, device, commandPool, flags);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkResetCommandPool(struct vn_instance *vn_instance, VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags)
static inline void vn_async_vkResetCommandPool(struct vn_ring *vn_ring, VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags)
{
struct vn_instance_submit_command submit;
vn_submit_vkResetCommandPool(vn_instance, 0, device, commandPool, flags, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkResetCommandPool(vn_ring, 0, device, commandPool, flags, &submit);
}
static inline void vn_call_vkTrimCommandPool(struct vn_instance *vn_instance, VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags)
static inline void vn_call_vkTrimCommandPool(struct vn_ring *vn_ring, VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkTrimCommandPool(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, commandPool, flags, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkTrimCommandPool(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, commandPool, flags, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkTrimCommandPool_reply(dec, device, commandPool, flags);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkTrimCommandPool(struct vn_instance *vn_instance, VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags)
static inline void vn_async_vkTrimCommandPool(struct vn_ring *vn_ring, VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags)
{
struct vn_instance_submit_command submit;
vn_submit_vkTrimCommandPool(vn_instance, 0, device, commandPool, flags, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkTrimCommandPool(vn_ring, 0, device, commandPool, flags, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_COMMAND_POOL_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_DESCRIPTOR_POOL_H
#define VN_PROTOCOL_DRIVER_DESCRIPTOR_POOL_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkDescriptorPoolSize */
@ -384,7 +384,7 @@ static inline VkResult vn_decode_vkResetDescriptorPool_reply(struct vn_cs_decode
return ret;
}
static inline void vn_submit_vkCreateDescriptorPool(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateDescriptorPool(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -396,16 +396,16 @@ static inline void vn_submit_vkCreateDescriptorPool(struct vn_instance *vn_insta
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateDescriptorPool_reply(device, pCreateInfo, pAllocator, pDescriptorPool) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateDescriptorPool(enc, cmd_flags, device, pCreateInfo, pAllocator, pDescriptorPool);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyDescriptorPool(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyDescriptorPool(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -417,16 +417,16 @@ static inline void vn_submit_vkDestroyDescriptorPool(struct vn_instance *vn_inst
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyDescriptorPool_reply(device, descriptorPool, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyDescriptorPool(enc, cmd_flags, device, descriptorPool, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkResetDescriptorPool(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkResetDescriptorPool(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -438,76 +438,76 @@ static inline void vn_submit_vkResetDescriptorPool(struct vn_instance *vn_instan
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkResetDescriptorPool_reply(device, descriptorPool, flags) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkResetDescriptorPool(enc, cmd_flags, device, descriptorPool, flags);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateDescriptorPool(struct vn_instance *vn_instance, VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool)
static inline VkResult vn_call_vkCreateDescriptorPool(struct vn_ring *vn_ring, VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateDescriptorPool(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pDescriptorPool, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateDescriptorPool(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pDescriptorPool, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateDescriptorPool_reply(dec, device, pCreateInfo, pAllocator, pDescriptorPool);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateDescriptorPool(struct vn_instance *vn_instance, VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool)
static inline void vn_async_vkCreateDescriptorPool(struct vn_ring *vn_ring, VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateDescriptorPool(vn_instance, 0, device, pCreateInfo, pAllocator, pDescriptorPool, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateDescriptorPool(vn_ring, 0, device, pCreateInfo, pAllocator, pDescriptorPool, &submit);
}
static inline void vn_call_vkDestroyDescriptorPool(struct vn_instance *vn_instance, VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyDescriptorPool(struct vn_ring *vn_ring, VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyDescriptorPool(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorPool, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyDescriptorPool(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorPool, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyDescriptorPool_reply(dec, device, descriptorPool, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyDescriptorPool(struct vn_instance *vn_instance, VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyDescriptorPool(struct vn_ring *vn_ring, VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyDescriptorPool(vn_instance, 0, device, descriptorPool, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyDescriptorPool(vn_ring, 0, device, descriptorPool, pAllocator, &submit);
}
static inline VkResult vn_call_vkResetDescriptorPool(struct vn_instance *vn_instance, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
static inline VkResult vn_call_vkResetDescriptorPool(struct vn_ring *vn_ring, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkResetDescriptorPool(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorPool, flags, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkResetDescriptorPool(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorPool, flags, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkResetDescriptorPool_reply(dec, device, descriptorPool, flags);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkResetDescriptorPool(struct vn_instance *vn_instance, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
static inline void vn_async_vkResetDescriptorPool(struct vn_ring *vn_ring, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
{
struct vn_instance_submit_command submit;
vn_submit_vkResetDescriptorPool(vn_instance, 0, device, descriptorPool, flags, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkResetDescriptorPool(vn_ring, 0, device, descriptorPool, flags, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_DESCRIPTOR_POOL_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_DESCRIPTOR_SET_H
#define VN_PROTOCOL_DRIVER_DESCRIPTOR_SET_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/*
@ -481,7 +481,7 @@ static inline void vn_decode_vkUpdateDescriptorSets_reply(struct vn_cs_decoder *
/* skip pDescriptorCopies */
}
static inline void vn_submit_vkAllocateDescriptorSets(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkAllocateDescriptorSets(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -493,16 +493,16 @@ static inline void vn_submit_vkAllocateDescriptorSets(struct vn_instance *vn_ins
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkAllocateDescriptorSets_reply(device, pAllocateInfo, pDescriptorSets) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkAllocateDescriptorSets(enc, cmd_flags, device, pAllocateInfo, pDescriptorSets);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkFreeDescriptorSets(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkFreeDescriptorSets(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -514,16 +514,16 @@ static inline void vn_submit_vkFreeDescriptorSets(struct vn_instance *vn_instanc
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkFreeDescriptorSets_reply(device, descriptorPool, descriptorSetCount, pDescriptorSets) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkFreeDescriptorSets(enc, cmd_flags, device, descriptorPool, descriptorSetCount, pDescriptorSets);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkUpdateDescriptorSets(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkUpdateDescriptorSets(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -535,76 +535,76 @@ static inline void vn_submit_vkUpdateDescriptorSets(struct vn_instance *vn_insta
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkUpdateDescriptorSets_reply(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkUpdateDescriptorSets(enc, cmd_flags, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkAllocateDescriptorSets(struct vn_instance *vn_instance, VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets)
static inline VkResult vn_call_vkAllocateDescriptorSets(struct vn_ring *vn_ring, VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkAllocateDescriptorSets(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pAllocateInfo, pDescriptorSets, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkAllocateDescriptorSets(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pAllocateInfo, pDescriptorSets, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkAllocateDescriptorSets_reply(dec, device, pAllocateInfo, pDescriptorSets);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkAllocateDescriptorSets(struct vn_instance *vn_instance, VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets)
static inline void vn_async_vkAllocateDescriptorSets(struct vn_ring *vn_ring, VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets)
{
struct vn_instance_submit_command submit;
vn_submit_vkAllocateDescriptorSets(vn_instance, 0, device, pAllocateInfo, pDescriptorSets, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkAllocateDescriptorSets(vn_ring, 0, device, pAllocateInfo, pDescriptorSets, &submit);
}
static inline VkResult vn_call_vkFreeDescriptorSets(struct vn_instance *vn_instance, VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets)
static inline VkResult vn_call_vkFreeDescriptorSets(struct vn_ring *vn_ring, VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkFreeDescriptorSets(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorPool, descriptorSetCount, pDescriptorSets, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkFreeDescriptorSets(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorPool, descriptorSetCount, pDescriptorSets, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkFreeDescriptorSets_reply(dec, device, descriptorPool, descriptorSetCount, pDescriptorSets);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkFreeDescriptorSets(struct vn_instance *vn_instance, VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets)
static inline void vn_async_vkFreeDescriptorSets(struct vn_ring *vn_ring, VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets)
{
struct vn_instance_submit_command submit;
vn_submit_vkFreeDescriptorSets(vn_instance, 0, device, descriptorPool, descriptorSetCount, pDescriptorSets, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkFreeDescriptorSets(vn_ring, 0, device, descriptorPool, descriptorSetCount, pDescriptorSets, &submit);
}
static inline void vn_call_vkUpdateDescriptorSets(struct vn_instance *vn_instance, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies)
static inline void vn_call_vkUpdateDescriptorSets(struct vn_ring *vn_ring, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkUpdateDescriptorSets(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkUpdateDescriptorSets(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkUpdateDescriptorSets_reply(dec, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkUpdateDescriptorSets(struct vn_instance *vn_instance, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies)
static inline void vn_async_vkUpdateDescriptorSets(struct vn_ring *vn_ring, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies)
{
struct vn_instance_submit_command submit;
vn_submit_vkUpdateDescriptorSets(vn_instance, 0, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkUpdateDescriptorSets(vn_ring, 0, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_DESCRIPTOR_SET_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_DESCRIPTOR_SET_LAYOUT_H
#define VN_PROTOCOL_DRIVER_DESCRIPTOR_SET_LAYOUT_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkDescriptorSetLayoutBinding */
@ -703,7 +703,7 @@ static inline void vn_decode_vkGetDescriptorSetLayoutSupport_reply(struct vn_cs_
}
}
static inline void vn_submit_vkCreateDescriptorSetLayout(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateDescriptorSetLayout(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -715,16 +715,16 @@ static inline void vn_submit_vkCreateDescriptorSetLayout(struct vn_instance *vn_
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateDescriptorSetLayout_reply(device, pCreateInfo, pAllocator, pSetLayout) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateDescriptorSetLayout(enc, cmd_flags, device, pCreateInfo, pAllocator, pSetLayout);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyDescriptorSetLayout(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyDescriptorSetLayout(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -736,16 +736,16 @@ static inline void vn_submit_vkDestroyDescriptorSetLayout(struct vn_instance *vn
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyDescriptorSetLayout_reply(device, descriptorSetLayout, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyDescriptorSetLayout(enc, cmd_flags, device, descriptorSetLayout, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetDescriptorSetLayoutSupport(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetDescriptorSetLayoutSupport(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -757,73 +757,73 @@ static inline void vn_submit_vkGetDescriptorSetLayoutSupport(struct vn_instance
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetDescriptorSetLayoutSupport_reply(device, pCreateInfo, pSupport) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetDescriptorSetLayoutSupport(enc, cmd_flags, device, pCreateInfo, pSupport);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateDescriptorSetLayout(struct vn_instance *vn_instance, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout)
static inline VkResult vn_call_vkCreateDescriptorSetLayout(struct vn_ring *vn_ring, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateDescriptorSetLayout(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pSetLayout, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateDescriptorSetLayout(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pSetLayout, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateDescriptorSetLayout_reply(dec, device, pCreateInfo, pAllocator, pSetLayout);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateDescriptorSetLayout(struct vn_instance *vn_instance, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout)
static inline void vn_async_vkCreateDescriptorSetLayout(struct vn_ring *vn_ring, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateDescriptorSetLayout(vn_instance, 0, device, pCreateInfo, pAllocator, pSetLayout, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateDescriptorSetLayout(vn_ring, 0, device, pCreateInfo, pAllocator, pSetLayout, &submit);
}
static inline void vn_call_vkDestroyDescriptorSetLayout(struct vn_instance *vn_instance, VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyDescriptorSetLayout(struct vn_ring *vn_ring, VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyDescriptorSetLayout(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorSetLayout, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyDescriptorSetLayout(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorSetLayout, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyDescriptorSetLayout_reply(dec, device, descriptorSetLayout, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyDescriptorSetLayout(struct vn_instance *vn_instance, VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyDescriptorSetLayout(struct vn_ring *vn_ring, VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyDescriptorSetLayout(vn_instance, 0, device, descriptorSetLayout, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyDescriptorSetLayout(vn_ring, 0, device, descriptorSetLayout, pAllocator, &submit);
}
static inline void vn_call_vkGetDescriptorSetLayoutSupport(struct vn_instance *vn_instance, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport)
static inline void vn_call_vkGetDescriptorSetLayoutSupport(struct vn_ring *vn_ring, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetDescriptorSetLayoutSupport(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pSupport, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetDescriptorSetLayoutSupport(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pSupport, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkGetDescriptorSetLayoutSupport_reply(dec, device, pCreateInfo, pSupport);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkGetDescriptorSetLayoutSupport(struct vn_instance *vn_instance, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport)
static inline void vn_async_vkGetDescriptorSetLayoutSupport(struct vn_ring *vn_ring, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetDescriptorSetLayoutSupport(vn_instance, 0, device, pCreateInfo, pSupport, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetDescriptorSetLayoutSupport(vn_ring, 0, device, pCreateInfo, pSupport, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_DESCRIPTOR_SET_LAYOUT_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_DESCRIPTOR_UPDATE_TEMPLATE_H
#define VN_PROTOCOL_DRIVER_DESCRIPTOR_UPDATE_TEMPLATE_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkDescriptorUpdateTemplateEntry */
@ -240,7 +240,7 @@ static inline void vn_decode_vkDestroyDescriptorUpdateTemplate_reply(struct vn_c
/* skip pAllocator */
}
static inline void vn_submit_vkCreateDescriptorUpdateTemplate(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateDescriptorUpdateTemplate(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -252,16 +252,16 @@ static inline void vn_submit_vkCreateDescriptorUpdateTemplate(struct vn_instance
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateDescriptorUpdateTemplate_reply(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateDescriptorUpdateTemplate(enc, cmd_flags, device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyDescriptorUpdateTemplate(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyDescriptorUpdateTemplate(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -273,54 +273,54 @@ static inline void vn_submit_vkDestroyDescriptorUpdateTemplate(struct vn_instanc
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyDescriptorUpdateTemplate_reply(device, descriptorUpdateTemplate, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyDescriptorUpdateTemplate(enc, cmd_flags, device, descriptorUpdateTemplate, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateDescriptorUpdateTemplate(struct vn_instance *vn_instance, VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
static inline VkResult vn_call_vkCreateDescriptorUpdateTemplate(struct vn_ring *vn_ring, VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateDescriptorUpdateTemplate(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateDescriptorUpdateTemplate(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateDescriptorUpdateTemplate_reply(dec, device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateDescriptorUpdateTemplate(struct vn_instance *vn_instance, VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
static inline void vn_async_vkCreateDescriptorUpdateTemplate(struct vn_ring *vn_ring, VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateDescriptorUpdateTemplate(vn_instance, 0, device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateDescriptorUpdateTemplate(vn_ring, 0, device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate, &submit);
}
static inline void vn_call_vkDestroyDescriptorUpdateTemplate(struct vn_instance *vn_instance, VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyDescriptorUpdateTemplate(struct vn_ring *vn_ring, VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyDescriptorUpdateTemplate(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorUpdateTemplate, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyDescriptorUpdateTemplate(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, descriptorUpdateTemplate, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyDescriptorUpdateTemplate_reply(dec, device, descriptorUpdateTemplate, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyDescriptorUpdateTemplate(struct vn_instance *vn_instance, VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyDescriptorUpdateTemplate(struct vn_ring *vn_ring, VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyDescriptorUpdateTemplate(vn_instance, 0, device, descriptorUpdateTemplate, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyDescriptorUpdateTemplate(vn_ring, 0, device, descriptorUpdateTemplate, pAllocator, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_DESCRIPTOR_UPDATE_TEMPLATE_H */

File diff suppressed because it is too large Load diff

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_DEVICE_MEMORY_H
#define VN_PROTOCOL_DRIVER_DEVICE_MEMORY_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/*
@ -1294,7 +1294,7 @@ static inline VkResult vn_decode_vkGetMemoryResourcePropertiesMESA_reply(struct
return ret;
}
static inline void vn_submit_vkAllocateMemory(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkAllocateMemory(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1306,16 +1306,16 @@ static inline void vn_submit_vkAllocateMemory(struct vn_instance *vn_instance, V
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkAllocateMemory_reply(device, pAllocateInfo, pAllocator, pMemory) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkAllocateMemory(enc, cmd_flags, device, pAllocateInfo, pAllocator, pMemory);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkFreeMemory(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkFreeMemory(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1327,16 +1327,16 @@ static inline void vn_submit_vkFreeMemory(struct vn_instance *vn_instance, VkCom
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkFreeMemory_reply(device, memory, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkFreeMemory(enc, cmd_flags, device, memory, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkUnmapMemory(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeviceMemory memory, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkUnmapMemory(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeviceMemory memory, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1348,16 +1348,16 @@ static inline void vn_submit_vkUnmapMemory(struct vn_instance *vn_instance, VkCo
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkUnmapMemory_reply(device, memory) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkUnmapMemory(enc, cmd_flags, device, memory);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkFlushMappedMemoryRanges(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkFlushMappedMemoryRanges(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1369,16 +1369,16 @@ static inline void vn_submit_vkFlushMappedMemoryRanges(struct vn_instance *vn_in
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkFlushMappedMemoryRanges_reply(device, memoryRangeCount, pMemoryRanges) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkFlushMappedMemoryRanges(enc, cmd_flags, device, memoryRangeCount, pMemoryRanges);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkInvalidateMappedMemoryRanges(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkInvalidateMappedMemoryRanges(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1390,16 +1390,16 @@ static inline void vn_submit_vkInvalidateMappedMemoryRanges(struct vn_instance *
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkInvalidateMappedMemoryRanges_reply(device, memoryRangeCount, pMemoryRanges) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkInvalidateMappedMemoryRanges(enc, cmd_flags, device, memoryRangeCount, pMemoryRanges);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetDeviceMemoryCommitment(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetDeviceMemoryCommitment(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1411,16 +1411,16 @@ static inline void vn_submit_vkGetDeviceMemoryCommitment(struct vn_instance *vn_
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetDeviceMemoryCommitment_reply(device, memory, pCommittedMemoryInBytes) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetDeviceMemoryCommitment(enc, cmd_flags, device, memory, pCommittedMemoryInBytes);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetDeviceMemoryOpaqueCaptureAddress(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetDeviceMemoryOpaqueCaptureAddress(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1432,16 +1432,16 @@ static inline void vn_submit_vkGetDeviceMemoryOpaqueCaptureAddress(struct vn_ins
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetDeviceMemoryOpaqueCaptureAddress_reply(device, pInfo) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetDeviceMemoryOpaqueCaptureAddress(enc, cmd_flags, device, pInfo);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetMemoryResourcePropertiesMESA(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t resourceId, VkMemoryResourcePropertiesMESA* pMemoryResourceProperties, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetMemoryResourcePropertiesMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t resourceId, VkMemoryResourcePropertiesMESA* pMemoryResourceProperties, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1453,180 +1453,180 @@ static inline void vn_submit_vkGetMemoryResourcePropertiesMESA(struct vn_instanc
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetMemoryResourcePropertiesMESA_reply(device, resourceId, pMemoryResourceProperties) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetMemoryResourcePropertiesMESA(enc, cmd_flags, device, resourceId, pMemoryResourceProperties);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkAllocateMemory(struct vn_instance *vn_instance, VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory)
static inline VkResult vn_call_vkAllocateMemory(struct vn_ring *vn_ring, VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkAllocateMemory(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pAllocateInfo, pAllocator, pMemory, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkAllocateMemory(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pAllocateInfo, pAllocator, pMemory, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkAllocateMemory_reply(dec, device, pAllocateInfo, pAllocator, pMemory);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkAllocateMemory(struct vn_instance *vn_instance, VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory)
static inline void vn_async_vkAllocateMemory(struct vn_ring *vn_ring, VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory)
{
struct vn_instance_submit_command submit;
vn_submit_vkAllocateMemory(vn_instance, 0, device, pAllocateInfo, pAllocator, pMemory, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkAllocateMemory(vn_ring, 0, device, pAllocateInfo, pAllocator, pMemory, &submit);
}
static inline void vn_call_vkFreeMemory(struct vn_instance *vn_instance, VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkFreeMemory(struct vn_ring *vn_ring, VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkFreeMemory(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, memory, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkFreeMemory(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, memory, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkFreeMemory_reply(dec, device, memory, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkFreeMemory(struct vn_instance *vn_instance, VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkFreeMemory(struct vn_ring *vn_ring, VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkFreeMemory(vn_instance, 0, device, memory, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkFreeMemory(vn_ring, 0, device, memory, pAllocator, &submit);
}
static inline void vn_call_vkUnmapMemory(struct vn_instance *vn_instance, VkDevice device, VkDeviceMemory memory)
static inline void vn_call_vkUnmapMemory(struct vn_ring *vn_ring, VkDevice device, VkDeviceMemory memory)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkUnmapMemory(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, memory, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkUnmapMemory(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, memory, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkUnmapMemory_reply(dec, device, memory);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkUnmapMemory(struct vn_instance *vn_instance, VkDevice device, VkDeviceMemory memory)
static inline void vn_async_vkUnmapMemory(struct vn_ring *vn_ring, VkDevice device, VkDeviceMemory memory)
{
struct vn_instance_submit_command submit;
vn_submit_vkUnmapMemory(vn_instance, 0, device, memory, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkUnmapMemory(vn_ring, 0, device, memory, &submit);
}
static inline VkResult vn_call_vkFlushMappedMemoryRanges(struct vn_instance *vn_instance, VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges)
static inline VkResult vn_call_vkFlushMappedMemoryRanges(struct vn_ring *vn_ring, VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkFlushMappedMemoryRanges(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, memoryRangeCount, pMemoryRanges, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkFlushMappedMemoryRanges(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, memoryRangeCount, pMemoryRanges, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkFlushMappedMemoryRanges_reply(dec, device, memoryRangeCount, pMemoryRanges);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkFlushMappedMemoryRanges(struct vn_instance *vn_instance, VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges)
static inline void vn_async_vkFlushMappedMemoryRanges(struct vn_ring *vn_ring, VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges)
{
struct vn_instance_submit_command submit;
vn_submit_vkFlushMappedMemoryRanges(vn_instance, 0, device, memoryRangeCount, pMemoryRanges, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkFlushMappedMemoryRanges(vn_ring, 0, device, memoryRangeCount, pMemoryRanges, &submit);
}
static inline VkResult vn_call_vkInvalidateMappedMemoryRanges(struct vn_instance *vn_instance, VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges)
static inline VkResult vn_call_vkInvalidateMappedMemoryRanges(struct vn_ring *vn_ring, VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkInvalidateMappedMemoryRanges(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, memoryRangeCount, pMemoryRanges, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkInvalidateMappedMemoryRanges(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, memoryRangeCount, pMemoryRanges, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkInvalidateMappedMemoryRanges_reply(dec, device, memoryRangeCount, pMemoryRanges);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkInvalidateMappedMemoryRanges(struct vn_instance *vn_instance, VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges)
static inline void vn_async_vkInvalidateMappedMemoryRanges(struct vn_ring *vn_ring, VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges)
{
struct vn_instance_submit_command submit;
vn_submit_vkInvalidateMappedMemoryRanges(vn_instance, 0, device, memoryRangeCount, pMemoryRanges, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkInvalidateMappedMemoryRanges(vn_ring, 0, device, memoryRangeCount, pMemoryRanges, &submit);
}
static inline void vn_call_vkGetDeviceMemoryCommitment(struct vn_instance *vn_instance, VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes)
static inline void vn_call_vkGetDeviceMemoryCommitment(struct vn_ring *vn_ring, VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetDeviceMemoryCommitment(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, memory, pCommittedMemoryInBytes, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetDeviceMemoryCommitment(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, memory, pCommittedMemoryInBytes, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkGetDeviceMemoryCommitment_reply(dec, device, memory, pCommittedMemoryInBytes);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkGetDeviceMemoryCommitment(struct vn_instance *vn_instance, VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes)
static inline void vn_async_vkGetDeviceMemoryCommitment(struct vn_ring *vn_ring, VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetDeviceMemoryCommitment(vn_instance, 0, device, memory, pCommittedMemoryInBytes, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetDeviceMemoryCommitment(vn_ring, 0, device, memory, pCommittedMemoryInBytes, &submit);
}
static inline uint64_t vn_call_vkGetDeviceMemoryOpaqueCaptureAddress(struct vn_instance *vn_instance, VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo)
static inline uint64_t vn_call_vkGetDeviceMemoryOpaqueCaptureAddress(struct vn_ring *vn_ring, VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetDeviceMemoryOpaqueCaptureAddress(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetDeviceMemoryOpaqueCaptureAddress(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const uint64_t ret = vn_decode_vkGetDeviceMemoryOpaqueCaptureAddress_reply(dec, device, pInfo);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkGetDeviceMemoryOpaqueCaptureAddress(struct vn_instance *vn_instance, VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo)
static inline void vn_async_vkGetDeviceMemoryOpaqueCaptureAddress(struct vn_ring *vn_ring, VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetDeviceMemoryOpaqueCaptureAddress(vn_instance, 0, device, pInfo, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetDeviceMemoryOpaqueCaptureAddress(vn_ring, 0, device, pInfo, &submit);
}
static inline VkResult vn_call_vkGetMemoryResourcePropertiesMESA(struct vn_instance *vn_instance, VkDevice device, uint32_t resourceId, VkMemoryResourcePropertiesMESA* pMemoryResourceProperties)
static inline VkResult vn_call_vkGetMemoryResourcePropertiesMESA(struct vn_ring *vn_ring, VkDevice device, uint32_t resourceId, VkMemoryResourcePropertiesMESA* pMemoryResourceProperties)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetMemoryResourcePropertiesMESA(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, resourceId, pMemoryResourceProperties, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetMemoryResourcePropertiesMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, resourceId, pMemoryResourceProperties, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkGetMemoryResourcePropertiesMESA_reply(dec, device, resourceId, pMemoryResourceProperties);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkGetMemoryResourcePropertiesMESA(struct vn_instance *vn_instance, VkDevice device, uint32_t resourceId, VkMemoryResourcePropertiesMESA* pMemoryResourceProperties)
static inline void vn_async_vkGetMemoryResourcePropertiesMESA(struct vn_ring *vn_ring, VkDevice device, uint32_t resourceId, VkMemoryResourcePropertiesMESA* pMemoryResourceProperties)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetMemoryResourcePropertiesMESA(vn_instance, 0, device, resourceId, pMemoryResourceProperties, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetMemoryResourcePropertiesMESA(vn_ring, 0, device, resourceId, pMemoryResourceProperties, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_DEVICE_MEMORY_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_EVENT_H
#define VN_PROTOCOL_DRIVER_EVENT_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkEventCreateInfo chain */
@ -338,7 +338,7 @@ static inline VkResult vn_decode_vkResetEvent_reply(struct vn_cs_decoder *dec, V
return ret;
}
static inline void vn_submit_vkCreateEvent(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateEvent(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -350,16 +350,16 @@ static inline void vn_submit_vkCreateEvent(struct vn_instance *vn_instance, VkCo
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateEvent_reply(device, pCreateInfo, pAllocator, pEvent) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateEvent(enc, cmd_flags, device, pCreateInfo, pAllocator, pEvent);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyEvent(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyEvent(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -371,16 +371,16 @@ static inline void vn_submit_vkDestroyEvent(struct vn_instance *vn_instance, VkC
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyEvent_reply(device, event, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyEvent(enc, cmd_flags, device, event, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetEventStatus(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkEvent event, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetEventStatus(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkEvent event, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -392,16 +392,16 @@ static inline void vn_submit_vkGetEventStatus(struct vn_instance *vn_instance, V
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetEventStatus_reply(device, event) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetEventStatus(enc, cmd_flags, device, event);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkSetEvent(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkEvent event, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkSetEvent(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkEvent event, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -413,16 +413,16 @@ static inline void vn_submit_vkSetEvent(struct vn_instance *vn_instance, VkComma
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkSetEvent_reply(device, event) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkSetEvent(enc, cmd_flags, device, event);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkResetEvent(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkEvent event, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkResetEvent(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkEvent event, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -434,120 +434,120 @@ static inline void vn_submit_vkResetEvent(struct vn_instance *vn_instance, VkCom
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkResetEvent_reply(device, event) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkResetEvent(enc, cmd_flags, device, event);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateEvent(struct vn_instance *vn_instance, VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent)
static inline VkResult vn_call_vkCreateEvent(struct vn_ring *vn_ring, VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateEvent(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pEvent, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateEvent(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pEvent, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateEvent_reply(dec, device, pCreateInfo, pAllocator, pEvent);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateEvent(struct vn_instance *vn_instance, VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent)
static inline void vn_async_vkCreateEvent(struct vn_ring *vn_ring, VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateEvent(vn_instance, 0, device, pCreateInfo, pAllocator, pEvent, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateEvent(vn_ring, 0, device, pCreateInfo, pAllocator, pEvent, &submit);
}
static inline void vn_call_vkDestroyEvent(struct vn_instance *vn_instance, VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyEvent(struct vn_ring *vn_ring, VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyEvent(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, event, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyEvent(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, event, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyEvent_reply(dec, device, event, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyEvent(struct vn_instance *vn_instance, VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyEvent(struct vn_ring *vn_ring, VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyEvent(vn_instance, 0, device, event, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyEvent(vn_ring, 0, device, event, pAllocator, &submit);
}
static inline VkResult vn_call_vkGetEventStatus(struct vn_instance *vn_instance, VkDevice device, VkEvent event)
static inline VkResult vn_call_vkGetEventStatus(struct vn_ring *vn_ring, VkDevice device, VkEvent event)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetEventStatus(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, event, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetEventStatus(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, event, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkGetEventStatus_reply(dec, device, event);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkGetEventStatus(struct vn_instance *vn_instance, VkDevice device, VkEvent event)
static inline void vn_async_vkGetEventStatus(struct vn_ring *vn_ring, VkDevice device, VkEvent event)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetEventStatus(vn_instance, 0, device, event, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetEventStatus(vn_ring, 0, device, event, &submit);
}
static inline VkResult vn_call_vkSetEvent(struct vn_instance *vn_instance, VkDevice device, VkEvent event)
static inline VkResult vn_call_vkSetEvent(struct vn_ring *vn_ring, VkDevice device, VkEvent event)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkSetEvent(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, event, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkSetEvent(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, event, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkSetEvent_reply(dec, device, event);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkSetEvent(struct vn_instance *vn_instance, VkDevice device, VkEvent event)
static inline void vn_async_vkSetEvent(struct vn_ring *vn_ring, VkDevice device, VkEvent event)
{
struct vn_instance_submit_command submit;
vn_submit_vkSetEvent(vn_instance, 0, device, event, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkSetEvent(vn_ring, 0, device, event, &submit);
}
static inline VkResult vn_call_vkResetEvent(struct vn_instance *vn_instance, VkDevice device, VkEvent event)
static inline VkResult vn_call_vkResetEvent(struct vn_ring *vn_ring, VkDevice device, VkEvent event)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkResetEvent(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, event, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkResetEvent(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, event, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkResetEvent_reply(dec, device, event);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkResetEvent(struct vn_instance *vn_instance, VkDevice device, VkEvent event)
static inline void vn_async_vkResetEvent(struct vn_ring *vn_ring, VkDevice device, VkEvent event)
{
struct vn_instance_submit_command submit;
vn_submit_vkResetEvent(vn_instance, 0, device, event, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkResetEvent(vn_ring, 0, device, event, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_EVENT_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_FENCE_H
#define VN_PROTOCOL_DRIVER_FENCE_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/*
@ -515,7 +515,7 @@ static inline void vn_decode_vkResetFenceResourceMESA_reply(struct vn_cs_decoder
/* skip fence */
}
static inline void vn_submit_vkCreateFence(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateFence(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -527,16 +527,16 @@ static inline void vn_submit_vkCreateFence(struct vn_instance *vn_instance, VkCo
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateFence_reply(device, pCreateInfo, pAllocator, pFence) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateFence(enc, cmd_flags, device, pCreateInfo, pAllocator, pFence);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyFence(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyFence(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -548,16 +548,16 @@ static inline void vn_submit_vkDestroyFence(struct vn_instance *vn_instance, VkC
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyFence_reply(device, fence, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyFence(enc, cmd_flags, device, fence, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkResetFences(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t fenceCount, const VkFence* pFences, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkResetFences(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t fenceCount, const VkFence* pFences, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -569,16 +569,16 @@ static inline void vn_submit_vkResetFences(struct vn_instance *vn_instance, VkCo
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkResetFences_reply(device, fenceCount, pFences) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkResetFences(enc, cmd_flags, device, fenceCount, pFences);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetFenceStatus(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkFence fence, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetFenceStatus(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkFence fence, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -590,16 +590,16 @@ static inline void vn_submit_vkGetFenceStatus(struct vn_instance *vn_instance, V
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetFenceStatus_reply(device, fence) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetFenceStatus(enc, cmd_flags, device, fence);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
/* Encode vkWaitForFences and submit it over the ring; reply space is
 * reserved only when VK_COMMAND_GENERATE_REPLY_BIT_EXT is set.
 * NOTE(review): elided cmd_size lines reconstructed from the generator's
 * uniform pattern. */
static inline void vn_submit_vkWaitForFences(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkWaitForFences(device, fenceCount, pFences, waitAll, timeout);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkWaitForFences_reply(device, fenceCount, pFences, waitAll, timeout) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkWaitForFences(enc, cmd_flags, device, fenceCount, pFences, waitAll, timeout);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Encode vkResetFenceResourceMESA (Venus-private command) and submit it over
 * the ring. NOTE(review): elided cmd_size lines reconstructed from the
 * generator's uniform pattern. */
static inline void vn_submit_vkResetFenceResourceMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkFence fence, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkResetFenceResourceMESA(device, fence);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkResetFenceResourceMESA_reply(device, fence) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkResetFenceResourceMESA(enc, cmd_flags, device, fence);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Synchronous vkCreateFence: submit with a reply, block for the reply on the
 * ring, and decode the result into pFence. Returns
 * VK_ERROR_OUT_OF_HOST_MEMORY when no reply decoder could be obtained. */
static inline VkResult vn_call_vkCreateFence(struct vn_ring *vn_ring, VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkCreateFence(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pFence, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        const VkResult ret = vn_decode_vkCreateFence_reply(dec, device, pCreateInfo, pAllocator, pFence);
        vn_ring_free_command_reply(vn_ring, &submit);
        return ret;
    } else {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
}
/* Asynchronous vkCreateFence: submit without requesting a reply. */
static inline void vn_async_vkCreateFence(struct vn_ring *vn_ring, VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkCreateFence(vn_ring, 0, device, pCreateInfo, pAllocator, pFence, &submit);
}
/* Synchronous vkDestroyFence: submit with a reply and wait for it so the
 * destruction is observed by the host before returning. */
static inline void vn_call_vkDestroyFence(struct vn_ring *vn_ring, VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkDestroyFence(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, fence, pAllocator, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        vn_decode_vkDestroyFence_reply(dec, device, fence, pAllocator);
        vn_ring_free_command_reply(vn_ring, &submit);
    }
}
/* Asynchronous vkDestroyFence: submit without requesting a reply. */
static inline void vn_async_vkDestroyFence(struct vn_ring *vn_ring, VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkDestroyFence(vn_ring, 0, device, fence, pAllocator, &submit);
}
/* Synchronous vkResetFences: submit with a reply, block, and decode the
 * VkResult. Returns VK_ERROR_OUT_OF_HOST_MEMORY when no reply decoder could
 * be obtained. */
static inline VkResult vn_call_vkResetFences(struct vn_ring *vn_ring, VkDevice device, uint32_t fenceCount, const VkFence* pFences)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkResetFences(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, fenceCount, pFences, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        const VkResult ret = vn_decode_vkResetFences_reply(dec, device, fenceCount, pFences);
        vn_ring_free_command_reply(vn_ring, &submit);
        return ret;
    } else {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
}
/* Asynchronous vkResetFences: submit without requesting a reply. */
static inline void vn_async_vkResetFences(struct vn_ring *vn_ring, VkDevice device, uint32_t fenceCount, const VkFence* pFences)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkResetFences(vn_ring, 0, device, fenceCount, pFences, &submit);
}
/* Synchronous vkGetFenceStatus: submit with a reply, block, and decode the
 * VkResult (VK_SUCCESS / VK_NOT_READY per the Vulkan spec). Returns
 * VK_ERROR_OUT_OF_HOST_MEMORY when no reply decoder could be obtained. */
static inline VkResult vn_call_vkGetFenceStatus(struct vn_ring *vn_ring, VkDevice device, VkFence fence)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkGetFenceStatus(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, fence, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        const VkResult ret = vn_decode_vkGetFenceStatus_reply(dec, device, fence);
        vn_ring_free_command_reply(vn_ring, &submit);
        return ret;
    } else {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
}
/* Asynchronous vkGetFenceStatus: submit without requesting a reply. */
static inline void vn_async_vkGetFenceStatus(struct vn_ring *vn_ring, VkDevice device, VkFence fence)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkGetFenceStatus(vn_ring, 0, device, fence, &submit);
}
/* Synchronous vkWaitForFences: submit with a reply, block, and decode the
 * VkResult (VK_SUCCESS / VK_TIMEOUT per the Vulkan spec). Returns
 * VK_ERROR_OUT_OF_HOST_MEMORY when no reply decoder could be obtained. */
static inline VkResult vn_call_vkWaitForFences(struct vn_ring *vn_ring, VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkWaitForFences(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, fenceCount, pFences, waitAll, timeout, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        const VkResult ret = vn_decode_vkWaitForFences_reply(dec, device, fenceCount, pFences, waitAll, timeout);
        vn_ring_free_command_reply(vn_ring, &submit);
        return ret;
    } else {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
}
/* Asynchronous vkWaitForFences: submit without requesting a reply. */
static inline void vn_async_vkWaitForFences(struct vn_ring *vn_ring, VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkWaitForFences(vn_ring, 0, device, fenceCount, pFences, waitAll, timeout, &submit);
}
/* Synchronous vkResetFenceResourceMESA (Venus-private command): submit with
 * a reply and wait for it before returning. */
static inline void vn_call_vkResetFenceResourceMESA(struct vn_ring *vn_ring, VkDevice device, VkFence fence)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkResetFenceResourceMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, fence, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        vn_decode_vkResetFenceResourceMESA_reply(dec, device, fence);
        vn_ring_free_command_reply(vn_ring, &submit);
    }
}
/* Asynchronous vkResetFenceResourceMESA: submit without requesting a reply. */
static inline void vn_async_vkResetFenceResourceMESA(struct vn_ring *vn_ring, VkDevice device, VkFence fence)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkResetFenceResourceMESA(vn_ring, 0, device, fence, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_FENCE_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_FRAMEBUFFER_H
#define VN_PROTOCOL_DRIVER_FRAMEBUFFER_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkFramebufferAttachmentImageInfo chain */
@ -387,7 +387,7 @@ static inline void vn_decode_vkDestroyFramebuffer_reply(struct vn_cs_decoder *de
/* skip pAllocator */
}
/* Encode vkCreateFramebuffer and submit it over the ring; reply space is
 * reserved only when VK_COMMAND_GENERATE_REPLY_BIT_EXT is set.
 * NOTE(review): elided cmd_size lines reconstructed from the generator's
 * uniform pattern. */
static inline void vn_submit_vkCreateFramebuffer(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkCreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateFramebuffer_reply(device, pCreateInfo, pAllocator, pFramebuffer) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkCreateFramebuffer(enc, cmd_flags, device, pCreateInfo, pAllocator, pFramebuffer);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Encode vkDestroyFramebuffer and submit it over the ring.
 * NOTE(review): elided cmd_size lines reconstructed from the generator's
 * uniform pattern. */
static inline void vn_submit_vkDestroyFramebuffer(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkDestroyFramebuffer(device, framebuffer, pAllocator);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyFramebuffer_reply(device, framebuffer, pAllocator) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkDestroyFramebuffer(enc, cmd_flags, device, framebuffer, pAllocator);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Synchronous vkCreateFramebuffer: submit with a reply, block, and decode
 * the result into pFramebuffer. Returns VK_ERROR_OUT_OF_HOST_MEMORY when no
 * reply decoder could be obtained. */
static inline VkResult vn_call_vkCreateFramebuffer(struct vn_ring *vn_ring, VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkCreateFramebuffer(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pFramebuffer, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        const VkResult ret = vn_decode_vkCreateFramebuffer_reply(dec, device, pCreateInfo, pAllocator, pFramebuffer);
        vn_ring_free_command_reply(vn_ring, &submit);
        return ret;
    } else {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
}
/* Asynchronous vkCreateFramebuffer: submit without requesting a reply. */
static inline void vn_async_vkCreateFramebuffer(struct vn_ring *vn_ring, VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkCreateFramebuffer(vn_ring, 0, device, pCreateInfo, pAllocator, pFramebuffer, &submit);
}
/* Synchronous vkDestroyFramebuffer: submit with a reply and wait for it so
 * the destruction is observed by the host before returning. */
static inline void vn_call_vkDestroyFramebuffer(struct vn_ring *vn_ring, VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkDestroyFramebuffer(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, framebuffer, pAllocator, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        vn_decode_vkDestroyFramebuffer_reply(dec, device, framebuffer, pAllocator);
        vn_ring_free_command_reply(vn_ring, &submit);
    }
}
/* Asynchronous vkDestroyFramebuffer: submit without requesting a reply. */
static inline void vn_async_vkDestroyFramebuffer(struct vn_ring *vn_ring, VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkDestroyFramebuffer(vn_ring, 0, device, framebuffer, pAllocator, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_FRAMEBUFFER_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_IMAGE_H
#define VN_PROTOCOL_DRIVER_IMAGE_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkSparseImageMemoryRequirements */
@ -1994,7 +1994,7 @@ static inline VkResult vn_decode_vkGetImageDrmFormatModifierPropertiesEXT_reply(
return ret;
}
/* Encode vkGetImageMemoryRequirements and submit it over the ring.
 * NOTE(review): elided cmd_size lines reconstructed from the generator's
 * uniform pattern. */
static inline void vn_submit_vkGetImageMemoryRequirements(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkGetImageMemoryRequirements(device, image, pMemoryRequirements);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetImageMemoryRequirements_reply(device, image, pMemoryRequirements) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkGetImageMemoryRequirements(enc, cmd_flags, device, image, pMemoryRequirements);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Encode vkBindImageMemory and submit it over the ring.
 * NOTE(review): elided cmd_size lines reconstructed from the generator's
 * uniform pattern. */
static inline void vn_submit_vkBindImageMemory(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkBindImageMemory(device, image, memory, memoryOffset);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkBindImageMemory_reply(device, image, memory, memoryOffset) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkBindImageMemory(enc, cmd_flags, device, image, memory, memoryOffset);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Encode vkGetImageSparseMemoryRequirements and submit it over the ring.
 * NOTE(review): elided cmd_size lines reconstructed from the generator's
 * uniform pattern. */
static inline void vn_submit_vkGetImageSparseMemoryRequirements(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkGetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetImageSparseMemoryRequirements_reply(device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkGetImageSparseMemoryRequirements(enc, cmd_flags, device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Encode vkCreateImage and submit it over the ring.
 * NOTE(review): elided cmd_size lines reconstructed from the generator's
 * uniform pattern. */
static inline void vn_submit_vkCreateImage(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkCreateImage(device, pCreateInfo, pAllocator, pImage);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateImage_reply(device, pCreateInfo, pAllocator, pImage) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkCreateImage(enc, cmd_flags, device, pCreateInfo, pAllocator, pImage);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Encode vkDestroyImage and submit it over the ring.
 * NOTE(review): elided cmd_size lines reconstructed from the generator's
 * uniform pattern. */
static inline void vn_submit_vkDestroyImage(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkDestroyImage(device, image, pAllocator);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyImage_reply(device, image, pAllocator) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkDestroyImage(enc, cmd_flags, device, image, pAllocator);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Encode vkGetImageSubresourceLayout and submit it over the ring.
 * NOTE(review): elided cmd_size lines reconstructed from the generator's
 * uniform pattern. */
static inline void vn_submit_vkGetImageSubresourceLayout(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkGetImageSubresourceLayout(device, image, pSubresource, pLayout);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetImageSubresourceLayout_reply(device, image, pSubresource, pLayout) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkGetImageSubresourceLayout(enc, cmd_flags, device, image, pSubresource, pLayout);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Encode vkBindImageMemory2 and submit it over the ring.
 * NOTE(review): elided cmd_size lines reconstructed from the generator's
 * uniform pattern. */
static inline void vn_submit_vkBindImageMemory2(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkBindImageMemory2(device, bindInfoCount, pBindInfos);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkBindImageMemory2_reply(device, bindInfoCount, pBindInfos) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkBindImageMemory2(enc, cmd_flags, device, bindInfoCount, pBindInfos);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Encode vkGetImageMemoryRequirements2 and submit it over the ring.
 * NOTE(review): elided cmd_size lines reconstructed from the generator's
 * uniform pattern. */
static inline void vn_submit_vkGetImageMemoryRequirements2(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkGetImageMemoryRequirements2(device, pInfo, pMemoryRequirements);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetImageMemoryRequirements2_reply(device, pInfo, pMemoryRequirements) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkGetImageMemoryRequirements2(enc, cmd_flags, device, pInfo, pMemoryRequirements);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Encode vkGetImageSparseMemoryRequirements2 and submit it over the ring.
 * NOTE(review): elided cmd_size lines reconstructed from the generator's
 * uniform pattern. */
static inline void vn_submit_vkGetImageSparseMemoryRequirements2(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkGetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetImageSparseMemoryRequirements2_reply(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkGetImageSparseMemoryRequirements2(enc, cmd_flags, device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Encode vkGetDeviceImageMemoryRequirements and submit it over the ring.
 * NOTE(review): elided cmd_size lines reconstructed from the generator's
 * uniform pattern. */
static inline void vn_submit_vkGetDeviceImageMemoryRequirements(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkGetDeviceImageMemoryRequirements(device, pInfo, pMemoryRequirements);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetDeviceImageMemoryRequirements_reply(device, pInfo, pMemoryRequirements) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkGetDeviceImageMemoryRequirements(enc, cmd_flags, device, pInfo, pMemoryRequirements);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
/* Encode vkGetDeviceImageSparseMemoryRequirements and submit it over the
 * ring. NOTE(review): elided cmd_size lines reconstructed from the
 * generator's uniform pattern. */
static inline void vn_submit_vkGetDeviceImageSparseMemoryRequirements(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements, struct vn_ring_submit_command *submit)
{
    uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
    void *cmd_data = local_cmd_data;
    /* spill to the heap when the encoded command exceeds the local buffer */
    size_t cmd_size = vn_sizeof_vkGetDeviceImageSparseMemoryRequirements(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
    if (cmd_size > VN_SUBMIT_LOCAL_CMD_SIZE) {
        cmd_data = malloc(cmd_size);
        if (!cmd_data)
            cmd_size = 0;
    }
    const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetDeviceImageSparseMemoryRequirements_reply(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements) : 0;

    struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
    if (cmd_size) {
        vn_encode_vkGetDeviceImageSparseMemoryRequirements(enc, cmd_flags, device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
        vn_ring_submit_command(vn_ring, submit);
        if (cmd_data != local_cmd_data)
            free(cmd_data);
    }
}
static inline void vn_submit_vkGetImageDrmFormatModifierPropertiesEXT(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetImageDrmFormatModifierPropertiesEXT(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -2237,253 +2237,253 @@ static inline void vn_submit_vkGetImageDrmFormatModifierPropertiesEXT(struct vn_
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetImageDrmFormatModifierPropertiesEXT_reply(device, image, pProperties) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetImageDrmFormatModifierPropertiesEXT(enc, cmd_flags, device, image, pProperties);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
/* Synchronous vkGetImageMemoryRequirements: submits on the ring with a reply
 * requested, then decodes pMemoryRequirements from the host reply (if any). */
static inline void vn_call_vkGetImageMemoryRequirements(struct vn_ring *vn_ring, VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkGetImageMemoryRequirements(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, image, pMemoryRequirements, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        vn_decode_vkGetImageMemoryRequirements_reply(dec, device, image, pMemoryRequirements);
        vn_ring_free_command_reply(vn_ring, &submit);
    }
}

/* Fire-and-forget vkGetImageMemoryRequirements: no reply is requested. */
static inline void vn_async_vkGetImageMemoryRequirements(struct vn_ring *vn_ring, VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkGetImageMemoryRequirements(vn_ring, 0, device, image, pMemoryRequirements, &submit);
}
/* Synchronous vkBindImageMemory: submits on the ring and decodes the
 * VkResult from the host reply; returns VK_ERROR_OUT_OF_HOST_MEMORY when no
 * reply decoder could be obtained. */
static inline VkResult vn_call_vkBindImageMemory(struct vn_ring *vn_ring, VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkBindImageMemory(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, image, memory, memoryOffset, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        const VkResult ret = vn_decode_vkBindImageMemory_reply(dec, device, image, memory, memoryOffset);
        vn_ring_free_command_reply(vn_ring, &submit);
        return ret;
    } else {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
}

/* Fire-and-forget vkBindImageMemory: the host result is not retrieved. */
static inline void vn_async_vkBindImageMemory(struct vn_ring *vn_ring, VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkBindImageMemory(vn_ring, 0, device, image, memory, memoryOffset, &submit);
}
/* Synchronous vkGetImageSparseMemoryRequirements: submits on the ring with a
 * reply requested, then decodes the count/requirements from the host reply. */
static inline void vn_call_vkGetImageSparseMemoryRequirements(struct vn_ring *vn_ring, VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkGetImageSparseMemoryRequirements(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        vn_decode_vkGetImageSparseMemoryRequirements_reply(dec, device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
        vn_ring_free_command_reply(vn_ring, &submit);
    }
}

/* Fire-and-forget vkGetImageSparseMemoryRequirements: no reply requested. */
static inline void vn_async_vkGetImageSparseMemoryRequirements(struct vn_ring *vn_ring, VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkGetImageSparseMemoryRequirements(vn_ring, 0, device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements, &submit);
}
/* Synchronous vkCreateImage: submits on the ring and decodes the VkResult
 * and created handle from the host reply; returns
 * VK_ERROR_OUT_OF_HOST_MEMORY when no reply decoder could be obtained. */
static inline VkResult vn_call_vkCreateImage(struct vn_ring *vn_ring, VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkCreateImage(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pImage, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        const VkResult ret = vn_decode_vkCreateImage_reply(dec, device, pCreateInfo, pAllocator, pImage);
        vn_ring_free_command_reply(vn_ring, &submit);
        return ret;
    } else {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
}

/* Fire-and-forget vkCreateImage: the host result is not retrieved. */
static inline void vn_async_vkCreateImage(struct vn_ring *vn_ring, VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkCreateImage(vn_ring, 0, device, pCreateInfo, pAllocator, pImage, &submit);
}
/* Synchronous vkDestroyImage: submits on the ring with a reply requested and
 * consumes the (empty) host reply. */
static inline void vn_call_vkDestroyImage(struct vn_ring *vn_ring, VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkDestroyImage(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, image, pAllocator, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        vn_decode_vkDestroyImage_reply(dec, device, image, pAllocator);
        vn_ring_free_command_reply(vn_ring, &submit);
    }
}

/* Fire-and-forget vkDestroyImage: no reply is requested. */
static inline void vn_async_vkDestroyImage(struct vn_ring *vn_ring, VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkDestroyImage(vn_ring, 0, device, image, pAllocator, &submit);
}
/* Synchronous vkGetImageSubresourceLayout: submits on the ring with a reply
 * requested, then decodes pLayout from the host reply. */
static inline void vn_call_vkGetImageSubresourceLayout(struct vn_ring *vn_ring, VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkGetImageSubresourceLayout(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, image, pSubresource, pLayout, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        vn_decode_vkGetImageSubresourceLayout_reply(dec, device, image, pSubresource, pLayout);
        vn_ring_free_command_reply(vn_ring, &submit);
    }
}

/* Fire-and-forget vkGetImageSubresourceLayout: no reply is requested. */
static inline void vn_async_vkGetImageSubresourceLayout(struct vn_ring *vn_ring, VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkGetImageSubresourceLayout(vn_ring, 0, device, image, pSubresource, pLayout, &submit);
}
/* Synchronous vkBindImageMemory2: submits on the ring and decodes the
 * VkResult from the host reply; returns VK_ERROR_OUT_OF_HOST_MEMORY when no
 * reply decoder could be obtained. */
static inline VkResult vn_call_vkBindImageMemory2(struct vn_ring *vn_ring, VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkBindImageMemory2(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, bindInfoCount, pBindInfos, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        const VkResult ret = vn_decode_vkBindImageMemory2_reply(dec, device, bindInfoCount, pBindInfos);
        vn_ring_free_command_reply(vn_ring, &submit);
        return ret;
    } else {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
}

/* Fire-and-forget vkBindImageMemory2: the host result is not retrieved. */
static inline void vn_async_vkBindImageMemory2(struct vn_ring *vn_ring, VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkBindImageMemory2(vn_ring, 0, device, bindInfoCount, pBindInfos, &submit);
}
/* Synchronous vkGetImageMemoryRequirements2: submits on the ring with a
 * reply requested, then decodes pMemoryRequirements from the host reply. */
static inline void vn_call_vkGetImageMemoryRequirements2(struct vn_ring *vn_ring, VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkGetImageMemoryRequirements2(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, pMemoryRequirements, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        vn_decode_vkGetImageMemoryRequirements2_reply(dec, device, pInfo, pMemoryRequirements);
        vn_ring_free_command_reply(vn_ring, &submit);
    }
}

/* Fire-and-forget vkGetImageMemoryRequirements2: no reply is requested. */
static inline void vn_async_vkGetImageMemoryRequirements2(struct vn_ring *vn_ring, VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkGetImageMemoryRequirements2(vn_ring, 0, device, pInfo, pMemoryRequirements, &submit);
}
/* Synchronous vkGetImageSparseMemoryRequirements2: submits on the ring with
 * a reply requested, then decodes the count/requirements from the reply. */
static inline void vn_call_vkGetImageSparseMemoryRequirements2(struct vn_ring *vn_ring, VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkGetImageSparseMemoryRequirements2(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        vn_decode_vkGetImageSparseMemoryRequirements2_reply(dec, device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
        vn_ring_free_command_reply(vn_ring, &submit);
    }
}

/* Fire-and-forget vkGetImageSparseMemoryRequirements2: no reply requested. */
static inline void vn_async_vkGetImageSparseMemoryRequirements2(struct vn_ring *vn_ring, VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkGetImageSparseMemoryRequirements2(vn_ring, 0, device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements, &submit);
}
/* Synchronous vkGetDeviceImageMemoryRequirements: submits on the ring with a
 * reply requested, then decodes pMemoryRequirements from the host reply. */
static inline void vn_call_vkGetDeviceImageMemoryRequirements(struct vn_ring *vn_ring, VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkGetDeviceImageMemoryRequirements(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, pMemoryRequirements, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        vn_decode_vkGetDeviceImageMemoryRequirements_reply(dec, device, pInfo, pMemoryRequirements);
        vn_ring_free_command_reply(vn_ring, &submit);
    }
}

/* Fire-and-forget vkGetDeviceImageMemoryRequirements: no reply requested. */
static inline void vn_async_vkGetDeviceImageMemoryRequirements(struct vn_ring *vn_ring, VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkGetDeviceImageMemoryRequirements(vn_ring, 0, device, pInfo, pMemoryRequirements, &submit);
}
/* Synchronous vkGetDeviceImageSparseMemoryRequirements: submits on the ring
 * with a reply requested, then decodes the count/requirements from it. */
static inline void vn_call_vkGetDeviceImageSparseMemoryRequirements(struct vn_ring *vn_ring, VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkGetDeviceImageSparseMemoryRequirements(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        vn_decode_vkGetDeviceImageSparseMemoryRequirements_reply(dec, device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
        vn_ring_free_command_reply(vn_ring, &submit);
    }
}

/* Fire-and-forget vkGetDeviceImageSparseMemoryRequirements: no reply. */
static inline void vn_async_vkGetDeviceImageSparseMemoryRequirements(struct vn_ring *vn_ring, VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkGetDeviceImageSparseMemoryRequirements(vn_ring, 0, device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements, &submit);
}
/* Synchronous vkGetImageDrmFormatModifierPropertiesEXT: submits on the ring
 * and decodes the VkResult and pProperties from the host reply; returns
 * VK_ERROR_OUT_OF_HOST_MEMORY when no reply decoder could be obtained. */
static inline VkResult vn_call_vkGetImageDrmFormatModifierPropertiesEXT(struct vn_ring *vn_ring, VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkGetImageDrmFormatModifierPropertiesEXT(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, image, pProperties, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        const VkResult ret = vn_decode_vkGetImageDrmFormatModifierPropertiesEXT_reply(dec, device, image, pProperties);
        vn_ring_free_command_reply(vn_ring, &submit);
        return ret;
    } else {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
}

/* Fire-and-forget vkGetImageDrmFormatModifierPropertiesEXT: no reply. */
static inline void vn_async_vkGetImageDrmFormatModifierPropertiesEXT(struct vn_ring *vn_ring, VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkGetImageDrmFormatModifierPropertiesEXT(vn_ring, 0, device, image, pProperties, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_IMAGE_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_IMAGE_VIEW_H
#define VN_PROTOCOL_DRIVER_IMAGE_VIEW_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkImageViewUsageCreateInfo chain */
@ -365,7 +365,7 @@ static inline void vn_decode_vkDestroyImageView_reply(struct vn_cs_decoder *dec,
/* skip pAllocator */
}
static inline void vn_submit_vkCreateImageView(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateImageView(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -377,16 +377,16 @@ static inline void vn_submit_vkCreateImageView(struct vn_instance *vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateImageView_reply(device, pCreateInfo, pAllocator, pView) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateImageView(enc, cmd_flags, device, pCreateInfo, pAllocator, pView);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyImageView(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyImageView(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -398,54 +398,54 @@ static inline void vn_submit_vkDestroyImageView(struct vn_instance *vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyImageView_reply(device, imageView, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyImageView(enc, cmd_flags, device, imageView, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
/* Synchronous vkCreateImageView: submits on the ring and decodes the
 * VkResult and created handle from the host reply; returns
 * VK_ERROR_OUT_OF_HOST_MEMORY when no reply decoder could be obtained. */
static inline VkResult vn_call_vkCreateImageView(struct vn_ring *vn_ring, VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkCreateImageView(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pView, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        const VkResult ret = vn_decode_vkCreateImageView_reply(dec, device, pCreateInfo, pAllocator, pView);
        vn_ring_free_command_reply(vn_ring, &submit);
        return ret;
    } else {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
}

/* Fire-and-forget vkCreateImageView: the host result is not retrieved. */
static inline void vn_async_vkCreateImageView(struct vn_ring *vn_ring, VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkCreateImageView(vn_ring, 0, device, pCreateInfo, pAllocator, pView, &submit);
}
/* Synchronous vkDestroyImageView: submits on the ring with a reply requested
 * and consumes the (empty) host reply. */
static inline void vn_call_vkDestroyImageView(struct vn_ring *vn_ring, VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator)
{
    VN_TRACE_FUNC();
    struct vn_ring_submit_command submit;
    vn_submit_vkDestroyImageView(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, imageView, pAllocator, &submit);
    struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
    if (dec) {
        vn_decode_vkDestroyImageView_reply(dec, device, imageView, pAllocator);
        vn_ring_free_command_reply(vn_ring, &submit);
    }
}

/* Fire-and-forget vkDestroyImageView: no reply is requested. */
static inline void vn_async_vkDestroyImageView(struct vn_ring *vn_ring, VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator)
{
    struct vn_ring_submit_command submit;
    vn_submit_vkDestroyImageView(vn_ring, 0, device, imageView, pAllocator, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_IMAGE_VIEW_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_INSTANCE_H
#define VN_PROTOCOL_DRIVER_INSTANCE_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/*
@ -556,7 +556,7 @@ static inline VkResult vn_decode_vkEnumerateInstanceExtensionProperties_reply(st
return ret;
}
static inline void vn_submit_vkCreateInstance(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateInstance(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -568,16 +568,16 @@ static inline void vn_submit_vkCreateInstance(struct vn_instance *vn_instance, V
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateInstance_reply(pCreateInfo, pAllocator, pInstance) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateInstance(enc, cmd_flags, pCreateInfo, pAllocator, pInstance);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyInstance(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkInstance instance, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyInstance(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkInstance instance, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -589,16 +589,16 @@ static inline void vn_submit_vkDestroyInstance(struct vn_instance *vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyInstance_reply(instance, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyInstance(enc, cmd_flags, instance, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkEnumerateInstanceVersion(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, uint32_t* pApiVersion, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkEnumerateInstanceVersion(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, uint32_t* pApiVersion, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -610,16 +610,16 @@ static inline void vn_submit_vkEnumerateInstanceVersion(struct vn_instance *vn_i
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkEnumerateInstanceVersion_reply(pApiVersion) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkEnumerateInstanceVersion(enc, cmd_flags, pApiVersion);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkEnumerateInstanceLayerProperties(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, uint32_t* pPropertyCount, VkLayerProperties* pProperties, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkEnumerateInstanceLayerProperties(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, uint32_t* pPropertyCount, VkLayerProperties* pProperties, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -631,16 +631,16 @@ static inline void vn_submit_vkEnumerateInstanceLayerProperties(struct vn_instan
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkEnumerateInstanceLayerProperties_reply(pPropertyCount, pProperties) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkEnumerateInstanceLayerProperties(enc, cmd_flags, pPropertyCount, pProperties);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkEnumerateInstanceExtensionProperties(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkEnumerateInstanceExtensionProperties(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -652,120 +652,120 @@ static inline void vn_submit_vkEnumerateInstanceExtensionProperties(struct vn_in
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkEnumerateInstanceExtensionProperties_reply(pLayerName, pPropertyCount, pProperties) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkEnumerateInstanceExtensionProperties(enc, cmd_flags, pLayerName, pPropertyCount, pProperties);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateInstance(struct vn_instance *vn_instance, const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance)
static inline VkResult vn_call_vkCreateInstance(struct vn_ring *vn_ring, const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateInstance(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, pCreateInfo, pAllocator, pInstance, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateInstance(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, pCreateInfo, pAllocator, pInstance, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateInstance_reply(dec, pCreateInfo, pAllocator, pInstance);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateInstance(struct vn_instance *vn_instance, const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance)
static inline void vn_async_vkCreateInstance(struct vn_ring *vn_ring, const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateInstance(vn_instance, 0, pCreateInfo, pAllocator, pInstance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateInstance(vn_ring, 0, pCreateInfo, pAllocator, pInstance, &submit);
}
static inline void vn_call_vkDestroyInstance(struct vn_instance *vn_instance, VkInstance instance, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyInstance(struct vn_ring *vn_ring, VkInstance instance, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyInstance(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, instance, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyInstance(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, instance, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyInstance_reply(dec, instance, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyInstance(struct vn_instance *vn_instance, VkInstance instance, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyInstance(struct vn_ring *vn_ring, VkInstance instance, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyInstance(vn_instance, 0, instance, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyInstance(vn_ring, 0, instance, pAllocator, &submit);
}
static inline VkResult vn_call_vkEnumerateInstanceVersion(struct vn_instance *vn_instance, uint32_t* pApiVersion)
static inline VkResult vn_call_vkEnumerateInstanceVersion(struct vn_ring *vn_ring, uint32_t* pApiVersion)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkEnumerateInstanceVersion(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, pApiVersion, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkEnumerateInstanceVersion(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, pApiVersion, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkEnumerateInstanceVersion_reply(dec, pApiVersion);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkEnumerateInstanceVersion(struct vn_instance *vn_instance, uint32_t* pApiVersion)
static inline void vn_async_vkEnumerateInstanceVersion(struct vn_ring *vn_ring, uint32_t* pApiVersion)
{
struct vn_instance_submit_command submit;
vn_submit_vkEnumerateInstanceVersion(vn_instance, 0, pApiVersion, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkEnumerateInstanceVersion(vn_ring, 0, pApiVersion, &submit);
}
static inline VkResult vn_call_vkEnumerateInstanceLayerProperties(struct vn_instance *vn_instance, uint32_t* pPropertyCount, VkLayerProperties* pProperties)
static inline VkResult vn_call_vkEnumerateInstanceLayerProperties(struct vn_ring *vn_ring, uint32_t* pPropertyCount, VkLayerProperties* pProperties)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkEnumerateInstanceLayerProperties(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, pPropertyCount, pProperties, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkEnumerateInstanceLayerProperties(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, pPropertyCount, pProperties, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkEnumerateInstanceLayerProperties_reply(dec, pPropertyCount, pProperties);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkEnumerateInstanceLayerProperties(struct vn_instance *vn_instance, uint32_t* pPropertyCount, VkLayerProperties* pProperties)
static inline void vn_async_vkEnumerateInstanceLayerProperties(struct vn_ring *vn_ring, uint32_t* pPropertyCount, VkLayerProperties* pProperties)
{
struct vn_instance_submit_command submit;
vn_submit_vkEnumerateInstanceLayerProperties(vn_instance, 0, pPropertyCount, pProperties, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkEnumerateInstanceLayerProperties(vn_ring, 0, pPropertyCount, pProperties, &submit);
}
static inline VkResult vn_call_vkEnumerateInstanceExtensionProperties(struct vn_instance *vn_instance, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties)
static inline VkResult vn_call_vkEnumerateInstanceExtensionProperties(struct vn_ring *vn_ring, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkEnumerateInstanceExtensionProperties(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, pLayerName, pPropertyCount, pProperties, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkEnumerateInstanceExtensionProperties(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, pLayerName, pPropertyCount, pProperties, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkEnumerateInstanceExtensionProperties_reply(dec, pLayerName, pPropertyCount, pProperties);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkEnumerateInstanceExtensionProperties(struct vn_instance *vn_instance, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties)
static inline void vn_async_vkEnumerateInstanceExtensionProperties(struct vn_ring *vn_ring, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties)
{
struct vn_instance_submit_command submit;
vn_submit_vkEnumerateInstanceExtensionProperties(vn_instance, 0, pLayerName, pPropertyCount, pProperties, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkEnumerateInstanceExtensionProperties(vn_ring, 0, pLayerName, pPropertyCount, pProperties, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_INSTANCE_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_PIPELINE_H
#define VN_PROTOCOL_DRIVER_PIPELINE_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkSpecializationMapEntry */
@ -2629,7 +2629,7 @@ static inline void vn_decode_vkDestroyPipeline_reply(struct vn_cs_decoder *dec,
/* skip pAllocator */
}
static inline void vn_submit_vkCreateGraphicsPipelines(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateGraphicsPipelines(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -2641,16 +2641,16 @@ static inline void vn_submit_vkCreateGraphicsPipelines(struct vn_instance *vn_in
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateGraphicsPipelines_reply(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateGraphicsPipelines(enc, cmd_flags, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkCreateComputePipelines(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateComputePipelines(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -2662,16 +2662,16 @@ static inline void vn_submit_vkCreateComputePipelines(struct vn_instance *vn_ins
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateComputePipelines_reply(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateComputePipelines(enc, cmd_flags, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyPipeline(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyPipeline(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -2683,76 +2683,76 @@ static inline void vn_submit_vkDestroyPipeline(struct vn_instance *vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyPipeline_reply(device, pipeline, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyPipeline(enc, cmd_flags, device, pipeline, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateGraphicsPipelines(struct vn_instance *vn_instance, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
static inline VkResult vn_call_vkCreateGraphicsPipelines(struct vn_ring *vn_ring, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateGraphicsPipelines(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateGraphicsPipelines(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateGraphicsPipelines_reply(dec, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateGraphicsPipelines(struct vn_instance *vn_instance, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
static inline void vn_async_vkCreateGraphicsPipelines(struct vn_ring *vn_ring, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateGraphicsPipelines(vn_instance, 0, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateGraphicsPipelines(vn_ring, 0, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &submit);
}
static inline VkResult vn_call_vkCreateComputePipelines(struct vn_instance *vn_instance, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
static inline VkResult vn_call_vkCreateComputePipelines(struct vn_ring *vn_ring, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateComputePipelines(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateComputePipelines(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateComputePipelines_reply(dec, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateComputePipelines(struct vn_instance *vn_instance, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
static inline void vn_async_vkCreateComputePipelines(struct vn_ring *vn_ring, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateComputePipelines(vn_instance, 0, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateComputePipelines(vn_ring, 0, device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, &submit);
}
static inline void vn_call_vkDestroyPipeline(struct vn_instance *vn_instance, VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyPipeline(struct vn_ring *vn_ring, VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyPipeline(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipeline, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyPipeline(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipeline, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyPipeline_reply(dec, device, pipeline, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyPipeline(struct vn_instance *vn_instance, VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyPipeline(struct vn_ring *vn_ring, VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyPipeline(vn_instance, 0, device, pipeline, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyPipeline(vn_ring, 0, device, pipeline, pAllocator, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_PIPELINE_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_PIPELINE_CACHE_H
#define VN_PROTOCOL_DRIVER_PIPELINE_CACHE_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkPipelineCacheCreateInfo chain */
@ -350,7 +350,7 @@ static inline VkResult vn_decode_vkMergePipelineCaches_reply(struct vn_cs_decode
return ret;
}
static inline void vn_submit_vkCreatePipelineCache(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreatePipelineCache(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -362,16 +362,16 @@ static inline void vn_submit_vkCreatePipelineCache(struct vn_instance *vn_instan
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreatePipelineCache_reply(device, pCreateInfo, pAllocator, pPipelineCache) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreatePipelineCache(enc, cmd_flags, device, pCreateInfo, pAllocator, pPipelineCache);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyPipelineCache(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyPipelineCache(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -383,16 +383,16 @@ static inline void vn_submit_vkDestroyPipelineCache(struct vn_instance *vn_insta
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyPipelineCache_reply(device, pipelineCache, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyPipelineCache(enc, cmd_flags, device, pipelineCache, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetPipelineCacheData(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetPipelineCacheData(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -404,16 +404,16 @@ static inline void vn_submit_vkGetPipelineCacheData(struct vn_instance *vn_insta
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetPipelineCacheData_reply(device, pipelineCache, pDataSize, pData) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetPipelineCacheData(enc, cmd_flags, device, pipelineCache, pDataSize, pData);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkMergePipelineCaches(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkMergePipelineCaches(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -425,98 +425,98 @@ static inline void vn_submit_vkMergePipelineCaches(struct vn_instance *vn_instan
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkMergePipelineCaches_reply(device, dstCache, srcCacheCount, pSrcCaches) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkMergePipelineCaches(enc, cmd_flags, device, dstCache, srcCacheCount, pSrcCaches);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreatePipelineCache(struct vn_instance *vn_instance, VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache)
static inline VkResult vn_call_vkCreatePipelineCache(struct vn_ring *vn_ring, VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreatePipelineCache(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pPipelineCache, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreatePipelineCache(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pPipelineCache, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreatePipelineCache_reply(dec, device, pCreateInfo, pAllocator, pPipelineCache);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreatePipelineCache(struct vn_instance *vn_instance, VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache)
static inline void vn_async_vkCreatePipelineCache(struct vn_ring *vn_ring, VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreatePipelineCache(vn_instance, 0, device, pCreateInfo, pAllocator, pPipelineCache, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreatePipelineCache(vn_ring, 0, device, pCreateInfo, pAllocator, pPipelineCache, &submit);
}
static inline void vn_call_vkDestroyPipelineCache(struct vn_instance *vn_instance, VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyPipelineCache(struct vn_ring *vn_ring, VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyPipelineCache(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipelineCache, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyPipelineCache(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipelineCache, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyPipelineCache_reply(dec, device, pipelineCache, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyPipelineCache(struct vn_instance *vn_instance, VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyPipelineCache(struct vn_ring *vn_ring, VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyPipelineCache(vn_instance, 0, device, pipelineCache, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyPipelineCache(vn_ring, 0, device, pipelineCache, pAllocator, &submit);
}
static inline VkResult vn_call_vkGetPipelineCacheData(struct vn_instance *vn_instance, VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData)
static inline VkResult vn_call_vkGetPipelineCacheData(struct vn_ring *vn_ring, VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetPipelineCacheData(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipelineCache, pDataSize, pData, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetPipelineCacheData(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipelineCache, pDataSize, pData, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkGetPipelineCacheData_reply(dec, device, pipelineCache, pDataSize, pData);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkGetPipelineCacheData(struct vn_instance *vn_instance, VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData)
static inline void vn_async_vkGetPipelineCacheData(struct vn_ring *vn_ring, VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetPipelineCacheData(vn_instance, 0, device, pipelineCache, pDataSize, pData, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetPipelineCacheData(vn_ring, 0, device, pipelineCache, pDataSize, pData, &submit);
}
static inline VkResult vn_call_vkMergePipelineCaches(struct vn_instance *vn_instance, VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches)
static inline VkResult vn_call_vkMergePipelineCaches(struct vn_ring *vn_ring, VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkMergePipelineCaches(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, dstCache, srcCacheCount, pSrcCaches, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkMergePipelineCaches(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, dstCache, srcCacheCount, pSrcCaches, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkMergePipelineCaches_reply(dec, device, dstCache, srcCacheCount, pSrcCaches);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkMergePipelineCaches(struct vn_instance *vn_instance, VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches)
static inline void vn_async_vkMergePipelineCaches(struct vn_ring *vn_ring, VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches)
{
struct vn_instance_submit_command submit;
vn_submit_vkMergePipelineCaches(vn_instance, 0, device, dstCache, srcCacheCount, pSrcCaches, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkMergePipelineCaches(vn_ring, 0, device, dstCache, srcCacheCount, pSrcCaches, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_PIPELINE_CACHE_H */

View file

@@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_PIPELINE_LAYOUT_H
#define VN_PROTOCOL_DRIVER_PIPELINE_LAYOUT_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkPushConstantRange */
@@ -240,7 +240,7 @@ static inline void vn_decode_vkDestroyPipelineLayout_reply(struct vn_cs_decoder
/* skip pAllocator */
}
static inline void vn_submit_vkCreatePipelineLayout(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreatePipelineLayout(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -252,16 +252,16 @@ static inline void vn_submit_vkCreatePipelineLayout(struct vn_instance *vn_insta
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreatePipelineLayout_reply(device, pCreateInfo, pAllocator, pPipelineLayout) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreatePipelineLayout(enc, cmd_flags, device, pCreateInfo, pAllocator, pPipelineLayout);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyPipelineLayout(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyPipelineLayout(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -273,54 +273,54 @@ static inline void vn_submit_vkDestroyPipelineLayout(struct vn_inst
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyPipelineLayout_reply(device, pipelineLayout, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyPipelineLayout(enc, cmd_flags, device, pipelineLayout, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreatePipelineLayout(struct vn_instance *vn_instance, VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout)
static inline VkResult vn_call_vkCreatePipelineLayout(struct vn_ring *vn_ring, VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreatePipelineLayout(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pPipelineLayout, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreatePipelineLayout(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pPipelineLayout, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreatePipelineLayout_reply(dec, device, pCreateInfo, pAllocator, pPipelineLayout);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreatePipelineLayout(struct vn_instance *vn_instance, VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout)
static inline void vn_async_vkCreatePipelineLayout(struct vn_ring *vn_ring, VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreatePipelineLayout(vn_instance, 0, device, pCreateInfo, pAllocator, pPipelineLayout, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreatePipelineLayout(vn_ring, 0, device, pCreateInfo, pAllocator, pPipelineLayout, &submit);
}
static inline void vn_call_vkDestroyPipelineLayout(struct vn_instance *vn_instance, VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyPipelineLayout(struct vn_ring *vn_ring, VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyPipelineLayout(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipelineLayout, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyPipelineLayout(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pipelineLayout, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyPipelineLayout_reply(dec, device, pipelineLayout, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyPipelineLayout(struct vn_instance *vn_instance, VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyPipelineLayout(struct vn_ring *vn_ring, VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyPipelineLayout(vn_instance, 0, device, pipelineLayout, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyPipelineLayout(vn_ring, 0, device, pipelineLayout, pAllocator, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_PIPELINE_LAYOUT_H */

View file

@@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_PRIVATE_DATA_SLOT_H
#define VN_PROTOCOL_DRIVER_PRIVATE_DATA_SLOT_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkPrivateDataSlotCreateInfo chain */
@@ -312,7 +312,7 @@ static inline void vn_decode_vkGetPrivateData_reply(struct vn_cs_decoder *dec, V
}
}
static inline void vn_submit_vkCreatePrivateDataSlot(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreatePrivateDataSlot(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -324,16 +324,16 @@ static inline void vn_submit_vkCreatePrivateDataSlot(struct vn_inst
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreatePrivateDataSlot_reply(device, pCreateInfo, pAllocator, pPrivateDataSlot) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreatePrivateDataSlot(enc, cmd_flags, device, pCreateInfo, pAllocator, pPrivateDataSlot);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyPrivateDataSlot(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyPrivateDataSlot(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -345,16 +345,16 @@ static inline void vn_submit_vkDestroyPrivateDataSlot(struct vn_ins
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyPrivateDataSlot_reply(device, privateDataSlot, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyPrivateDataSlot(enc, cmd_flags, device, privateDataSlot, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkSetPrivateData(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkSetPrivateData(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -366,16 +366,16 @@ static inline void vn_submit_vkSetPrivateData(struct vn_instance *vn_instance, V
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkSetPrivateData_reply(device, objectType, objectHandle, privateDataSlot, data) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkSetPrivateData(enc, cmd_flags, device, objectType, objectHandle, privateDataSlot, data);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetPrivateData(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetPrivateData(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -387,95 +387,95 @@ static inline void vn_submit_vkGetPrivateData(struct vn_instance *vn_instance, V
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetPrivateData_reply(device, objectType, objectHandle, privateDataSlot, pData) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetPrivateData(enc, cmd_flags, device, objectType, objectHandle, privateDataSlot, pData);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreatePrivateDataSlot(struct vn_instance *vn_instance, VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot)
static inline VkResult vn_call_vkCreatePrivateDataSlot(struct vn_ring *vn_ring, VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreatePrivateDataSlot(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pPrivateDataSlot, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreatePrivateDataSlot(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pPrivateDataSlot, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreatePrivateDataSlot_reply(dec, device, pCreateInfo, pAllocator, pPrivateDataSlot);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreatePrivateDataSlot(struct vn_instance *vn_instance, VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot)
static inline void vn_async_vkCreatePrivateDataSlot(struct vn_ring *vn_ring, VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreatePrivateDataSlot(vn_instance, 0, device, pCreateInfo, pAllocator, pPrivateDataSlot, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreatePrivateDataSlot(vn_ring, 0, device, pCreateInfo, pAllocator, pPrivateDataSlot, &submit);
}
static inline void vn_call_vkDestroyPrivateDataSlot(struct vn_instance *vn_instance, VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyPrivateDataSlot(struct vn_ring *vn_ring, VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyPrivateDataSlot(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, privateDataSlot, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyPrivateDataSlot(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, privateDataSlot, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyPrivateDataSlot_reply(dec, device, privateDataSlot, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyPrivateDataSlot(struct vn_instance *vn_instance, VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyPrivateDataSlot(struct vn_ring *vn_ring, VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyPrivateDataSlot(vn_instance, 0, device, privateDataSlot, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyPrivateDataSlot(vn_ring, 0, device, privateDataSlot, pAllocator, &submit);
}
static inline VkResult vn_call_vkSetPrivateData(struct vn_instance *vn_instance, VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data)
static inline VkResult vn_call_vkSetPrivateData(struct vn_ring *vn_ring, VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkSetPrivateData(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, objectType, objectHandle, privateDataSlot, data, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkSetPrivateData(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, objectType, objectHandle, privateDataSlot, data, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkSetPrivateData_reply(dec, device, objectType, objectHandle, privateDataSlot, data);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkSetPrivateData(struct vn_instance *vn_instance, VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data)
static inline void vn_async_vkSetPrivateData(struct vn_ring *vn_ring, VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data)
{
struct vn_instance_submit_command submit;
vn_submit_vkSetPrivateData(vn_instance, 0, device, objectType, objectHandle, privateDataSlot, data, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkSetPrivateData(vn_ring, 0, device, objectType, objectHandle, privateDataSlot, data, &submit);
}
static inline void vn_call_vkGetPrivateData(struct vn_instance *vn_instance, VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData)
static inline void vn_call_vkGetPrivateData(struct vn_ring *vn_ring, VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetPrivateData(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, objectType, objectHandle, privateDataSlot, pData, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetPrivateData(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, objectType, objectHandle, privateDataSlot, pData, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkGetPrivateData_reply(dec, device, objectType, objectHandle, privateDataSlot, pData);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkGetPrivateData(struct vn_instance *vn_instance, VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData)
static inline void vn_async_vkGetPrivateData(struct vn_ring *vn_ring, VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetPrivateData(vn_instance, 0, device, objectType, objectHandle, privateDataSlot, pData, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetPrivateData(vn_ring, 0, device, objectType, objectHandle, privateDataSlot, pData, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_PRIVATE_DATA_SLOT_H */

View file

@@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_QUERY_POOL_H
#define VN_PROTOCOL_DRIVER_QUERY_POOL_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkQueryPoolCreateInfo chain */
@@ -331,7 +331,7 @@ static inline void vn_decode_vkResetQueryPool_reply(struct vn_cs_decoder *dec, V
/* skip queryCount */
}
static inline void vn_submit_vkCreateQueryPool(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateQueryPool(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -343,16 +343,16 @@ static inline void vn_submit_vkCreateQueryPool(struct vn_instance *vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateQueryPool_reply(device, pCreateInfo, pAllocator, pQueryPool) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateQueryPool(enc, cmd_flags, device, pCreateInfo, pAllocator, pQueryPool);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyQueryPool(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyQueryPool(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -364,16 +364,16 @@ static inline void vn_submit_vkDestroyQueryPool(struct vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyQueryPool_reply(device, queryPool, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyQueryPool(enc, cmd_flags, device, queryPool, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetQueryPoolResults(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetQueryPoolResults(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -385,16 +385,16 @@ static inline void vn_submit_vkGetQueryPoolResults(struct vn_instan
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetQueryPoolResults_reply(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetQueryPoolResults(enc, cmd_flags, device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkResetQueryPool(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkResetQueryPool(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@@ -406,95 +406,95 @@ static inline void vn_submit_vkResetQueryPool(struct vn_instance, V
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkResetQueryPool_reply(device, queryPool, firstQuery, queryCount) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkResetQueryPool(enc, cmd_flags, device, queryPool, firstQuery, queryCount);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateQueryPool(struct vn_instance *vn_instance, VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool)
static inline VkResult vn_call_vkCreateQueryPool(struct vn_ring *vn_ring, VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateQueryPool(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pQueryPool, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateQueryPool(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pQueryPool, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateQueryPool_reply(dec, device, pCreateInfo, pAllocator, pQueryPool);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateQueryPool(struct vn_instance *vn_instance, VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool)
static inline void vn_async_vkCreateQueryPool(struct vn_ring *vn_ring, VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateQueryPool(vn_instance, 0, device, pCreateInfo, pAllocator, pQueryPool, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateQueryPool(vn_ring, 0, device, pCreateInfo, pAllocator, pQueryPool, &submit);
}
static inline void vn_call_vkDestroyQueryPool(struct vn_instance *vn_instance, VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyQueryPool(struct vn_ring *vn_ring, VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyQueryPool(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, queryPool, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyQueryPool(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, queryPool, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyQueryPool_reply(dec, device, queryPool, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyQueryPool(struct vn_instance *vn_instance, VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyQueryPool(struct vn_ring *vn_ring, VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyQueryPool(vn_instance, 0, device, queryPool, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyQueryPool(vn_ring, 0, device, queryPool, pAllocator, &submit);
}
static inline VkResult vn_call_vkGetQueryPoolResults(struct vn_instance *vn_instance, VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags)
static inline VkResult vn_call_vkGetQueryPoolResults(struct vn_ring *vn_ring, VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetQueryPoolResults(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetQueryPoolResults(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkGetQueryPoolResults_reply(dec, device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkGetQueryPoolResults(struct vn_instance *vn_instance, VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags)
static inline void vn_async_vkGetQueryPoolResults(struct vn_ring *vn_ring, VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetQueryPoolResults(vn_instance, 0, device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetQueryPoolResults(vn_ring, 0, device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags, &submit);
}
static inline void vn_call_vkResetQueryPool(struct vn_instance *vn_instance, VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount)
static inline void vn_call_vkResetQueryPool(struct vn_ring *vn_ring, VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkResetQueryPool(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, queryPool, firstQuery, queryCount, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkResetQueryPool(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, queryPool, firstQuery, queryCount, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkResetQueryPool_reply(dec, device, queryPool, firstQuery, queryCount);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkResetQueryPool(struct vn_instance *vn_instance, VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount)
static inline void vn_async_vkResetQueryPool(struct vn_ring *vn_ring, VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount)
{
struct vn_instance_submit_command submit;
vn_submit_vkResetQueryPool(vn_instance, 0, device, queryPool, firstQuery, queryCount, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkResetQueryPool(vn_ring, 0, device, queryPool, firstQuery, queryCount, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_QUERY_POOL_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_QUEUE_H
#define VN_PROTOCOL_DRIVER_QUEUE_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkDeviceGroupSubmitInfo chain */
@ -1254,7 +1254,7 @@ static inline VkResult vn_decode_vkQueueSubmit2_reply(struct vn_cs_decoder *dec,
return ret;
}
static inline void vn_submit_vkQueueSubmit(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkQueueSubmit(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1266,16 +1266,16 @@ static inline void vn_submit_vkQueueSubmit(struct vn_instance *vn_instance, VkCo
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkQueueSubmit_reply(queue, submitCount, pSubmits, fence) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkQueueSubmit(enc, cmd_flags, queue, submitCount, pSubmits, fence);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkQueueWaitIdle(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkQueue queue, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkQueueWaitIdle(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkQueue queue, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1287,16 +1287,16 @@ static inline void vn_submit_vkQueueWaitIdle(struct vn_instance *vn_instance, Vk
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkQueueWaitIdle_reply(queue) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkQueueWaitIdle(enc, cmd_flags, queue);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkQueueBindSparse(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkQueueBindSparse(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1308,16 +1308,16 @@ static inline void vn_submit_vkQueueBindSparse(struct vn_instance *vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkQueueBindSparse_reply(queue, bindInfoCount, pBindInfo, fence) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkQueueBindSparse(enc, cmd_flags, queue, bindInfoCount, pBindInfo, fence);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkQueueSubmit2(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkQueueSubmit2(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1329,101 +1329,101 @@ static inline void vn_submit_vkQueueSubmit2(struct vn_instance *vn_instance, VkC
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkQueueSubmit2_reply(queue, submitCount, pSubmits, fence) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkQueueSubmit2(enc, cmd_flags, queue, submitCount, pSubmits, fence);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkQueueSubmit(struct vn_instance *vn_instance, VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence)
static inline VkResult vn_call_vkQueueSubmit(struct vn_ring *vn_ring, VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkQueueSubmit(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, queue, submitCount, pSubmits, fence, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkQueueSubmit(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, queue, submitCount, pSubmits, fence, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkQueueSubmit_reply(dec, queue, submitCount, pSubmits, fence);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkQueueSubmit(struct vn_instance *vn_instance, VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence)
static inline void vn_async_vkQueueSubmit(struct vn_ring *vn_ring, VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence)
{
struct vn_instance_submit_command submit;
vn_submit_vkQueueSubmit(vn_instance, 0, queue, submitCount, pSubmits, fence, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkQueueSubmit(vn_ring, 0, queue, submitCount, pSubmits, fence, &submit);
}
static inline VkResult vn_call_vkQueueWaitIdle(struct vn_instance *vn_instance, VkQueue queue)
static inline VkResult vn_call_vkQueueWaitIdle(struct vn_ring *vn_ring, VkQueue queue)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkQueueWaitIdle(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, queue, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkQueueWaitIdle(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, queue, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkQueueWaitIdle_reply(dec, queue);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkQueueWaitIdle(struct vn_instance *vn_instance, VkQueue queue)
static inline void vn_async_vkQueueWaitIdle(struct vn_ring *vn_ring, VkQueue queue)
{
struct vn_instance_submit_command submit;
vn_submit_vkQueueWaitIdle(vn_instance, 0, queue, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkQueueWaitIdle(vn_ring, 0, queue, &submit);
}
static inline VkResult vn_call_vkQueueBindSparse(struct vn_instance *vn_instance, VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence)
static inline VkResult vn_call_vkQueueBindSparse(struct vn_ring *vn_ring, VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkQueueBindSparse(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, queue, bindInfoCount, pBindInfo, fence, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkQueueBindSparse(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, queue, bindInfoCount, pBindInfo, fence, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkQueueBindSparse_reply(dec, queue, bindInfoCount, pBindInfo, fence);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkQueueBindSparse(struct vn_instance *vn_instance, VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence)
static inline void vn_async_vkQueueBindSparse(struct vn_ring *vn_ring, VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence)
{
struct vn_instance_submit_command submit;
vn_submit_vkQueueBindSparse(vn_instance, 0, queue, bindInfoCount, pBindInfo, fence, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkQueueBindSparse(vn_ring, 0, queue, bindInfoCount, pBindInfo, fence, &submit);
}
static inline VkResult vn_call_vkQueueSubmit2(struct vn_instance *vn_instance, VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence)
static inline VkResult vn_call_vkQueueSubmit2(struct vn_ring *vn_ring, VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkQueueSubmit2(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, queue, submitCount, pSubmits, fence, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkQueueSubmit2(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, queue, submitCount, pSubmits, fence, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkQueueSubmit2_reply(dec, queue, submitCount, pSubmits, fence);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkQueueSubmit2(struct vn_instance *vn_instance, VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence)
static inline void vn_async_vkQueueSubmit2(struct vn_ring *vn_ring, VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence)
{
struct vn_instance_submit_command submit;
vn_submit_vkQueueSubmit2(vn_instance, 0, queue, submitCount, pSubmits, fence, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkQueueSubmit2(vn_ring, 0, queue, submitCount, pSubmits, fence, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_QUEUE_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_RENDER_PASS_H
#define VN_PROTOCOL_DRIVER_RENDER_PASS_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkAttachmentDescription */
@ -1486,7 +1486,7 @@ static inline VkResult vn_decode_vkCreateRenderPass2_reply(struct vn_cs_decoder
return ret;
}
static inline void vn_submit_vkCreateRenderPass(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateRenderPass(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1498,16 +1498,16 @@ static inline void vn_submit_vkCreateRenderPass(struct vn_instance *vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateRenderPass_reply(device, pCreateInfo, pAllocator, pRenderPass) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateRenderPass(enc, cmd_flags, device, pCreateInfo, pAllocator, pRenderPass);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyRenderPass(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyRenderPass(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1519,16 +1519,16 @@ static inline void vn_submit_vkDestroyRenderPass(struct vn_instance *vn_instance
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyRenderPass_reply(device, renderPass, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyRenderPass(enc, cmd_flags, device, renderPass, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetRenderAreaGranularity(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetRenderAreaGranularity(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1540,16 +1540,16 @@ static inline void vn_submit_vkGetRenderAreaGranularity(struct vn_instance *vn_i
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetRenderAreaGranularity_reply(device, renderPass, pGranularity) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetRenderAreaGranularity(enc, cmd_flags, device, renderPass, pGranularity);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkCreateRenderPass2(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateRenderPass2(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -1561,95 +1561,95 @@ static inline void vn_submit_vkCreateRenderPass2(struct vn_instance *vn_instance
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateRenderPass2_reply(device, pCreateInfo, pAllocator, pRenderPass) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateRenderPass2(enc, cmd_flags, device, pCreateInfo, pAllocator, pRenderPass);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateRenderPass(struct vn_instance *vn_instance, VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass)
static inline VkResult vn_call_vkCreateRenderPass(struct vn_ring *vn_ring, VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateRenderPass(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pRenderPass, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateRenderPass(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pRenderPass, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateRenderPass_reply(dec, device, pCreateInfo, pAllocator, pRenderPass);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateRenderPass(struct vn_instance *vn_instance, VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass)
static inline void vn_async_vkCreateRenderPass(struct vn_ring *vn_ring, VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateRenderPass(vn_instance, 0, device, pCreateInfo, pAllocator, pRenderPass, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateRenderPass(vn_ring, 0, device, pCreateInfo, pAllocator, pRenderPass, &submit);
}
static inline void vn_call_vkDestroyRenderPass(struct vn_instance *vn_instance, VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyRenderPass(struct vn_ring *vn_ring, VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyRenderPass(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, renderPass, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyRenderPass(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, renderPass, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyRenderPass_reply(dec, device, renderPass, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyRenderPass(struct vn_instance *vn_instance, VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyRenderPass(struct vn_ring *vn_ring, VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyRenderPass(vn_instance, 0, device, renderPass, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyRenderPass(vn_ring, 0, device, renderPass, pAllocator, &submit);
}
static inline void vn_call_vkGetRenderAreaGranularity(struct vn_instance *vn_instance, VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity)
static inline void vn_call_vkGetRenderAreaGranularity(struct vn_ring *vn_ring, VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetRenderAreaGranularity(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, renderPass, pGranularity, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetRenderAreaGranularity(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, renderPass, pGranularity, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkGetRenderAreaGranularity_reply(dec, device, renderPass, pGranularity);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkGetRenderAreaGranularity(struct vn_instance *vn_instance, VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity)
static inline void vn_async_vkGetRenderAreaGranularity(struct vn_ring *vn_ring, VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetRenderAreaGranularity(vn_instance, 0, device, renderPass, pGranularity, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetRenderAreaGranularity(vn_ring, 0, device, renderPass, pGranularity, &submit);
}
static inline VkResult vn_call_vkCreateRenderPass2(struct vn_instance *vn_instance, VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass)
static inline VkResult vn_call_vkCreateRenderPass2(struct vn_ring *vn_ring, VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateRenderPass2(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pRenderPass, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateRenderPass2(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pRenderPass, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateRenderPass2_reply(dec, device, pCreateInfo, pAllocator, pRenderPass);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateRenderPass2(struct vn_instance *vn_instance, VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass)
static inline void vn_async_vkCreateRenderPass2(struct vn_ring *vn_ring, VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateRenderPass2(vn_instance, 0, device, pCreateInfo, pAllocator, pRenderPass, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateRenderPass2(vn_ring, 0, device, pCreateInfo, pAllocator, pRenderPass, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_RENDER_PASS_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_SAMPLER_H
#define VN_PROTOCOL_DRIVER_SAMPLER_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkSamplerReductionModeCreateInfo chain */
@ -458,7 +458,7 @@ static inline void vn_decode_vkDestroySampler_reply(struct vn_cs_decoder *dec, V
/* skip pAllocator */
}
static inline void vn_submit_vkCreateSampler(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateSampler(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -470,16 +470,16 @@ static inline void vn_submit_vkCreateSampler(struct vn_instance *vn_instance, Vk
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateSampler_reply(device, pCreateInfo, pAllocator, pSampler) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateSampler(enc, cmd_flags, device, pCreateInfo, pAllocator, pSampler);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroySampler(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroySampler(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -491,54 +491,54 @@ static inline void vn_submit_vkDestroySampler(struct vn_instance *vn_instance, V
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroySampler_reply(device, sampler, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroySampler(enc, cmd_flags, device, sampler, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateSampler(struct vn_instance *vn_instance, VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler)
static inline VkResult vn_call_vkCreateSampler(struct vn_ring *vn_ring, VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateSampler(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pSampler, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateSampler(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pSampler, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateSampler_reply(dec, device, pCreateInfo, pAllocator, pSampler);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateSampler(struct vn_instance *vn_instance, VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler)
static inline void vn_async_vkCreateSampler(struct vn_ring *vn_ring, VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateSampler(vn_instance, 0, device, pCreateInfo, pAllocator, pSampler, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateSampler(vn_ring, 0, device, pCreateInfo, pAllocator, pSampler, &submit);
}
static inline void vn_call_vkDestroySampler(struct vn_instance *vn_instance, VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroySampler(struct vn_ring *vn_ring, VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroySampler(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, sampler, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroySampler(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, sampler, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroySampler_reply(dec, device, sampler, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroySampler(struct vn_instance *vn_instance, VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroySampler(struct vn_ring *vn_ring, VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroySampler(vn_instance, 0, device, sampler, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroySampler(vn_ring, 0, device, sampler, pAllocator, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_SAMPLER_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_SAMPLER_YCBCR_CONVERSION_H
#define VN_PROTOCOL_DRIVER_SAMPLER_YCBCR_CONVERSION_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkSamplerYcbcrConversionCreateInfo chain */
@ -202,7 +202,7 @@ static inline void vn_decode_vkDestroySamplerYcbcrConversion_reply(struct vn_cs_
/* skip pAllocator */
}
static inline void vn_submit_vkCreateSamplerYcbcrConversion(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateSamplerYcbcrConversion(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -214,16 +214,16 @@ static inline void vn_submit_vkCreateSamplerYcbcrConversion(struct vn_instance *
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateSamplerYcbcrConversion_reply(device, pCreateInfo, pAllocator, pYcbcrConversion) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateSamplerYcbcrConversion(enc, cmd_flags, device, pCreateInfo, pAllocator, pYcbcrConversion);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroySamplerYcbcrConversion(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroySamplerYcbcrConversion(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -235,54 +235,54 @@ static inline void vn_submit_vkDestroySamplerYcbcrConversion(struct vn_instance
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroySamplerYcbcrConversion_reply(device, ycbcrConversion, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroySamplerYcbcrConversion(enc, cmd_flags, device, ycbcrConversion, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateSamplerYcbcrConversion(struct vn_instance *vn_instance, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion)
static inline VkResult vn_call_vkCreateSamplerYcbcrConversion(struct vn_ring *vn_ring, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateSamplerYcbcrConversion(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pYcbcrConversion, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateSamplerYcbcrConversion(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pYcbcrConversion, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateSamplerYcbcrConversion_reply(dec, device, pCreateInfo, pAllocator, pYcbcrConversion);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateSamplerYcbcrConversion(struct vn_instance *vn_instance, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion)
static inline void vn_async_vkCreateSamplerYcbcrConversion(struct vn_ring *vn_ring, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateSamplerYcbcrConversion(vn_instance, 0, device, pCreateInfo, pAllocator, pYcbcrConversion, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateSamplerYcbcrConversion(vn_ring, 0, device, pCreateInfo, pAllocator, pYcbcrConversion, &submit);
}
static inline void vn_call_vkDestroySamplerYcbcrConversion(struct vn_instance *vn_instance, VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroySamplerYcbcrConversion(struct vn_ring *vn_ring, VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroySamplerYcbcrConversion(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, ycbcrConversion, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroySamplerYcbcrConversion(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, ycbcrConversion, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroySamplerYcbcrConversion_reply(dec, device, ycbcrConversion, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroySamplerYcbcrConversion(struct vn_instance *vn_instance, VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroySamplerYcbcrConversion(struct vn_ring *vn_ring, VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroySamplerYcbcrConversion(vn_instance, 0, device, ycbcrConversion, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroySamplerYcbcrConversion(vn_ring, 0, device, ycbcrConversion, pAllocator, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_SAMPLER_YCBCR_CONVERSION_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_SEMAPHORE_H
#define VN_PROTOCOL_DRIVER_SEMAPHORE_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/*
@ -745,7 +745,7 @@ static inline void vn_decode_vkImportSemaphoreResourceMESA_reply(struct vn_cs_de
/* skip pImportSemaphoreResourceInfo */
}
static inline void vn_submit_vkCreateSemaphore(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateSemaphore(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -757,16 +757,16 @@ static inline void vn_submit_vkCreateSemaphore(struct vn_instance *vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateSemaphore_reply(device, pCreateInfo, pAllocator, pSemaphore) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateSemaphore(enc, cmd_flags, device, pCreateInfo, pAllocator, pSemaphore);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroySemaphore(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroySemaphore(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -778,16 +778,16 @@ static inline void vn_submit_vkDestroySemaphore(struct vn_instance *vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroySemaphore_reply(device, semaphore, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroySemaphore(enc, cmd_flags, device, semaphore, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkGetSemaphoreCounterValue(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkSemaphore semaphore, uint64_t* pValue, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkGetSemaphoreCounterValue(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkSemaphore semaphore, uint64_t* pValue, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -799,16 +799,16 @@ static inline void vn_submit_vkGetSemaphoreCounterValue(struct vn_instance *vn_i
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkGetSemaphoreCounterValue_reply(device, semaphore, pValue) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkGetSemaphoreCounterValue(enc, cmd_flags, device, semaphore, pValue);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkWaitSemaphores(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkWaitSemaphores(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -820,16 +820,16 @@ static inline void vn_submit_vkWaitSemaphores(struct vn_instance *vn_instance, V
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkWaitSemaphores_reply(device, pWaitInfo, timeout) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkWaitSemaphores(enc, cmd_flags, device, pWaitInfo, timeout);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkSignalSemaphore(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkSignalSemaphore(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -841,16 +841,16 @@ static inline void vn_submit_vkSignalSemaphore(struct vn_instance *vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkSignalSemaphore_reply(device, pSignalInfo) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkSignalSemaphore(enc, cmd_flags, device, pSignalInfo);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkWaitSemaphoreResourceMESA(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkSemaphore semaphore, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkWaitSemaphoreResourceMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkSemaphore semaphore, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -862,16 +862,16 @@ static inline void vn_submit_vkWaitSemaphoreResourceMESA(struct vn_instance *vn_
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkWaitSemaphoreResourceMESA_reply(device, semaphore) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkWaitSemaphoreResourceMESA(enc, cmd_flags, device, semaphore);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkImportSemaphoreResourceMESA(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkImportSemaphoreResourceInfoMESA* pImportSemaphoreResourceInfo, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkImportSemaphoreResourceMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkImportSemaphoreResourceInfoMESA* pImportSemaphoreResourceInfo, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -883,158 +883,158 @@ static inline void vn_submit_vkImportSemaphoreResourceMESA(struct vn_instance *v
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkImportSemaphoreResourceMESA_reply(device, pImportSemaphoreResourceInfo) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkImportSemaphoreResourceMESA(enc, cmd_flags, device, pImportSemaphoreResourceInfo);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateSemaphore(struct vn_instance *vn_instance, VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore)
static inline VkResult vn_call_vkCreateSemaphore(struct vn_ring *vn_ring, VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateSemaphore(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pSemaphore, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateSemaphore(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pSemaphore, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateSemaphore_reply(dec, device, pCreateInfo, pAllocator, pSemaphore);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateSemaphore(struct vn_instance *vn_instance, VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore)
static inline void vn_async_vkCreateSemaphore(struct vn_ring *vn_ring, VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateSemaphore(vn_instance, 0, device, pCreateInfo, pAllocator, pSemaphore, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateSemaphore(vn_ring, 0, device, pCreateInfo, pAllocator, pSemaphore, &submit);
}
static inline void vn_call_vkDestroySemaphore(struct vn_instance *vn_instance, VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroySemaphore(struct vn_ring *vn_ring, VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroySemaphore(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, semaphore, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroySemaphore(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, semaphore, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroySemaphore_reply(dec, device, semaphore, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroySemaphore(struct vn_instance *vn_instance, VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroySemaphore(struct vn_ring *vn_ring, VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroySemaphore(vn_instance, 0, device, semaphore, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroySemaphore(vn_ring, 0, device, semaphore, pAllocator, &submit);
}
static inline VkResult vn_call_vkGetSemaphoreCounterValue(struct vn_instance *vn_instance, VkDevice device, VkSemaphore semaphore, uint64_t* pValue)
static inline VkResult vn_call_vkGetSemaphoreCounterValue(struct vn_ring *vn_ring, VkDevice device, VkSemaphore semaphore, uint64_t* pValue)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkGetSemaphoreCounterValue(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, semaphore, pValue, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetSemaphoreCounterValue(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, semaphore, pValue, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkGetSemaphoreCounterValue_reply(dec, device, semaphore, pValue);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkGetSemaphoreCounterValue(struct vn_instance *vn_instance, VkDevice device, VkSemaphore semaphore, uint64_t* pValue)
static inline void vn_async_vkGetSemaphoreCounterValue(struct vn_ring *vn_ring, VkDevice device, VkSemaphore semaphore, uint64_t* pValue)
{
struct vn_instance_submit_command submit;
vn_submit_vkGetSemaphoreCounterValue(vn_instance, 0, device, semaphore, pValue, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkGetSemaphoreCounterValue(vn_ring, 0, device, semaphore, pValue, &submit);
}
static inline VkResult vn_call_vkWaitSemaphores(struct vn_instance *vn_instance, VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout)
static inline VkResult vn_call_vkWaitSemaphores(struct vn_ring *vn_ring, VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkWaitSemaphores(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pWaitInfo, timeout, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkWaitSemaphores(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pWaitInfo, timeout, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkWaitSemaphores_reply(dec, device, pWaitInfo, timeout);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkWaitSemaphores(struct vn_instance *vn_instance, VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout)
static inline void vn_async_vkWaitSemaphores(struct vn_ring *vn_ring, VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout)
{
struct vn_instance_submit_command submit;
vn_submit_vkWaitSemaphores(vn_instance, 0, device, pWaitInfo, timeout, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkWaitSemaphores(vn_ring, 0, device, pWaitInfo, timeout, &submit);
}
static inline VkResult vn_call_vkSignalSemaphore(struct vn_instance *vn_instance, VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo)
static inline VkResult vn_call_vkSignalSemaphore(struct vn_ring *vn_ring, VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkSignalSemaphore(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pSignalInfo, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkSignalSemaphore(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pSignalInfo, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkSignalSemaphore_reply(dec, device, pSignalInfo);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkSignalSemaphore(struct vn_instance *vn_instance, VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo)
static inline void vn_async_vkSignalSemaphore(struct vn_ring *vn_ring, VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo)
{
struct vn_instance_submit_command submit;
vn_submit_vkSignalSemaphore(vn_instance, 0, device, pSignalInfo, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkSignalSemaphore(vn_ring, 0, device, pSignalInfo, &submit);
}
static inline void vn_call_vkWaitSemaphoreResourceMESA(struct vn_instance *vn_instance, VkDevice device, VkSemaphore semaphore)
static inline void vn_call_vkWaitSemaphoreResourceMESA(struct vn_ring *vn_ring, VkDevice device, VkSemaphore semaphore)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkWaitSemaphoreResourceMESA(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, semaphore, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkWaitSemaphoreResourceMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, semaphore, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkWaitSemaphoreResourceMESA_reply(dec, device, semaphore);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkWaitSemaphoreResourceMESA(struct vn_instance *vn_instance, VkDevice device, VkSemaphore semaphore)
static inline void vn_async_vkWaitSemaphoreResourceMESA(struct vn_ring *vn_ring, VkDevice device, VkSemaphore semaphore)
{
struct vn_instance_submit_command submit;
vn_submit_vkWaitSemaphoreResourceMESA(vn_instance, 0, device, semaphore, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkWaitSemaphoreResourceMESA(vn_ring, 0, device, semaphore, &submit);
}
static inline void vn_call_vkImportSemaphoreResourceMESA(struct vn_instance *vn_instance, VkDevice device, const VkImportSemaphoreResourceInfoMESA* pImportSemaphoreResourceInfo)
static inline void vn_call_vkImportSemaphoreResourceMESA(struct vn_ring *vn_ring, VkDevice device, const VkImportSemaphoreResourceInfoMESA* pImportSemaphoreResourceInfo)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkImportSemaphoreResourceMESA(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pImportSemaphoreResourceInfo, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkImportSemaphoreResourceMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pImportSemaphoreResourceInfo, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkImportSemaphoreResourceMESA_reply(dec, device, pImportSemaphoreResourceInfo);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkImportSemaphoreResourceMESA(struct vn_instance *vn_instance, VkDevice device, const VkImportSemaphoreResourceInfoMESA* pImportSemaphoreResourceInfo)
static inline void vn_async_vkImportSemaphoreResourceMESA(struct vn_ring *vn_ring, VkDevice device, const VkImportSemaphoreResourceInfoMESA* pImportSemaphoreResourceInfo)
{
struct vn_instance_submit_command submit;
vn_submit_vkImportSemaphoreResourceMESA(vn_instance, 0, device, pImportSemaphoreResourceInfo, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkImportSemaphoreResourceMESA(vn_ring, 0, device, pImportSemaphoreResourceInfo, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_SEMAPHORE_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_SHADER_MODULE_H
#define VN_PROTOCOL_DRIVER_SHADER_MODULE_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
static inline size_t vn_sizeof_vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule)
@ -135,7 +135,7 @@ static inline void vn_decode_vkDestroyShaderModule_reply(struct vn_cs_decoder *d
/* skip pAllocator */
}
static inline void vn_submit_vkCreateShaderModule(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateShaderModule(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -147,16 +147,16 @@ static inline void vn_submit_vkCreateShaderModule(struct vn_instance *vn_instanc
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateShaderModule_reply(device, pCreateInfo, pAllocator, pShaderModule) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateShaderModule(enc, cmd_flags, device, pCreateInfo, pAllocator, pShaderModule);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyShaderModule(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyShaderModule(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -168,54 +168,54 @@ static inline void vn_submit_vkDestroyShaderModule(struct vn_instance *vn_instan
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyShaderModule_reply(device, shaderModule, pAllocator) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyShaderModule(enc, cmd_flags, device, shaderModule, pAllocator);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline VkResult vn_call_vkCreateShaderModule(struct vn_instance *vn_instance, VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule)
static inline VkResult vn_call_vkCreateShaderModule(struct vn_ring *vn_ring, VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateShaderModule(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pShaderModule, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateShaderModule(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, pCreateInfo, pAllocator, pShaderModule, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
const VkResult ret = vn_decode_vkCreateShaderModule_reply(dec, device, pCreateInfo, pAllocator, pShaderModule);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
return ret;
} else {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
static inline void vn_async_vkCreateShaderModule(struct vn_instance *vn_instance, VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule)
static inline void vn_async_vkCreateShaderModule(struct vn_ring *vn_ring, VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateShaderModule(vn_instance, 0, device, pCreateInfo, pAllocator, pShaderModule, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateShaderModule(vn_ring, 0, device, pCreateInfo, pAllocator, pShaderModule, &submit);
}
static inline void vn_call_vkDestroyShaderModule(struct vn_instance *vn_instance, VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator)
static inline void vn_call_vkDestroyShaderModule(struct vn_ring *vn_ring, VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyShaderModule(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, shaderModule, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyShaderModule(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, device, shaderModule, pAllocator, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyShaderModule_reply(dec, device, shaderModule, pAllocator);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyShaderModule(struct vn_instance *vn_instance, VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator)
static inline void vn_async_vkDestroyShaderModule(struct vn_ring *vn_ring, VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyShaderModule(vn_instance, 0, device, shaderModule, pAllocator, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyShaderModule(vn_ring, 0, device, shaderModule, pAllocator, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_SHADER_MODULE_H */

View file

@ -8,7 +8,7 @@
#ifndef VN_PROTOCOL_DRIVER_TRANSPORT_H
#define VN_PROTOCOL_DRIVER_TRANSPORT_H
#include "vn_instance.h"
#include "vn_ring.h"
#include "vn_protocol_driver_structs.h"
/* struct VkCommandStreamDescriptionMESA */
@ -698,7 +698,7 @@ static inline void vn_decode_vkWaitRingSeqnoMESA_reply(struct vn_cs_decoder *dec
/* skip seqno */
}
static inline void vn_submit_vkSetReplyCommandStreamMESA(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, const VkCommandStreamDescriptionMESA* pStream, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkSetReplyCommandStreamMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, const VkCommandStreamDescriptionMESA* pStream, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -710,16 +710,16 @@ static inline void vn_submit_vkSetReplyCommandStreamMESA(struct vn_instance *vn_
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkSetReplyCommandStreamMESA_reply(pStream) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkSetReplyCommandStreamMESA(enc, cmd_flags, pStream);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkSeekReplyCommandStreamMESA(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, size_t position, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkSeekReplyCommandStreamMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, size_t position, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -731,16 +731,16 @@ static inline void vn_submit_vkSeekReplyCommandStreamMESA(struct vn_instance *vn
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkSeekReplyCommandStreamMESA_reply(position) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkSeekReplyCommandStreamMESA(enc, cmd_flags, position);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkExecuteCommandStreamsMESA(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, uint32_t streamCount, const VkCommandStreamDescriptionMESA* pStreams, const size_t* pReplyPositions, uint32_t dependencyCount, const VkCommandStreamDependencyMESA* pDependencies, VkCommandStreamExecutionFlagsMESA flags, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkExecuteCommandStreamsMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, uint32_t streamCount, const VkCommandStreamDescriptionMESA* pStreams, const size_t* pReplyPositions, uint32_t dependencyCount, const VkCommandStreamDependencyMESA* pDependencies, VkCommandStreamExecutionFlagsMESA flags, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -752,16 +752,16 @@ static inline void vn_submit_vkExecuteCommandStreamsMESA(struct vn_instance *vn_
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkExecuteCommandStreamsMESA_reply(streamCount, pStreams, pReplyPositions, dependencyCount, pDependencies, flags) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkExecuteCommandStreamsMESA(enc, cmd_flags, streamCount, pStreams, pReplyPositions, dependencyCount, pDependencies, flags);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkCreateRingMESA(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, uint64_t ring, const VkRingCreateInfoMESA* pCreateInfo, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkCreateRingMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, uint64_t ring, const VkRingCreateInfoMESA* pCreateInfo, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -773,16 +773,16 @@ static inline void vn_submit_vkCreateRingMESA(struct vn_instance *vn_instance, V
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkCreateRingMESA_reply(ring, pCreateInfo) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkCreateRingMESA(enc, cmd_flags, ring, pCreateInfo);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkDestroyRingMESA(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, uint64_t ring, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkDestroyRingMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, uint64_t ring, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -794,16 +794,16 @@ static inline void vn_submit_vkDestroyRingMESA(struct vn_instance *vn_instance,
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkDestroyRingMESA_reply(ring) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkDestroyRingMESA(enc, cmd_flags, ring);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkNotifyRingMESA(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, uint64_t ring, uint32_t seqno, VkRingNotifyFlagsMESA flags, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkNotifyRingMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, uint64_t ring, uint32_t seqno, VkRingNotifyFlagsMESA flags, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -815,16 +815,16 @@ static inline void vn_submit_vkNotifyRingMESA(struct vn_instance *vn_instance, V
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkNotifyRingMESA_reply(ring, seqno, flags) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkNotifyRingMESA(enc, cmd_flags, ring, seqno, flags);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkWriteRingExtraMESA(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, uint64_t ring, size_t offset, uint32_t value, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkWriteRingExtraMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, uint64_t ring, size_t offset, uint32_t value, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -836,16 +836,16 @@ static inline void vn_submit_vkWriteRingExtraMESA(struct vn_instance *vn_instanc
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkWriteRingExtraMESA_reply(ring, offset, value) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkWriteRingExtraMESA(enc, cmd_flags, ring, offset, value);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkSubmitVirtqueueSeqnoMESA(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, uint64_t ring, uint64_t seqno, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkSubmitVirtqueueSeqnoMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, uint64_t ring, uint64_t seqno, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -857,16 +857,16 @@ static inline void vn_submit_vkSubmitVirtqueueSeqnoMESA(struct vn_instance *vn_i
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkSubmitVirtqueueSeqnoMESA_reply(ring, seqno) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkSubmitVirtqueueSeqnoMESA(enc, cmd_flags, ring, seqno);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkWaitVirtqueueSeqnoMESA(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, uint64_t seqno, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkWaitVirtqueueSeqnoMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, uint64_t seqno, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -878,16 +878,16 @@ static inline void vn_submit_vkWaitVirtqueueSeqnoMESA(struct vn_instance *vn_ins
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkWaitVirtqueueSeqnoMESA_reply(seqno) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkWaitVirtqueueSeqnoMESA(enc, cmd_flags, seqno);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_submit_vkWaitRingSeqnoMESA(struct vn_instance *vn_instance, VkCommandFlagsEXT cmd_flags, uint64_t ring, uint64_t seqno, struct vn_instance_submit_command *submit)
static inline void vn_submit_vkWaitRingSeqnoMESA(struct vn_ring *vn_ring, VkCommandFlagsEXT cmd_flags, uint64_t ring, uint64_t seqno, struct vn_ring_submit_command *submit)
{
uint8_t local_cmd_data[VN_SUBMIT_LOCAL_CMD_SIZE];
void *cmd_data = local_cmd_data;
@ -899,203 +899,203 @@ static inline void vn_submit_vkWaitRingSeqnoMESA(struct vn_instance *vn_instance
}
const size_t reply_size = cmd_flags & VK_COMMAND_GENERATE_REPLY_BIT_EXT ? vn_sizeof_vkWaitRingSeqnoMESA_reply(ring, seqno) : 0;
struct vn_cs_encoder *enc = vn_instance_submit_command_init(vn_instance, submit, cmd_data, cmd_size, reply_size);
struct vn_cs_encoder *enc = vn_ring_submit_command_init(vn_ring, submit, cmd_data, cmd_size, reply_size);
if (cmd_size) {
vn_encode_vkWaitRingSeqnoMESA(enc, cmd_flags, ring, seqno);
vn_instance_submit_command(vn_instance, submit);
vn_ring_submit_command(vn_ring, submit);
if (cmd_data != local_cmd_data)
free(cmd_data);
}
}
static inline void vn_call_vkSetReplyCommandStreamMESA(struct vn_instance *vn_instance, const VkCommandStreamDescriptionMESA* pStream)
static inline void vn_call_vkSetReplyCommandStreamMESA(struct vn_ring *vn_ring, const VkCommandStreamDescriptionMESA* pStream)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkSetReplyCommandStreamMESA(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, pStream, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkSetReplyCommandStreamMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, pStream, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkSetReplyCommandStreamMESA_reply(dec, pStream);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkSetReplyCommandStreamMESA(struct vn_instance *vn_instance, const VkCommandStreamDescriptionMESA* pStream)
static inline void vn_async_vkSetReplyCommandStreamMESA(struct vn_ring *vn_ring, const VkCommandStreamDescriptionMESA* pStream)
{
struct vn_instance_submit_command submit;
vn_submit_vkSetReplyCommandStreamMESA(vn_instance, 0, pStream, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkSetReplyCommandStreamMESA(vn_ring, 0, pStream, &submit);
}
static inline void vn_call_vkSeekReplyCommandStreamMESA(struct vn_instance *vn_instance, size_t position)
static inline void vn_call_vkSeekReplyCommandStreamMESA(struct vn_ring *vn_ring, size_t position)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkSeekReplyCommandStreamMESA(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, position, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkSeekReplyCommandStreamMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, position, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkSeekReplyCommandStreamMESA_reply(dec, position);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkSeekReplyCommandStreamMESA(struct vn_instance *vn_instance, size_t position)
static inline void vn_async_vkSeekReplyCommandStreamMESA(struct vn_ring *vn_ring, size_t position)
{
struct vn_instance_submit_command submit;
vn_submit_vkSeekReplyCommandStreamMESA(vn_instance, 0, position, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkSeekReplyCommandStreamMESA(vn_ring, 0, position, &submit);
}
static inline void vn_call_vkExecuteCommandStreamsMESA(struct vn_instance *vn_instance, uint32_t streamCount, const VkCommandStreamDescriptionMESA* pStreams, const size_t* pReplyPositions, uint32_t dependencyCount, const VkCommandStreamDependencyMESA* pDependencies, VkCommandStreamExecutionFlagsMESA flags)
static inline void vn_call_vkExecuteCommandStreamsMESA(struct vn_ring *vn_ring, uint32_t streamCount, const VkCommandStreamDescriptionMESA* pStreams, const size_t* pReplyPositions, uint32_t dependencyCount, const VkCommandStreamDependencyMESA* pDependencies, VkCommandStreamExecutionFlagsMESA flags)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkExecuteCommandStreamsMESA(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, streamCount, pStreams, pReplyPositions, dependencyCount, pDependencies, flags, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkExecuteCommandStreamsMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, streamCount, pStreams, pReplyPositions, dependencyCount, pDependencies, flags, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkExecuteCommandStreamsMESA_reply(dec, streamCount, pStreams, pReplyPositions, dependencyCount, pDependencies, flags);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkExecuteCommandStreamsMESA(struct vn_instance *vn_instance, uint32_t streamCount, const VkCommandStreamDescriptionMESA* pStreams, const size_t* pReplyPositions, uint32_t dependencyCount, const VkCommandStreamDependencyMESA* pDependencies, VkCommandStreamExecutionFlagsMESA flags)
static inline void vn_async_vkExecuteCommandStreamsMESA(struct vn_ring *vn_ring, uint32_t streamCount, const VkCommandStreamDescriptionMESA* pStreams, const size_t* pReplyPositions, uint32_t dependencyCount, const VkCommandStreamDependencyMESA* pDependencies, VkCommandStreamExecutionFlagsMESA flags)
{
struct vn_instance_submit_command submit;
vn_submit_vkExecuteCommandStreamsMESA(vn_instance, 0, streamCount, pStreams, pReplyPositions, dependencyCount, pDependencies, flags, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkExecuteCommandStreamsMESA(vn_ring, 0, streamCount, pStreams, pReplyPositions, dependencyCount, pDependencies, flags, &submit);
}
static inline void vn_call_vkCreateRingMESA(struct vn_instance *vn_instance, uint64_t ring, const VkRingCreateInfoMESA* pCreateInfo)
static inline void vn_call_vkCreateRingMESA(struct vn_ring *vn_ring, uint64_t ring, const VkRingCreateInfoMESA* pCreateInfo)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkCreateRingMESA(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, ring, pCreateInfo, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateRingMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, ring, pCreateInfo, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkCreateRingMESA_reply(dec, ring, pCreateInfo);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkCreateRingMESA(struct vn_instance *vn_instance, uint64_t ring, const VkRingCreateInfoMESA* pCreateInfo)
static inline void vn_async_vkCreateRingMESA(struct vn_ring *vn_ring, uint64_t ring, const VkRingCreateInfoMESA* pCreateInfo)
{
struct vn_instance_submit_command submit;
vn_submit_vkCreateRingMESA(vn_instance, 0, ring, pCreateInfo, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkCreateRingMESA(vn_ring, 0, ring, pCreateInfo, &submit);
}
static inline void vn_call_vkDestroyRingMESA(struct vn_instance *vn_instance, uint64_t ring)
static inline void vn_call_vkDestroyRingMESA(struct vn_ring *vn_ring, uint64_t ring)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkDestroyRingMESA(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, ring, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyRingMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, ring, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkDestroyRingMESA_reply(dec, ring);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkDestroyRingMESA(struct vn_instance *vn_instance, uint64_t ring)
static inline void vn_async_vkDestroyRingMESA(struct vn_ring *vn_ring, uint64_t ring)
{
struct vn_instance_submit_command submit;
vn_submit_vkDestroyRingMESA(vn_instance, 0, ring, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkDestroyRingMESA(vn_ring, 0, ring, &submit);
}
static inline void vn_call_vkNotifyRingMESA(struct vn_instance *vn_instance, uint64_t ring, uint32_t seqno, VkRingNotifyFlagsMESA flags)
static inline void vn_call_vkNotifyRingMESA(struct vn_ring *vn_ring, uint64_t ring, uint32_t seqno, VkRingNotifyFlagsMESA flags)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkNotifyRingMESA(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, ring, seqno, flags, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkNotifyRingMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, ring, seqno, flags, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkNotifyRingMESA_reply(dec, ring, seqno, flags);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkNotifyRingMESA(struct vn_instance *vn_instance, uint64_t ring, uint32_t seqno, VkRingNotifyFlagsMESA flags)
static inline void vn_async_vkNotifyRingMESA(struct vn_ring *vn_ring, uint64_t ring, uint32_t seqno, VkRingNotifyFlagsMESA flags)
{
struct vn_instance_submit_command submit;
vn_submit_vkNotifyRingMESA(vn_instance, 0, ring, seqno, flags, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkNotifyRingMESA(vn_ring, 0, ring, seqno, flags, &submit);
}
static inline void vn_call_vkWriteRingExtraMESA(struct vn_instance *vn_instance, uint64_t ring, size_t offset, uint32_t value)
static inline void vn_call_vkWriteRingExtraMESA(struct vn_ring *vn_ring, uint64_t ring, size_t offset, uint32_t value)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkWriteRingExtraMESA(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, ring, offset, value, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkWriteRingExtraMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, ring, offset, value, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkWriteRingExtraMESA_reply(dec, ring, offset, value);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkWriteRingExtraMESA(struct vn_instance *vn_instance, uint64_t ring, size_t offset, uint32_t value)
static inline void vn_async_vkWriteRingExtraMESA(struct vn_ring *vn_ring, uint64_t ring, size_t offset, uint32_t value)
{
struct vn_instance_submit_command submit;
vn_submit_vkWriteRingExtraMESA(vn_instance, 0, ring, offset, value, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkWriteRingExtraMESA(vn_ring, 0, ring, offset, value, &submit);
}
static inline void vn_call_vkSubmitVirtqueueSeqnoMESA(struct vn_instance *vn_instance, uint64_t ring, uint64_t seqno)
static inline void vn_call_vkSubmitVirtqueueSeqnoMESA(struct vn_ring *vn_ring, uint64_t ring, uint64_t seqno)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkSubmitVirtqueueSeqnoMESA(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, ring, seqno, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkSubmitVirtqueueSeqnoMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, ring, seqno, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkSubmitVirtqueueSeqnoMESA_reply(dec, ring, seqno);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkSubmitVirtqueueSeqnoMESA(struct vn_instance *vn_instance, uint64_t ring, uint64_t seqno)
static inline void vn_async_vkSubmitVirtqueueSeqnoMESA(struct vn_ring *vn_ring, uint64_t ring, uint64_t seqno)
{
struct vn_instance_submit_command submit;
vn_submit_vkSubmitVirtqueueSeqnoMESA(vn_instance, 0, ring, seqno, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkSubmitVirtqueueSeqnoMESA(vn_ring, 0, ring, seqno, &submit);
}
static inline void vn_call_vkWaitVirtqueueSeqnoMESA(struct vn_instance *vn_instance, uint64_t seqno)
static inline void vn_call_vkWaitVirtqueueSeqnoMESA(struct vn_ring *vn_ring, uint64_t seqno)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkWaitVirtqueueSeqnoMESA(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, seqno, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkWaitVirtqueueSeqnoMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, seqno, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkWaitVirtqueueSeqnoMESA_reply(dec, seqno);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkWaitVirtqueueSeqnoMESA(struct vn_instance *vn_instance, uint64_t seqno)
static inline void vn_async_vkWaitVirtqueueSeqnoMESA(struct vn_ring *vn_ring, uint64_t seqno)
{
struct vn_instance_submit_command submit;
vn_submit_vkWaitVirtqueueSeqnoMESA(vn_instance, 0, seqno, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkWaitVirtqueueSeqnoMESA(vn_ring, 0, seqno, &submit);
}
static inline void vn_call_vkWaitRingSeqnoMESA(struct vn_instance *vn_instance, uint64_t ring, uint64_t seqno)
static inline void vn_call_vkWaitRingSeqnoMESA(struct vn_ring *vn_ring, uint64_t ring, uint64_t seqno)
{
VN_TRACE_FUNC();
struct vn_instance_submit_command submit;
vn_submit_vkWaitRingSeqnoMESA(vn_instance, VK_COMMAND_GENERATE_REPLY_BIT_EXT, ring, seqno, &submit);
struct vn_cs_decoder *dec = vn_instance_get_command_reply(vn_instance, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkWaitRingSeqnoMESA(vn_ring, VK_COMMAND_GENERATE_REPLY_BIT_EXT, ring, seqno, &submit);
struct vn_cs_decoder *dec = vn_ring_get_command_reply(vn_ring, &submit);
if (dec) {
vn_decode_vkWaitRingSeqnoMESA_reply(dec, ring, seqno);
vn_instance_free_command_reply(vn_instance, &submit);
vn_ring_free_command_reply(vn_ring, &submit);
}
}
static inline void vn_async_vkWaitRingSeqnoMESA(struct vn_instance *vn_instance, uint64_t ring, uint64_t seqno)
static inline void vn_async_vkWaitRingSeqnoMESA(struct vn_ring *vn_ring, uint64_t ring, uint64_t seqno)
{
struct vn_instance_submit_command submit;
vn_submit_vkWaitRingSeqnoMESA(vn_instance, 0, ring, seqno, &submit);
struct vn_ring_submit_command submit;
vn_submit_vkWaitRingSeqnoMESA(vn_ring, 0, ring, seqno, &submit);
}
#endif /* VN_PROTOCOL_DRIVER_TRANSPORT_H */

View file

@ -248,13 +248,13 @@ vn_buffer_init(struct vn_device *dev,
/* Check size instead of entry->valid to be lock free */
if (buf->requirements.memory.memoryRequirements.size) {
vn_async_vkCreateBuffer(dev->instance, dev_handle, create_info, NULL,
&buf_handle);
vn_async_vkCreateBuffer(dev->primary_ring, dev_handle, create_info,
NULL, &buf_handle);
return VK_SUCCESS;
}
/* If cache miss or not cacheable, make synchronous call */
result = vn_call_vkCreateBuffer(dev->instance, dev_handle, create_info,
result = vn_call_vkCreateBuffer(dev->primary_ring, dev_handle, create_info,
NULL, &buf_handle);
if (result != VK_SUCCESS)
return result;
@ -266,7 +266,7 @@ vn_buffer_init(struct vn_device *dev,
buf->requirements.dedicated.pNext = NULL;
vn_call_vkGetBufferMemoryRequirements2(
dev->instance, dev_handle,
dev->primary_ring, dev_handle,
&(VkBufferMemoryRequirementsInfo2){
.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
.buffer = buf_handle,
@ -410,7 +410,7 @@ vn_DestroyBuffer(VkDevice device,
if (!buf)
return;
vn_async_vkDestroyBuffer(dev->instance, device, buffer, NULL);
vn_async_vkDestroyBuffer(dev->primary_ring, device, buffer, NULL);
vn_object_base_fini(&buf->base);
vk_free(alloc, buf);
@ -422,7 +422,7 @@ vn_GetBufferDeviceAddress(VkDevice device,
{
struct vn_device *dev = vn_device_from_handle(device);
return vn_call_vkGetBufferDeviceAddress(dev->instance, device, pInfo);
return vn_call_vkGetBufferDeviceAddress(dev->primary_ring, device, pInfo);
}
uint64_t
@ -431,7 +431,7 @@ vn_GetBufferOpaqueCaptureAddress(VkDevice device,
{
struct vn_device *dev = vn_device_from_handle(device);
return vn_call_vkGetBufferOpaqueCaptureAddress(dev->instance, device,
return vn_call_vkGetBufferOpaqueCaptureAddress(dev->primary_ring, device,
pInfo);
}
@ -478,7 +478,7 @@ vn_BindBufferMemory2(VkDevice device,
if (local_infos)
pBindInfos = local_infos;
vn_async_vkBindBufferMemory2(dev->instance, device, bindInfoCount,
vn_async_vkBindBufferMemory2(dev->primary_ring, device, bindInfoCount,
pBindInfos);
vk_free(alloc, local_infos);
@ -507,7 +507,7 @@ vn_CreateBufferView(VkDevice device,
vn_object_base_init(&view->base, VK_OBJECT_TYPE_BUFFER_VIEW, &dev->base);
VkBufferView view_handle = vn_buffer_view_to_handle(view);
vn_async_vkCreateBufferView(dev->instance, device, pCreateInfo, NULL,
vn_async_vkCreateBufferView(dev->primary_ring, device, pCreateInfo, NULL,
&view_handle);
*pView = view_handle;
@ -528,7 +528,7 @@ vn_DestroyBufferView(VkDevice device,
if (!view)
return;
vn_async_vkDestroyBufferView(dev->instance, device, bufferView, NULL);
vn_async_vkDestroyBufferView(dev->primary_ring, device, bufferView, NULL);
vn_object_base_fini(&view->base);
vk_free(alloc, view);
@ -556,8 +556,8 @@ vn_GetDeviceBufferMemoryRequirements(
}
/* Make the host call if not found in cache or not cacheable */
vn_call_vkGetDeviceBufferMemoryRequirements(dev->instance, device, pInfo,
pMemoryRequirements);
vn_call_vkGetDeviceBufferMemoryRequirements(dev->primary_ring, device,
pInfo, pMemoryRequirements);
/* If cacheable, store mem requirements from the host call */
if (entry)

View file

@ -684,7 +684,7 @@ vn_CreateCommandPool(VkDevice device,
list_inithead(&pool->free_query_feedback_cmds);
VkCommandPool pool_handle = vn_command_pool_to_handle(pool);
vn_async_vkCreateCommandPool(dev->instance, device, pCreateInfo, NULL,
vn_async_vkCreateCommandPool(dev->primary_ring, device, pCreateInfo, NULL,
&pool_handle);
*pCommandPool = pool_handle;
@ -722,7 +722,8 @@ vn_DestroyCommandPool(VkDevice device,
* object ids while they still refer to the command buffers in the
* renderer.
*/
vn_async_vkDestroyCommandPool(dev->instance, device, commandPool, NULL);
vn_async_vkDestroyCommandPool(dev->primary_ring, device, commandPool,
NULL);
list_for_each_entry_safe(struct vn_command_buffer, cmd,
&pool->command_buffers, head) {
@ -789,7 +790,7 @@ vn_ResetCommandPool(VkDevice device,
&pool->command_buffers, head)
vn_cmd_reset(cmd);
vn_async_vkResetCommandPool(dev->instance, device, commandPool, flags);
vn_async_vkResetCommandPool(dev->primary_ring, device, commandPool, flags);
return VK_SUCCESS;
}
@ -802,7 +803,7 @@ vn_TrimCommandPool(VkDevice device,
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
vn_async_vkTrimCommandPool(dev->instance, device, commandPool, flags);
vn_async_vkTrimCommandPool(dev->primary_ring, device, commandPool, flags);
}
/* command buffer commands */
@ -851,7 +852,7 @@ vn_AllocateCommandBuffers(VkDevice device,
pCommandBuffers[i] = cmd_handle;
}
vn_async_vkAllocateCommandBuffers(dev->instance, device, pAllocateInfo,
vn_async_vkAllocateCommandBuffers(dev->primary_ring, device, pAllocateInfo,
pCommandBuffers);
return VK_SUCCESS;
@ -868,7 +869,7 @@ vn_FreeCommandBuffers(VkDevice device,
struct vn_command_pool *pool = vn_command_pool_from_handle(commandPool);
const VkAllocationCallbacks *alloc = &pool->allocator;
vn_async_vkFreeCommandBuffers(dev->instance, device, commandPool,
vn_async_vkFreeCommandBuffers(dev->primary_ring, device, commandPool,
commandBufferCount, pCommandBuffers);
for (uint32_t i = 0; i < commandBufferCount; i++) {
@ -903,11 +904,11 @@ vn_ResetCommandBuffer(VkCommandBuffer commandBuffer,
VN_TRACE_FUNC();
struct vn_command_buffer *cmd =
vn_command_buffer_from_handle(commandBuffer);
struct vn_instance *instance = cmd->pool->device->instance;
struct vn_ring *ring = cmd->pool->device->primary_ring;
vn_cmd_reset(cmd);
vn_async_vkResetCommandBuffer(instance, commandBuffer, flags);
vn_async_vkResetCommandBuffer(ring, commandBuffer, flags);
return VK_SUCCESS;
}
@ -1069,7 +1070,7 @@ vn_BeginCommandBuffer(VkCommandBuffer commandBuffer,
static void
vn_cmd_submit(struct vn_command_buffer *cmd)
{
struct vn_instance *instance = cmd->pool->device->instance;
struct vn_ring *ring = cmd->pool->device->primary_ring;
if (cmd->state != VN_COMMAND_BUFFER_STATE_RECORDING)
return;
@ -1081,7 +1082,7 @@ vn_cmd_submit(struct vn_command_buffer *cmd)
return;
}
if (vn_instance_ring_submit(instance, &cmd->cs) != VK_SUCCESS) {
if (vn_ring_submit_command_simple(ring, &cmd->cs) != VK_SUCCESS) {
cmd->state = VN_COMMAND_BUFFER_STATE_INVALID;
return;
}

View file

@ -27,7 +27,7 @@ vn_descriptor_set_layout_destroy(struct vn_device *dev,
vn_descriptor_set_layout_to_handle(layout);
const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
vn_async_vkDestroyDescriptorSetLayout(dev->instance, dev_handle,
vn_async_vkDestroyDescriptorSetLayout(dev->primary_ring, dev_handle,
layout_handle, NULL);
vn_object_base_fini(&layout->base);
@ -96,8 +96,8 @@ vn_GetDescriptorSetLayoutSupport(
struct vn_device *dev = vn_device_from_handle(device);
/* TODO per-device cache */
vn_call_vkGetDescriptorSetLayoutSupport(dev->instance, device, pCreateInfo,
pSupport);
vn_call_vkGetDescriptorSetLayoutSupport(dev->primary_ring, device,
pCreateInfo, pSupport);
}
static void
@ -187,7 +187,7 @@ vn_descriptor_set_layout_init(
}
}
vn_async_vkCreateDescriptorSetLayout(dev->instance, dev_handle,
vn_async_vkCreateDescriptorSetLayout(dev->primary_ring, dev_handle,
create_info, NULL, &layout_handle);
}
@ -387,8 +387,8 @@ vn_CreateDescriptorPool(VkDevice device,
list_inithead(&pool->descriptor_sets);
VkDescriptorPool pool_handle = vn_descriptor_pool_to_handle(pool);
vn_async_vkCreateDescriptorPool(dev->instance, device, pCreateInfo, NULL,
&pool_handle);
vn_async_vkCreateDescriptorPool(dev->primary_ring, device, pCreateInfo,
NULL, &pool_handle);
*pDescriptorPool = pool_handle;
@ -415,7 +415,7 @@ vn_DestroyDescriptorPool(VkDevice device,
* pool->descriptor_sets. Otherwise, another thread might reuse their
* object ids while they still refer to the sets in the renderer.
*/
vn_async_vkDestroyDescriptorPool(dev->instance, device, descriptorPool,
vn_async_vkDestroyDescriptorPool(dev->primary_ring, device, descriptorPool,
NULL);
list_for_each_entry_safe(struct vn_descriptor_set, set,
@ -602,7 +602,7 @@ vn_ResetDescriptorPool(VkDevice device,
vn_descriptor_pool_from_handle(descriptorPool);
const VkAllocationCallbacks *alloc = &pool->allocator;
vn_async_vkResetDescriptorPool(dev->instance, device, descriptorPool,
vn_async_vkResetDescriptorPool(dev->primary_ring, device, descriptorPool,
flags);
list_for_each_entry_safe(struct vn_descriptor_set, set,
@ -704,11 +704,11 @@ vn_AllocateDescriptorSets(VkDevice device,
}
if (pool->async_set_allocation) {
vn_async_vkAllocateDescriptorSets(dev->instance, device, pAllocateInfo,
pDescriptorSets);
vn_async_vkAllocateDescriptorSets(dev->primary_ring, device,
pAllocateInfo, pDescriptorSets);
} else {
result = vn_call_vkAllocateDescriptorSets(
dev->instance, device, pAllocateInfo, pDescriptorSets);
dev->primary_ring, device, pAllocateInfo, pDescriptorSets);
if (result != VK_SUCCESS)
goto fail;
}
@ -746,7 +746,7 @@ vn_FreeDescriptorSets(VkDevice device,
vn_descriptor_pool_from_handle(descriptorPool);
const VkAllocationCallbacks *alloc = &pool->allocator;
vn_async_vkFreeDescriptorSets(dev->instance, device, descriptorPool,
vn_async_vkFreeDescriptorSets(dev->primary_ring, device, descriptorPool,
descriptorSetCount, pDescriptorSets);
for (uint32_t i = 0; i < descriptorSetCount; i++) {
@ -1008,9 +1008,9 @@ vn_UpdateDescriptorSets(VkDevice device,
return;
}
vn_async_vkUpdateDescriptorSets(dev->instance, device, update->write_count,
update->writes, descriptorCopyCount,
pDescriptorCopies);
vn_async_vkUpdateDescriptorSets(dev->primary_ring, device,
update->write_count, update->writes,
descriptorCopyCount, pDescriptorCopies);
vk_free(alloc, update);
}
@ -1310,8 +1310,9 @@ vn_UpdateDescriptorSetWithTemplate(
struct vn_update_descriptor_sets *update =
vn_update_descriptor_set_with_template_locked(templ, set, pData);
vn_async_vkUpdateDescriptorSets(dev->instance, device, update->write_count,
update->writes, 0, NULL);
vn_async_vkUpdateDescriptorSets(dev->primary_ring, device,
update->write_count, update->writes, 0,
NULL);
mtx_unlock(&templ->mutex);
}

View file

@ -73,7 +73,7 @@ vn_queue_init(struct vn_device *dev,
};
VkQueue queue_handle = vn_queue_to_handle(queue);
vn_async_vkGetDeviceQueue2(dev->instance, vn_device_to_handle(dev),
vn_async_vkGetDeviceQueue2(dev->primary_ring, vn_device_to_handle(dev),
&device_queue_info, &queue_handle);
return VK_SUCCESS;
@ -459,8 +459,8 @@ vn_device_init(struct vn_device *dev,
if (!create_info)
return VK_ERROR_OUT_OF_HOST_MEMORY;
result = vn_call_vkCreateDevice(instance, physical_dev_handle, create_info,
NULL, &dev_handle);
result = vn_call_vkCreateDevice(dev->primary_ring, physical_dev_handle,
create_info, NULL, &dev_handle);
/* free the fixed extensions here since no longer needed below */
if (create_info == &local_create_info)
@ -520,7 +520,7 @@ out_memory_report_fini:
vn_device_memory_report_fini(dev);
out_destroy_device:
vn_call_vkDestroyDevice(instance, dev_handle, NULL);
vn_call_vkDestroyDevice(dev->primary_ring, dev_handle, NULL);
return result;
}
@ -605,13 +605,13 @@ vn_DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator)
* another thread might reuse their object ids while they still refer to
* the queues in the renderer.
*/
vn_async_vkDestroyDevice(dev->instance, device, NULL);
vn_async_vkDestroyDevice(dev->primary_ring, device, NULL);
/* We must emit vn_call_vkDestroyDevice before releasing bound ring_idx.
* Otherwise, another thread might reuse their ring_idx while they
* are still bound to the queues in the renderer.
*/
if (dev->instance->renderer->info.supports_multiple_timelines) {
if (dev->renderer->info.supports_multiple_timelines) {
for (uint32_t i = 0; i < dev->queue_count; i++) {
vn_instance_release_ring_idx(dev->instance, dev->queues[i].ring_idx);
}
@ -642,8 +642,8 @@ vn_GetDeviceGroupPeerMemoryFeatures(
/* TODO get and cache the values in vkCreateDevice */
vn_call_vkGetDeviceGroupPeerMemoryFeatures(
dev->instance, device, heapIndex, localDeviceIndex, remoteDeviceIndex,
pPeerMemoryFeatures);
dev->primary_ring, device, heapIndex, localDeviceIndex,
remoteDeviceIndex, pPeerMemoryFeatures);
}
VkResult
@ -671,7 +671,7 @@ vn_GetCalibratedTimestampsEXT(
uint64_t device_max_deviation = 0;
ret = vn_call_vkGetCalibratedTimestampsEXT(
dev->instance, device, 1, &pTimestampInfos[domain],
dev->primary_ring, device, 1, &pTimestampInfos[domain],
&pTimestamps[domain], &device_max_deviation);
if (ret != VK_SUCCESS)

View file

@ -17,6 +17,7 @@
#include "vn_buffer.h"
#include "vn_device.h"
#include "vn_image.h"
#include "vn_instance.h"
#include "vn_physical_device.h"
/* device memory commands */
@ -29,13 +30,13 @@ vn_device_memory_alloc_simple(struct vn_device *dev,
VkDevice dev_handle = vn_device_to_handle(dev);
VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
if (VN_PERF(NO_ASYNC_MEM_ALLOC)) {
return vn_call_vkAllocateMemory(dev->instance, dev_handle, alloc_info,
NULL, &mem_handle);
return vn_call_vkAllocateMemory(dev->primary_ring, dev_handle,
alloc_info, NULL, &mem_handle);
}
struct vn_instance_submit_command instance_submit;
vn_submit_vkAllocateMemory(dev->instance, 0, dev_handle, alloc_info, NULL,
&mem_handle, &instance_submit);
struct vn_ring_submit_command instance_submit;
vn_submit_vkAllocateMemory(dev->primary_ring, 0, dev_handle, alloc_info,
NULL, &mem_handle, &instance_submit);
if (!instance_submit.ring_seqno_valid)
return VK_ERROR_OUT_OF_HOST_MEMORY;
@ -50,7 +51,7 @@ vn_device_memory_free_simple(struct vn_device *dev,
{
VkDevice dev_handle = vn_device_to_handle(dev);
VkDeviceMemory mem_handle = vn_device_memory_to_handle(mem);
vn_async_vkFreeMemory(dev->instance, dev_handle, mem_handle, NULL);
vn_async_vkFreeMemory(dev->primary_ring, dev_handle, mem_handle, NULL);
}
static VkResult
@ -647,8 +648,8 @@ vn_GetDeviceMemoryOpaqueCaptureAddress(
vn_device_memory_from_handle(pInfo->memory);
assert(!mem->base_memory);
return vn_call_vkGetDeviceMemoryOpaqueCaptureAddress(dev->instance, device,
pInfo);
return vn_call_vkGetDeviceMemoryOpaqueCaptureAddress(dev->primary_ring,
device, pInfo);
}
VkResult
@ -769,7 +770,7 @@ vn_GetDeviceMemoryCommitment(VkDevice device,
vn_device_memory_from_handle(memory);
assert(!mem->base_memory);
vn_call_vkGetDeviceMemoryCommitment(dev->instance, device, memory,
vn_call_vkGetDeviceMemoryCommitment(dev->primary_ring, device, memory,
pCommittedMemoryInBytes);
}
@ -819,8 +820,8 @@ vn_get_memory_dma_buf_properties(struct vn_device *dev,
.sType = VK_STRUCTURE_TYPE_MEMORY_RESOURCE_PROPERTIES_MESA,
.pNext = &alloc_size_props,
};
result = vn_call_vkGetMemoryResourcePropertiesMESA(dev->instance, device,
bo->res_id, &props);
result = vn_call_vkGetMemoryResourcePropertiesMESA(
dev->primary_ring, device, bo->res_id, &props);
vn_renderer_bo_unref(dev->renderer, bo);
if (result != VK_SUCCESS)
return result;

View file

@ -79,7 +79,7 @@ vn_image_init_memory_requirements(struct vn_image *img,
VkImage img_handle = vn_image_to_handle(img);
if (plane_count == 1) {
vn_call_vkGetImageMemoryRequirements2(
dev->instance, dev_handle,
dev->primary_ring, dev_handle,
&(VkImageMemoryRequirementsInfo2){
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
.image = img_handle,
@ -94,7 +94,7 @@ vn_image_init_memory_requirements(struct vn_image *img,
} else {
for (uint32_t i = 0; i < plane_count; i++) {
vn_call_vkGetImageMemoryRequirements2(
dev->instance, dev_handle,
dev->primary_ring, dev_handle,
&(VkImageMemoryRequirementsInfo2){
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
.pNext =
@ -210,8 +210,8 @@ vn_image_init(struct vn_device *dev,
img->sharing_mode = create_info->sharingMode;
/* TODO async */
result =
vn_call_vkCreateImage(dev->instance, device, create_info, NULL, &image);
result = vn_call_vkCreateImage(dev->primary_ring, device, create_info,
NULL, &image);
if (result != VK_SUCCESS)
return result;
@ -447,7 +447,7 @@ vn_DestroyImage(VkDevice device,
/* must not ask renderer to destroy uninitialized deferred image */
if (!img->deferred_info || img->deferred_info->initialized)
vn_async_vkDestroyImage(dev->instance, device, image, NULL);
vn_async_vkDestroyImage(dev->primary_ring, device, image, NULL);
vn_image_deferred_info_fini(img, alloc);
@ -519,7 +519,7 @@ vn_GetImageSparseMemoryRequirements2(
}
/* TODO local or per-device cache */
vn_call_vkGetImageSparseMemoryRequirements2(dev->instance, device, pInfo,
vn_call_vkGetImageSparseMemoryRequirements2(dev->primary_ring, device, pInfo,
pSparseMemoryRequirementCount,
pSparseMemoryRequirements);
}
@ -596,7 +596,7 @@ vn_BindImageMemory2(VkDevice device,
if (local_infos)
pBindInfos = local_infos;
vn_async_vkBindImageMemory2(dev->instance, device, bindInfoCount,
vn_async_vkBindImageMemory2(dev->primary_ring, device, bindInfoCount,
pBindInfos);
vk_free(alloc, local_infos);
@ -614,7 +614,7 @@ vn_GetImageDrmFormatModifierPropertiesEXT(
/* TODO local cache */
return vn_call_vkGetImageDrmFormatModifierPropertiesEXT(
dev->instance, device, image, pProperties);
dev->primary_ring, device, image, pProperties);
}
void
@ -658,7 +658,7 @@ vn_GetImageSubresourceLayout(VkDevice device,
}
/* TODO local cache */
vn_call_vkGetImageSubresourceLayout(dev->instance, device, image,
vn_call_vkGetImageSubresourceLayout(dev->primary_ring, device, image,
pSubresource, pLayout);
}
@ -696,7 +696,7 @@ vn_CreateImageView(VkDevice device,
view->image = img;
VkImageView view_handle = vn_image_view_to_handle(view);
vn_async_vkCreateImageView(dev->instance, device, pCreateInfo, NULL,
vn_async_vkCreateImageView(dev->primary_ring, device, pCreateInfo, NULL,
&view_handle);
*pView = view_handle;
@ -717,7 +717,7 @@ vn_DestroyImageView(VkDevice device,
if (!view)
return;
vn_async_vkDestroyImageView(dev->instance, device, imageView, NULL);
vn_async_vkDestroyImageView(dev->primary_ring, device, imageView, NULL);
vn_object_base_fini(&view->base);
vk_free(alloc, view);
@ -744,7 +744,7 @@ vn_CreateSampler(VkDevice device,
vn_object_base_init(&sampler->base, VK_OBJECT_TYPE_SAMPLER, &dev->base);
VkSampler sampler_handle = vn_sampler_to_handle(sampler);
vn_async_vkCreateSampler(dev->instance, device, pCreateInfo, NULL,
vn_async_vkCreateSampler(dev->primary_ring, device, pCreateInfo, NULL,
&sampler_handle);
*pSampler = sampler_handle;
@ -765,7 +765,7 @@ vn_DestroySampler(VkDevice device,
if (!sampler)
return;
vn_async_vkDestroySampler(dev->instance, device, _sampler, NULL);
vn_async_vkDestroySampler(dev->primary_ring, device, _sampler, NULL);
vn_object_base_fini(&sampler->base);
vk_free(alloc, sampler);
@ -813,8 +813,8 @@ vn_CreateSamplerYcbcrConversion(
VkSamplerYcbcrConversion conv_handle =
vn_sampler_ycbcr_conversion_to_handle(conv);
vn_async_vkCreateSamplerYcbcrConversion(dev->instance, device, pCreateInfo,
NULL, &conv_handle);
vn_async_vkCreateSamplerYcbcrConversion(dev->primary_ring, device,
pCreateInfo, NULL, &conv_handle);
*pYcbcrConversion = conv_handle;
@ -835,7 +835,7 @@ vn_DestroySamplerYcbcrConversion(VkDevice device,
if (!conv)
return;
vn_async_vkDestroySamplerYcbcrConversion(dev->instance, device,
vn_async_vkDestroySamplerYcbcrConversion(dev->primary_ring, device,
ycbcrConversion, NULL);
vn_object_base_fini(&conv->base);
@ -851,8 +851,8 @@ vn_GetDeviceImageMemoryRequirements(
struct vn_device *dev = vn_device_from_handle(device);
/* TODO per-device cache */
vn_call_vkGetDeviceImageMemoryRequirements(dev->instance, device, pInfo,
pMemoryRequirements);
vn_call_vkGetDeviceImageMemoryRequirements(dev->primary_ring, device,
pInfo, pMemoryRequirements);
}
void
@ -872,6 +872,6 @@ vn_GetDeviceImageSparseMemoryRequirements(
/* TODO per-device cache */
vn_call_vkGetDeviceImageSparseMemoryRequirements(
dev->instance, device, pInfo, pSparseMemoryRequirementCount,
dev->primary_ring, device, pInfo, pSparseMemoryRequirementCount,
pSparseMemoryRequirements);
}

View file

@ -18,6 +18,7 @@
#include "vn_icd.h"
#include "vn_physical_device.h"
#include "vn_renderer.h"
#include "vn_ring.h"
#define VN_INSTANCE_RING_SIZE (128 * 1024)
@ -72,8 +73,8 @@ static VkResult
vn_instance_init_renderer_versions(struct vn_instance *instance)
{
uint32_t instance_version = 0;
VkResult result =
vn_call_vkEnumerateInstanceVersion(instance, &instance_version);
VkResult result = vn_call_vkEnumerateInstanceVersion(instance->ring.ring,
&instance_version);
if (result != VK_SUCCESS) {
if (VN_DEBUG(INIT))
vn_log(instance, "failed to enumerate renderer instance version");
@ -233,7 +234,7 @@ void
vn_instance_wait_roundtrip(struct vn_instance *instance,
uint64_t roundtrip_seqno)
{
vn_async_vkWaitVirtqueueSeqnoMESA(instance, roundtrip_seqno);
vn_async_vkWaitVirtqueueSeqnoMESA(instance->ring.ring, roundtrip_seqno);
}
/* instance commands */
@ -356,8 +357,8 @@ vn_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
}
VkInstance instance_handle = vn_instance_to_handle(instance);
result =
vn_call_vkCreateInstance(instance, pCreateInfo, NULL, &instance_handle);
result = vn_call_vkCreateInstance(instance->ring.ring, pCreateInfo, NULL,
&instance_handle);
if (result != VK_SUCCESS)
goto out_ring_fini;
@ -424,7 +425,7 @@ vn_DestroyInstance(VkInstance _instance,
mtx_destroy(&instance->physical_device.mutex);
mtx_destroy(&instance->ring_idx_mutex);
vn_call_vkDestroyInstance(instance, _instance, NULL);
vn_call_vkDestroyInstance(instance->ring.ring, _instance, NULL);
vn_instance_fini_ring(instance);

View file

@ -18,7 +18,6 @@
#include "vn_cs.h"
#include "vn_renderer.h"
#include "vn_renderer_util.h"
#include "vn_ring.h"
/* require and request at least Vulkan 1.1 at both instance and device levels
*/
@ -100,68 +99,6 @@ vn_instance_roundtrip(struct vn_instance *instance)
vn_instance_wait_roundtrip(instance, roundtrip_seqno);
}
static inline VkResult
vn_instance_ring_submit(struct vn_instance *instance,
const struct vn_cs_encoder *cs)
{
return vn_ring_submit_command_simple(instance->ring.ring, cs);
}
struct vn_instance_submit_command {
/* empty command implies errors */
struct vn_cs_encoder command;
struct vn_cs_encoder_buffer buffer;
/* non-zero implies waiting */
size_t reply_size;
/* when reply_size is non-zero, NULL can be returned on errors */
struct vn_renderer_shmem *reply_shmem;
struct vn_cs_decoder reply;
/* valid when instance ring submission succeeds */
bool ring_seqno_valid;
uint32_t ring_seqno;
};
static inline struct vn_cs_encoder *
vn_instance_submit_command_init(struct vn_instance *instance,
struct vn_instance_submit_command *submit,
void *cmd_data,
size_t cmd_size,
size_t reply_size)
{
submit->buffer = VN_CS_ENCODER_BUFFER_INITIALIZER(cmd_data);
submit->command = VN_CS_ENCODER_INITIALIZER(&submit->buffer, cmd_size);
submit->reply_size = reply_size;
submit->reply_shmem = NULL;
return &submit->command;
}
static inline void
vn_instance_submit_command(struct vn_instance *instance,
struct vn_instance_submit_command *submit)
{
vn_ring_submit_command(instance->ring.ring,
(struct vn_ring_submit_command *)submit);
}
static inline struct vn_cs_decoder *
vn_instance_get_command_reply(struct vn_instance *instance,
struct vn_instance_submit_command *submit)
{
return submit->reply_shmem ? &submit->reply : NULL;
}
static inline void
vn_instance_free_command_reply(struct vn_instance *instance,
struct vn_instance_submit_command *submit)
{
assert(submit->reply_shmem);
vn_renderer_shmem_unref(instance->renderer, submit->reply_shmem);
}
static inline struct vn_renderer_shmem *
vn_instance_cs_shmem_alloc(struct vn_instance *instance,
size_t size,

View file

@ -69,9 +69,9 @@ static void
vn_physical_device_init_features(struct vn_physical_device *physical_dev)
{
const uint32_t renderer_version = physical_dev->renderer_version;
struct vn_instance *instance = physical_dev->instance;
const struct vk_device_extension_table *exts =
&physical_dev->renderer_extensions;
struct vn_ring *ring = physical_dev->instance->ring.ring;
VkPhysicalDeviceFeatures2 feats2 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
};
@ -275,7 +275,7 @@ vn_physical_device_init_features(struct vn_physical_device *physical_dev)
/* clang-format on */
vn_call_vkGetPhysicalDeviceFeatures2(
instance, vn_physical_device_to_handle(physical_dev), &feats2);
ring, vn_physical_device_to_handle(physical_dev), &feats2);
struct vk_features *feats = &physical_dev->base.base.supported_features;
vk_set_physical_device_features(feats, &feats2);
@ -380,8 +380,8 @@ static void
vn_physical_device_init_properties(struct vn_physical_device *physical_dev)
{
const uint32_t renderer_version = physical_dev->renderer_version;
struct vn_physical_device_properties *props = &physical_dev->properties;
struct vn_instance *instance = physical_dev->instance;
struct vn_physical_device_properties *props = &physical_dev->properties;
const struct vk_device_extension_table *exts =
&physical_dev->renderer_extensions;
VkPhysicalDeviceProperties2 props2 = {
@ -471,7 +471,8 @@ vn_physical_device_init_properties(struct vn_physical_device *physical_dev)
/* clang-format on */
vn_call_vkGetPhysicalDeviceProperties2(
instance, vn_physical_device_to_handle(physical_dev), &props2);
instance->ring.ring, vn_physical_device_to_handle(physical_dev),
&props2);
VkPhysicalDeviceProperties *vk10_props = &props->vulkan_1_0;
VkPhysicalDeviceVulkan11Properties *vk11_props = &props->vulkan_1_1;
@ -697,11 +698,12 @@ vn_physical_device_init_queue_family_properties(
struct vn_physical_device *physical_dev)
{
struct vn_instance *instance = physical_dev->instance;
struct vn_ring *ring = instance->ring.ring;
const VkAllocationCallbacks *alloc = &instance->base.base.alloc;
uint32_t count;
vn_call_vkGetPhysicalDeviceQueueFamilyProperties2(
instance, vn_physical_device_to_handle(physical_dev), &count, NULL);
ring, vn_physical_device_to_handle(physical_dev), &count, NULL);
VkQueueFamilyProperties2 *props =
vk_alloc(alloc, sizeof(*props) * count, VN_DEFAULT_ALIGN,
@ -714,7 +716,7 @@ vn_physical_device_init_queue_family_properties(
props[i].pNext = NULL;
}
vn_call_vkGetPhysicalDeviceQueueFamilyProperties2(
instance, vn_physical_device_to_handle(physical_dev), &count, props);
ring, vn_physical_device_to_handle(physical_dev), &count, props);
/* Filter out queue families that exclusively support sparse binding as
* we need additional support for submitting feedback commands
@ -748,11 +750,12 @@ vn_physical_device_init_memory_properties(
struct vn_physical_device *physical_dev)
{
struct vn_instance *instance = physical_dev->instance;
struct vn_ring *ring = instance->ring.ring;
VkPhysicalDeviceMemoryProperties2 props2 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2,
};
vn_call_vkGetPhysicalDeviceMemoryProperties2(
instance, vn_physical_device_to_handle(physical_dev), &props2);
ring, vn_physical_device_to_handle(physical_dev), &props2);
physical_dev->memory_properties = props2.memoryProperties;
@ -862,6 +865,7 @@ vn_physical_device_init_external_fence_handles(
* and idle waiting.
*/
if (physical_dev->renderer_extensions.KHR_external_fence_fd) {
struct vn_ring *ring = physical_dev->instance->ring.ring;
const VkPhysicalDeviceExternalFenceInfo info = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO,
.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
@ -870,8 +874,7 @@ vn_physical_device_init_external_fence_handles(
.sType = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES,
};
vn_call_vkGetPhysicalDeviceExternalFenceProperties(
physical_dev->instance, vn_physical_device_to_handle(physical_dev),
&info, &props);
ring, vn_physical_device_to_handle(physical_dev), &info, &props);
physical_dev->renderer_sync_fd.fence_exportable =
props.externalFenceFeatures &
@ -909,6 +912,7 @@ vn_physical_device_init_external_semaphore_handles(
* host side rather than the guest side.
*/
if (physical_dev->renderer_extensions.KHR_external_semaphore_fd) {
struct vn_ring *ring = physical_dev->instance->ring.ring;
const VkPhysicalDeviceExternalSemaphoreInfo info = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
@ -917,8 +921,7 @@ vn_physical_device_init_external_semaphore_handles(
.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
};
vn_call_vkGetPhysicalDeviceExternalSemaphoreProperties(
physical_dev->instance, vn_physical_device_to_handle(physical_dev),
&info, &props);
ring, vn_physical_device_to_handle(physical_dev), &info, &props);
physical_dev->renderer_sync_fd.semaphore_exportable =
props.externalSemaphoreFeatures &
@ -1221,13 +1224,13 @@ vn_physical_device_init_renderer_extensions(
struct vn_physical_device *physical_dev)
{
struct vn_instance *instance = physical_dev->instance;
struct vn_ring *ring = instance->ring.ring;
const VkAllocationCallbacks *alloc = &instance->base.base.alloc;
/* get renderer extensions */
uint32_t count;
VkResult result = vn_call_vkEnumerateDeviceExtensionProperties(
instance, vn_physical_device_to_handle(physical_dev), NULL, &count,
NULL);
ring, vn_physical_device_to_handle(physical_dev), NULL, &count, NULL);
if (result != VK_SUCCESS)
return result;
@ -1239,7 +1242,7 @@ vn_physical_device_init_renderer_extensions(
return VK_ERROR_OUT_OF_HOST_MEMORY;
result = vn_call_vkEnumerateDeviceExtensionProperties(
instance, vn_physical_device_to_handle(physical_dev), NULL, &count,
ring, vn_physical_device_to_handle(physical_dev), NULL, &count,
exts);
if (result < VK_SUCCESS) {
vk_free(alloc, exts);
@ -1287,6 +1290,7 @@ vn_physical_device_init_renderer_version(
struct vn_physical_device *physical_dev)
{
struct vn_instance *instance = physical_dev->instance;
struct vn_ring *ring = instance->ring.ring;
/*
* We either check and enable VK_KHR_get_physical_device_properties2, or we
@ -1294,7 +1298,7 @@ vn_physical_device_init_renderer_version(
*/
VkPhysicalDeviceProperties props;
vn_call_vkGetPhysicalDeviceProperties(
instance, vn_physical_device_to_handle(physical_dev), &props);
ring, vn_physical_device_to_handle(physical_dev), &props);
if (props.apiVersion < VN_MIN_RENDERER_VERSION) {
if (VN_DEBUG(INIT)) {
vn_log(instance, "%s has unsupported renderer device version %d.%d",
@ -1390,11 +1394,12 @@ vn_instance_enumerate_physical_device_groups_locked(
uint32_t physical_dev_count)
{
VkInstance instance_handle = vn_instance_to_handle(instance);
struct vn_ring *ring = instance->ring.ring;
const VkAllocationCallbacks *alloc = &instance->base.base.alloc;
VkResult result;
uint32_t count;
result = vn_call_vkEnumeratePhysicalDeviceGroups(instance, instance_handle,
result = vn_call_vkEnumeratePhysicalDeviceGroups(ring, instance_handle,
&count, NULL);
if (result != VK_SUCCESS)
return result;
@ -1430,7 +1435,7 @@ vn_instance_enumerate_physical_device_groups_locked(
}
}
result = vn_call_vkEnumeratePhysicalDeviceGroups(instance, instance_handle,
result = vn_call_vkEnumeratePhysicalDeviceGroups(ring, instance_handle,
&count, groups);
if (result != VK_SUCCESS) {
vk_free(alloc, groups);
@ -1484,13 +1489,14 @@ enumerate_physical_devices(struct vn_instance *instance,
uint32_t *out_count)
{
const VkAllocationCallbacks *alloc = &instance->base.base.alloc;
struct vn_ring *ring = instance->ring.ring;
struct vn_physical_device *physical_devs = NULL;
VkPhysicalDevice *handles = NULL;
VkResult result;
uint32_t count = 0;
result = vn_call_vkEnumeratePhysicalDevices(
instance, vn_instance_to_handle(instance), &count, NULL);
ring, vn_instance_to_handle(instance), &count, NULL);
if (result != VK_SUCCESS || !count)
return result;
@ -1528,7 +1534,7 @@ enumerate_physical_devices(struct vn_instance *instance,
}
result = vn_call_vkEnumeratePhysicalDevices(
instance, vn_instance_to_handle(instance), &count, handles);
ring, vn_instance_to_handle(instance), &count, handles);
if (result != VK_SUCCESS)
goto fail;
@ -1857,7 +1863,7 @@ vn_GetPhysicalDeviceMemoryProperties2(
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
struct vn_instance *instance = physical_dev->instance;
struct vn_ring *ring = physical_dev->instance->ring.ring;
VkPhysicalDeviceMemoryBudgetPropertiesEXT *memory_budget = NULL;
/* Don't waste time searching for unsupported structs. */
@ -1871,7 +1877,7 @@ vn_GetPhysicalDeviceMemoryProperties2(
* copy. For dynamic properties, we must query the server.
*/
if (memory_budget) {
vn_call_vkGetPhysicalDeviceMemoryProperties2(instance, physicalDevice,
vn_call_vkGetPhysicalDeviceMemoryProperties2(ring, physicalDevice,
pMemoryProperties);
}
@ -1890,6 +1896,7 @@ vn_GetPhysicalDeviceFormatProperties2(VkPhysicalDevice physicalDevice,
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
struct vn_ring *ring = physical_dev->instance->ring.ring;
struct vn_format_properties_entry *entry = NULL;
if (!pFormatProperties->pNext) {
@ -1900,8 +1907,8 @@ vn_GetPhysicalDeviceFormatProperties2(VkPhysicalDevice physicalDevice,
}
}
vn_call_vkGetPhysicalDeviceFormatProperties2(
physical_dev->instance, physicalDevice, format, pFormatProperties);
vn_call_vkGetPhysicalDeviceFormatProperties2(ring, physicalDevice, format,
pFormatProperties);
if (entry) {
vn_physical_device_add_format_properties(
@ -2060,6 +2067,7 @@ vn_GetPhysicalDeviceImageFormatProperties2(
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
struct vn_ring *ring = physical_dev->instance->ring.ring;
const VkExternalMemoryHandleTypeFlagBits renderer_handle_type =
physical_dev->external_memory.renderer_handle_type;
const VkExternalMemoryHandleTypeFlags supported_handle_types =
@ -2181,8 +2189,7 @@ vn_GetPhysicalDeviceImageFormatProperties2(
VkResult result;
/* TODO per-device cache */
result = vn_call_vkGetPhysicalDeviceImageFormatProperties2(
physical_dev->instance, physicalDevice, pImageFormatInfo,
pImageFormatProperties);
ring, physicalDevice, pImageFormatInfo, pImageFormatProperties);
if (result != VK_SUCCESS || !external_info)
return vn_result(physical_dev->instance, result);
@ -2252,6 +2259,7 @@ vn_GetPhysicalDeviceSparseImageFormatProperties2(
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
struct vn_ring *ring = physical_dev->instance->ring.ring;
/* If VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT is not supported for the given
* arguments, pPropertyCount will be set to zero upon return, and no data
* will be written to pProperties.
@ -2263,8 +2271,7 @@ vn_GetPhysicalDeviceSparseImageFormatProperties2(
/* TODO per-device cache */
vn_call_vkGetPhysicalDeviceSparseImageFormatProperties2(
physical_dev->instance, physicalDevice, pFormatInfo, pPropertyCount,
pProperties);
ring, physicalDevice, pFormatInfo, pPropertyCount, pProperties);
}
void
@ -2275,6 +2282,7 @@ vn_GetPhysicalDeviceExternalBufferProperties(
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
struct vn_ring *ring = physical_dev->instance->ring.ring;
const VkExternalMemoryHandleTypeFlagBits renderer_handle_type =
physical_dev->external_memory.renderer_handle_type;
const VkExternalMemoryHandleTypeFlags supported_handle_types =
@ -2301,8 +2309,7 @@ vn_GetPhysicalDeviceExternalBufferProperties(
/* TODO per-device cache */
vn_call_vkGetPhysicalDeviceExternalBufferProperties(
physical_dev->instance, physicalDevice, pExternalBufferInfo,
pExternalBufferProperties);
ring, physicalDevice, pExternalBufferInfo, pExternalBufferProperties);
if (renderer_handle_type ==
VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT &&
@ -2403,7 +2410,8 @@ vn_GetPhysicalDeviceCalibrateableTimeDomainsEXT(
{
struct vn_physical_device *physical_dev =
vn_physical_device_from_handle(physicalDevice);
struct vn_ring *ring = physical_dev->instance->ring.ring;
return vn_call_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(
physical_dev->instance, physicalDevice, pTimeDomainCount, pTimeDomains);
ring, physicalDevice, pTimeDomainCount, pTimeDomains);
}

View file

@ -223,7 +223,7 @@ vn_CreateShaderModule(VkDevice device,
vn_object_base_init(&mod->base, VK_OBJECT_TYPE_SHADER_MODULE, &dev->base);
VkShaderModule mod_handle = vn_shader_module_to_handle(mod);
vn_async_vkCreateShaderModule(dev->instance, device, pCreateInfo, NULL,
vn_async_vkCreateShaderModule(dev->primary_ring, device, pCreateInfo, NULL,
&mod_handle);
*pShaderModule = mod_handle;
@ -244,7 +244,8 @@ vn_DestroyShaderModule(VkDevice device,
if (!mod)
return;
vn_async_vkDestroyShaderModule(dev->instance, device, shaderModule, NULL);
vn_async_vkDestroyShaderModule(dev->primary_ring, device, shaderModule,
NULL);
vn_object_base_fini(&mod->base);
vk_free(alloc, mod);
@ -262,7 +263,7 @@ vn_pipeline_layout_destroy(struct vn_device *dev,
dev, pipeline_layout->push_descriptor_set_layout);
}
vn_async_vkDestroyPipelineLayout(
dev->instance, vn_device_to_handle(dev),
dev->primary_ring, vn_device_to_handle(dev),
vn_pipeline_layout_to_handle(pipeline_layout), NULL);
vn_object_base_fini(&pipeline_layout->base);
@ -329,8 +330,8 @@ vn_CreatePipelineLayout(VkDevice device,
layout->has_push_constant_ranges = pCreateInfo->pPushConstantRanges > 0;
VkPipelineLayout layout_handle = vn_pipeline_layout_to_handle(layout);
vn_async_vkCreatePipelineLayout(dev->instance, device, pCreateInfo, NULL,
&layout_handle);
vn_async_vkCreatePipelineLayout(dev->primary_ring, device, pCreateInfo,
NULL, &layout_handle);
*pPipelineLayout = layout_handle;
@ -386,8 +387,8 @@ vn_CreatePipelineCache(VkDevice device,
}
VkPipelineCache cache_handle = vn_pipeline_cache_to_handle(cache);
vn_async_vkCreatePipelineCache(dev->instance, device, pCreateInfo, NULL,
&cache_handle);
vn_async_vkCreatePipelineCache(dev->primary_ring, device, pCreateInfo,
NULL, &cache_handle);
*pPipelineCache = cache_handle;
@ -409,7 +410,7 @@ vn_DestroyPipelineCache(VkDevice device,
if (!cache)
return;
vn_async_vkDestroyPipelineCache(dev->instance, device, pipelineCache,
vn_async_vkDestroyPipelineCache(dev->primary_ring, device, pipelineCache,
NULL);
vn_object_base_fini(&cache->base);
@ -429,7 +430,7 @@ vn_GetPipelineCacheData(VkDevice device,
struct vk_pipeline_cache_header *header = pData;
VkResult result;
if (!pData) {
result = vn_call_vkGetPipelineCacheData(dev->instance, device,
result = vn_call_vkGetPipelineCacheData(dev->primary_ring, device,
pipelineCache, pDataSize, NULL);
if (result != VK_SUCCESS)
return vn_error(dev->instance, result);
@ -453,7 +454,7 @@ vn_GetPipelineCacheData(VkDevice device,
*pDataSize -= header->header_size;
result =
vn_call_vkGetPipelineCacheData(dev->instance, device, pipelineCache,
vn_call_vkGetPipelineCacheData(dev->primary_ring, device, pipelineCache,
pDataSize, pData + header->header_size);
if (result < VK_SUCCESS)
return vn_error(dev->instance, result);
@ -472,7 +473,7 @@ vn_MergePipelineCaches(VkDevice device,
VN_TRACE_FUNC();
struct vn_device *dev = vn_device_from_handle(device);
vn_async_vkMergePipelineCaches(dev->instance, device, dstCache,
vn_async_vkMergePipelineCaches(dev->primary_ring, device, dstCache,
srcCacheCount, pSrcCaches);
return VK_SUCCESS;
@ -1405,14 +1406,14 @@ vn_CreateGraphicsPipelines(VkDevice device,
if (want_sync) {
result = vn_call_vkCreateGraphicsPipelines(
dev->instance, device, pipelineCache, createInfoCount, pCreateInfos,
NULL, pPipelines);
dev->primary_ring, device, pipelineCache, createInfoCount,
pCreateInfos, NULL, pPipelines);
if (result != VK_SUCCESS)
vn_destroy_failed_pipelines(dev, createInfoCount, pPipelines, alloc);
} else {
vn_async_vkCreateGraphicsPipelines(dev->instance, device, pipelineCache,
createInfoCount, pCreateInfos, NULL,
pPipelines);
vn_async_vkCreateGraphicsPipelines(dev->primary_ring, device,
pipelineCache, createInfoCount,
pCreateInfos, NULL, pPipelines);
result = VK_SUCCESS;
}
@ -1459,14 +1460,14 @@ vn_CreateComputePipelines(VkDevice device,
if (want_sync) {
result = vn_call_vkCreateComputePipelines(
dev->instance, device, pipelineCache, createInfoCount, pCreateInfos,
NULL, pPipelines);
dev->primary_ring, device, pipelineCache, createInfoCount,
pCreateInfos, NULL, pPipelines);
if (result != VK_SUCCESS)
vn_destroy_failed_pipelines(dev, createInfoCount, pPipelines, alloc);
} else {
vn_async_vkCreateComputePipelines(dev->instance, device, pipelineCache,
createInfoCount, pCreateInfos, NULL,
pPipelines);
vn_async_vkCreateComputePipelines(dev->primary_ring, device,
pipelineCache, createInfoCount,
pCreateInfos, NULL, pPipelines);
result = VK_SUCCESS;
}
@ -1491,7 +1492,7 @@ vn_DestroyPipeline(VkDevice device,
vn_pipeline_layout_unref(dev, pipeline->layout);
}
vn_async_vkDestroyPipeline(dev->instance, device, _pipeline, NULL);
vn_async_vkDestroyPipeline(dev->primary_ring, device, _pipeline, NULL);
vn_object_base_fini(&pipeline->base);
vk_free(alloc, pipeline);

View file

@ -123,7 +123,7 @@ vn_CreateQueryPool(VkDevice device,
};
VkQueryPool pool_handle = vn_query_pool_to_handle(pool);
vn_async_vkCreateQueryPool(dev->instance, device, pCreateInfo, NULL,
vn_async_vkCreateQueryPool(dev->primary_ring, device, pCreateInfo, NULL,
&pool_handle);
*pQueryPool = pool_handle;
@ -149,7 +149,7 @@ vn_DestroyQueryPool(VkDevice device,
if (pool->feedback)
vn_feedback_buffer_destroy(dev, pool->feedback, alloc);
vn_async_vkDestroyQueryPool(dev->instance, device, queryPool, NULL);
vn_async_vkDestroyQueryPool(dev->primary_ring, device, queryPool, NULL);
vn_object_base_fini(&pool->base);
vk_free(alloc, pool);
@ -165,7 +165,7 @@ vn_ResetQueryPool(VkDevice device,
struct vn_device *dev = vn_device_from_handle(device);
struct vn_query_pool *pool = vn_query_pool_from_handle(queryPool);
vn_async_vkResetQueryPool(dev->instance, device, queryPool, firstQuery,
vn_async_vkResetQueryPool(dev->primary_ring, device, queryPool, firstQuery,
queryCount);
if (pool->feedback) {
/* Feedback results are always 64 bit and include availability bit
@ -336,8 +336,8 @@ vn_GetQueryPoolResults(VkDevice device,
return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
result = vn_call_vkGetQueryPoolResults(
dev->instance, device, queryPool, firstQuery, queryCount, packed_size,
packed_data, packed_stride, packed_flags);
dev->primary_ring, device, queryPool, firstQuery, queryCount,
packed_size, packed_data, packed_stride, packed_flags);
if (packed_data == pData)
return vn_result(dev->instance, result);

View file

@ -246,7 +246,7 @@ vn_queue_submission_fix_batch_semaphores(struct vn_queue_submission *submit,
.semaphore = sem_handle,
.resourceId = 0,
};
vn_async_vkImportSemaphoreResourceMESA(dev->instance, dev_handle,
vn_async_vkImportSemaphoreResourceMESA(dev->primary_ring, dev_handle,
&res_info);
}
@ -1100,11 +1100,11 @@ vn_queue_submit(struct vn_queue_submission *submit)
if (VN_PERF(NO_ASYNC_QUEUE_SUBMIT)) {
if (submit->batch_type == VK_STRUCTURE_TYPE_SUBMIT_INFO_2) {
result = vn_call_vkQueueSubmit2(
instance, submit->queue_handle, submit->batch_count,
dev->primary_ring, submit->queue_handle, submit->batch_count,
submit->submit_batches2, submit->fence_handle);
} else {
result = vn_call_vkQueueSubmit(
instance, submit->queue_handle, submit->batch_count,
dev->primary_ring, submit->queue_handle, submit->batch_count,
submit->submit_batches, submit->fence_handle);
}
@ -1113,13 +1113,13 @@ vn_queue_submit(struct vn_queue_submission *submit)
return vn_error(instance, result);
}
} else {
struct vn_instance_submit_command instance_submit;
struct vn_ring_submit_command instance_submit;
if (submit->batch_type == VK_STRUCTURE_TYPE_SUBMIT_INFO_2) {
vn_submit_vkQueueSubmit2(
instance, 0, submit->queue_handle, submit->batch_count,
dev->primary_ring, 0, submit->queue_handle, submit->batch_count,
submit->submit_batches2, submit->fence_handle, &instance_submit);
} else {
vn_submit_vkQueueSubmit(instance, 0, submit->queue_handle,
vn_submit_vkQueueSubmit(dev->primary_ring, 0, submit->queue_handle,
submit->batch_count, submit->submit_batches,
submit->fence_handle, &instance_submit);
}
@ -1211,13 +1211,13 @@ vn_queue_bind_sparse_submit(struct vn_queue_submission *submit)
if (VN_PERF(NO_ASYNC_QUEUE_SUBMIT)) {
result = vn_call_vkQueueBindSparse(
instance, submit->queue_handle, submit->batch_count,
dev->primary_ring, submit->queue_handle, submit->batch_count,
submit->sparse_batches, submit->fence_handle);
if (result != VK_SUCCESS)
return vn_error(instance, result);
} else {
struct vn_instance_submit_command instance_submit;
vn_submit_vkQueueBindSparse(instance, 0, submit->queue_handle,
struct vn_ring_submit_command instance_submit;
vn_submit_vkQueueBindSparse(dev->primary_ring, 0, submit->queue_handle,
submit->batch_count, submit->sparse_batches,
submit->fence_handle, &instance_submit);
@ -1503,7 +1503,7 @@ vn_fence_feedback_init(struct vn_device *dev,
/* Fence feedback implementation relies on vkWaitForFences to cover the gap
* between feedback slot signaling and the actual fence signal operation.
*/
if (unlikely(!dev->instance->renderer->info.allow_vk_wait_syncs))
if (unlikely(!dev->renderer->info.allow_vk_wait_syncs))
return VK_SUCCESS;
if (VN_PERF(NO_FENCE_FEEDBACK))
@ -1600,7 +1600,8 @@ vn_CreateFence(VkDevice device,
goto out_payloads_fini;
*pFence = vn_fence_to_handle(fence);
vn_async_vkCreateFence(dev->instance, device, pCreateInfo, NULL, pFence);
vn_async_vkCreateFence(dev->primary_ring, device, pCreateInfo, NULL,
pFence);
return VK_SUCCESS;
@ -1628,7 +1629,7 @@ vn_DestroyFence(VkDevice device,
if (!fence)
return;
vn_async_vkDestroyFence(dev->instance, device, _fence, NULL);
vn_async_vkDestroyFence(dev->primary_ring, device, _fence, NULL);
vn_fence_feedback_fini(dev, fence, alloc);
@ -1647,9 +1648,9 @@ vn_ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences)
/* TODO if the fence is shared-by-ref, this needs to be synchronous */
if (false)
vn_call_vkResetFences(dev->instance, device, fenceCount, pFences);
vn_call_vkResetFences(dev->primary_ring, device, fenceCount, pFences);
else
vn_async_vkResetFences(dev->instance, device, fenceCount, pFences);
vn_async_vkResetFences(dev->primary_ring, device, fenceCount, pFences);
for (uint32_t i = 0; i < fenceCount; i++) {
struct vn_fence *fence = vn_fence_from_handle(pFences[i]);
@ -1688,11 +1689,11 @@ vn_GetFenceStatus(VkDevice device, VkFence _fence)
* longer sees any fence status checks and falsely believes the
* caller does not sync.
*/
vn_async_vkWaitForFences(dev->instance, device, 1, &_fence,
vn_async_vkWaitForFences(dev->primary_ring, device, 1, &_fence,
VK_TRUE, UINT64_MAX);
}
} else {
result = vn_call_vkGetFenceStatus(dev->instance, device, _fence);
result = vn_call_vkGetFenceStatus(dev->primary_ring, device, _fence);
}
break;
case VN_SYNC_TYPE_IMPORTED_SYNC_FD:
@ -1912,7 +1913,7 @@ vn_GetFenceFdKHR(VkDevice device,
if (result != VK_SUCCESS)
return vn_error(dev->instance, result);
vn_async_vkResetFenceResourceMESA(dev->instance, device,
vn_async_vkResetFenceResourceMESA(dev->primary_ring, device,
pGetFdInfo->fence);
vn_sync_payload_release(dev, &fence->temporary);
@ -2108,7 +2109,7 @@ vn_CreateSemaphore(VkDevice device,
}
VkSemaphore sem_handle = vn_semaphore_to_handle(sem);
vn_async_vkCreateSemaphore(dev->instance, device, pCreateInfo, NULL,
vn_async_vkCreateSemaphore(dev->primary_ring, device, pCreateInfo, NULL,
&sem_handle);
*pSemaphore = sem_handle;
@ -2139,7 +2140,7 @@ vn_DestroySemaphore(VkDevice device,
if (!sem)
return;
vn_async_vkDestroySemaphore(dev->instance, device, semaphore, NULL);
vn_async_vkDestroySemaphore(dev->primary_ring, device, semaphore, NULL);
if (sem->type == VK_SEMAPHORE_TYPE_TIMELINE)
vn_timeline_semaphore_feedback_fini(dev, sem);
@ -2192,7 +2193,7 @@ vn_GetSemaphoreCounterValue(VkDevice device,
.pValues = pValue,
};
vn_async_vkWaitSemaphores(dev->instance, device, &wait_info,
vn_async_vkWaitSemaphores(dev->primary_ring, device, &wait_info,
UINT64_MAX);
sem->feedback.signaled_counter = *pValue;
}
@ -2200,7 +2201,7 @@ vn_GetSemaphoreCounterValue(VkDevice device,
return VK_SUCCESS;
} else {
return vn_call_vkGetSemaphoreCounterValue(dev->instance, device,
return vn_call_vkGetSemaphoreCounterValue(dev->primary_ring, device,
semaphore, pValue);
}
}
@ -2215,9 +2216,9 @@ vn_SignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo)
/* TODO if the semaphore is shared-by-ref, this needs to be synchronous */
if (false)
vn_call_vkSignalSemaphore(dev->instance, device, pSignalInfo);
vn_call_vkSignalSemaphore(dev->primary_ring, device, pSignalInfo);
else
vn_async_vkSignalSemaphore(dev->instance, device, pSignalInfo);
vn_async_vkSignalSemaphore(dev->primary_ring, device, pSignalInfo);
if (sem->feedback.slot) {
simple_mtx_lock(&sem->feedback.async_wait_mtx);
@ -2404,12 +2405,12 @@ vn_GetSemaphoreFdKHR(VkDevice device,
.semaphore = pGetFdInfo->semaphore,
.resourceId = 0,
};
vn_async_vkImportSemaphoreResourceMESA(dev->instance, device,
vn_async_vkImportSemaphoreResourceMESA(dev->primary_ring, device,
&res_info);
}
/* perform wait operation on the host semaphore */
vn_async_vkWaitSemaphoreResourceMESA(dev->instance, device,
vn_async_vkWaitSemaphoreResourceMESA(dev->primary_ring, device,
pGetFdInfo->semaphore);
vn_sync_payload_release(dev, &sem->temporary);
@ -2474,7 +2475,7 @@ vn_CreateEvent(VkDevice device,
}
VkEvent ev_handle = vn_event_to_handle(ev);
vn_async_vkCreateEvent(dev->instance, device, pCreateInfo, NULL,
vn_async_vkCreateEvent(dev->primary_ring, device, pCreateInfo, NULL,
&ev_handle);
*pEvent = ev_handle;
@ -2496,7 +2497,7 @@ vn_DestroyEvent(VkDevice device,
if (!ev)
return;
vn_async_vkDestroyEvent(dev->instance, device, event, NULL);
vn_async_vkDestroyEvent(dev->primary_ring, device, event, NULL);
vn_event_feedback_fini(dev, ev);
@ -2515,7 +2516,7 @@ vn_GetEventStatus(VkDevice device, VkEvent event)
if (ev->feedback_slot)
result = vn_feedback_get_status(ev->feedback_slot);
else
result = vn_call_vkGetEventStatus(dev->instance, device, event);
result = vn_call_vkGetEventStatus(dev->primary_ring, device, event);
return vn_result(dev->instance, result);
}
@ -2529,9 +2530,9 @@ vn_SetEvent(VkDevice device, VkEvent event)
if (ev->feedback_slot) {
vn_feedback_set_status(ev->feedback_slot, VK_EVENT_SET);
vn_async_vkSetEvent(dev->instance, device, event);
vn_async_vkSetEvent(dev->primary_ring, device, event);
} else {
VkResult result = vn_call_vkSetEvent(dev->instance, device, event);
VkResult result = vn_call_vkSetEvent(dev->primary_ring, device, event);
if (result != VK_SUCCESS)
return vn_error(dev->instance, result);
}
@ -2548,9 +2549,10 @@ vn_ResetEvent(VkDevice device, VkEvent event)
if (ev->feedback_slot) {
vn_feedback_reset_status(ev->feedback_slot);
vn_async_vkResetEvent(dev->instance, device, event);
vn_async_vkResetEvent(dev->primary_ring, device, event);
} else {
VkResult result = vn_call_vkResetEvent(dev->instance, device, event);
VkResult result =
vn_call_vkResetEvent(dev->primary_ring, device, event);
if (result != VK_SUCCESS)
return vn_error(dev->instance, result);
}

View file

@ -237,7 +237,7 @@ vn_CreateRenderPass(VkDevice device,
}
VkRenderPass pass_handle = vn_render_pass_to_handle(pass);
vn_async_vkCreateRenderPass(dev->instance, device, pCreateInfo, NULL,
vn_async_vkCreateRenderPass(dev->primary_ring, device, pCreateInfo, NULL,
&pass_handle);
if (pCreateInfo == &local_pass_info)
@ -293,7 +293,7 @@ vn_CreateRenderPass2(VkDevice device,
pass->subpasses[i].view_mask = pCreateInfo->pSubpasses[i].viewMask;
VkRenderPass pass_handle = vn_render_pass_to_handle(pass);
vn_async_vkCreateRenderPass2(dev->instance, device, pCreateInfo, NULL,
vn_async_vkCreateRenderPass2(dev->primary_ring, device, pCreateInfo, NULL,
&pass_handle);
if (pCreateInfo == &local_pass_info)
@ -317,7 +317,7 @@ vn_DestroyRenderPass(VkDevice device,
if (!pass)
return;
vn_async_vkDestroyRenderPass(dev->instance, device, renderPass, NULL);
vn_async_vkDestroyRenderPass(dev->primary_ring, device, renderPass, NULL);
vn_object_base_fini(&pass->base);
vk_free(alloc, pass);
@ -332,8 +332,8 @@ vn_GetRenderAreaGranularity(VkDevice device,
struct vn_render_pass *pass = vn_render_pass_from_handle(renderPass);
if (!pass->granularity.width) {
vn_call_vkGetRenderAreaGranularity(dev->instance, device, renderPass,
&pass->granularity);
vn_call_vkGetRenderAreaGranularity(dev->primary_ring, device,
renderPass, &pass->granularity);
}
*pGranularity = pass->granularity;
@ -371,7 +371,7 @@ vn_CreateFramebuffer(VkDevice device,
sizeof(*pCreateInfo->pAttachments) * view_count);
VkFramebuffer fb_handle = vn_framebuffer_to_handle(fb);
vn_async_vkCreateFramebuffer(dev->instance, device, pCreateInfo, NULL,
vn_async_vkCreateFramebuffer(dev->primary_ring, device, pCreateInfo, NULL,
&fb_handle);
*pFramebuffer = fb_handle;
@ -392,7 +392,8 @@ vn_DestroyFramebuffer(VkDevice device,
if (!fb)
return;
vn_async_vkDestroyFramebuffer(dev->instance, device, framebuffer, NULL);
vn_async_vkDestroyFramebuffer(dev->primary_ring, device, framebuffer,
NULL);
vn_object_base_fini(&fb->base);
vk_free(alloc, fb);