lavapipe: drop lavapipe-specific macro for the generic one.

These can all just use the standard VK_FROM_HANDLE macro the common Vulkan runtime already defines.

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38198>
Author: Dave Airlie
Date:   2025-11-02 05:01:12 +10:00
parent be2e4b6fde
commit cde13f3674

12 changed files with 181 additions and 184 deletions
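Background for the rename: the driver-local macro and the generic one expand to the same handle-cast declaration, which is why every call site below changes mechanically. A minimal sketch of the two definitions, assuming the generic VK_FROM_HANDLE lives in the common Vulkan runtime's vk_object.h (the LVP_FROM_HANDLE definition is the one deleted from lvp_private.h in this diff; the VK_FROM_HANDLE body is reproduced from memory and may differ cosmetically from the tree):

/* Generic macro from the common Vulkan runtime (vk_object.h); sketch,
 * not copied verbatim from the tree. */
#define VK_FROM_HANDLE(__vk_type, __name, __handle) \
   struct __vk_type *__name = __vk_type ## _from_handle(__handle)

/* Driver-local duplicate removed by this commit (see the lvp_private.h hunk). */
#define LVP_FROM_HANDLE(__lvp_type, __name, __handle) \
   struct __lvp_type *__name = __lvp_type ## _from_handle(__handle)

/* Both expand to the same declaration, e.g.: */
VK_FROM_HANDLE(lvp_device, device, _device);
/* => struct lvp_device *device = lvp_device_from_handle(_device); */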

View file

@@ -79,7 +79,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_BeginCommandBuffer(
 VkCommandBuffer commandBuffer,
 const VkCommandBufferBeginInfo* pBeginInfo)
 {
-LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+VK_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
 vk_command_buffer_begin(&cmd_buffer->vk, pBeginInfo);
@@ -89,7 +89,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_BeginCommandBuffer(
 VKAPI_ATTR VkResult VKAPI_CALL lvp_EndCommandBuffer(
 VkCommandBuffer commandBuffer)
 {
-LVP_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
+VK_FROM_HANDLE(lvp_cmd_buffer, cmd_buffer, commandBuffer);
 return vk_command_buffer_end(&cmd_buffer->vk);
 }

View file

@@ -61,7 +61,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateDescriptorSetLayout(
 const VkAllocationCallbacks* pAllocator,
 VkDescriptorSetLayout* pSetLayout)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 struct lvp_descriptor_set_layout *set_layout;
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
@@ -223,7 +223,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreatePipelineLayout(
 const VkAllocationCallbacks* pAllocator,
 VkPipelineLayout* pPipelineLayout)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 struct lvp_pipeline_layout *layout = lvp_pipeline_layout_create(device, pCreateInfo, pAllocator);
 *pPipelineLayout = lvp_pipeline_layout_to_handle(layout);
@@ -398,14 +398,14 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_AllocateDescriptorSets(
 const VkDescriptorSetAllocateInfo* pAllocateInfo,
 VkDescriptorSet* pDescriptorSets)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_descriptor_pool, pool, pAllocateInfo->descriptorPool);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_descriptor_pool, pool, pAllocateInfo->descriptorPool);
 VkResult result = VK_SUCCESS;
 struct lvp_descriptor_set *set;
 uint32_t i;
 for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
-LVP_FROM_HANDLE(lvp_descriptor_set_layout, layout,
+VK_FROM_HANDLE(lvp_descriptor_set_layout, layout,
 pAllocateInfo->pSetLayouts[i]);
 result = lvp_descriptor_set_create(device, layout, &set);
@@ -429,9 +429,9 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_FreeDescriptorSets(
 uint32_t count,
 const VkDescriptorSet* pDescriptorSets)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 for (uint32_t i = 0; i < count; i++) {
-LVP_FROM_HANDLE(lvp_descriptor_set, set, pDescriptorSets[i]);
+VK_FROM_HANDLE(lvp_descriptor_set, set, pDescriptorSets[i]);
 if (!set)
 continue;
@@ -448,11 +448,11 @@ VKAPI_ATTR void VKAPI_CALL lvp_UpdateDescriptorSets(
 uint32_t descriptorCopyCount,
 const VkCopyDescriptorSet* pDescriptorCopies)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 for (uint32_t i = 0; i < descriptorWriteCount; i++) {
 const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
-LVP_FROM_HANDLE(lvp_descriptor_set, set, write->dstSet);
+VK_FROM_HANDLE(lvp_descriptor_set, set, write->dstSet);
 const struct lvp_descriptor_set_binding_layout *bind_layout =
 &set->layout->binding[write->dstBinding];
@@ -471,7 +471,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_UpdateDescriptorSets(
 case VK_DESCRIPTOR_TYPE_SAMPLER:
 if (!bind_layout->immutable_samplers) {
 for (uint32_t j = 0; j < write->descriptorCount; j++) {
-LVP_FROM_HANDLE(lvp_sampler, sampler, write->pImageInfo[j].sampler);
+VK_FROM_HANDLE(lvp_sampler, sampler, write->pImageInfo[j].sampler);
 uint32_t didx = j * bind_layout->stride;
 for (unsigned k = 0; k < bind_layout->stride; k++) {
@@ -484,7 +484,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_UpdateDescriptorSets(
 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
 for (uint32_t j = 0; j < write->descriptorCount; j++) {
-LVP_FROM_HANDLE(lvp_image_view, iview,
+VK_FROM_HANDLE(lvp_image_view, iview,
 write->pImageInfo[j].imageView);
 uint32_t didx = j * bind_layout->stride;
 if (iview) {
@@ -496,7 +496,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_UpdateDescriptorSets(
 }
 if (!bind_layout->immutable_samplers) {
-LVP_FROM_HANDLE(lvp_sampler, sampler,
+VK_FROM_HANDLE(lvp_sampler, sampler,
 write->pImageInfo[j].sampler);
 for (unsigned p = 0; p < plane_count; p++) {
@@ -515,7 +515,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_UpdateDescriptorSets(
 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
 for (uint32_t j = 0; j < write->descriptorCount; j++) {
-LVP_FROM_HANDLE(lvp_image_view, iview,
+VK_FROM_HANDLE(lvp_image_view, iview,
 write->pImageInfo[j].imageView);
 uint32_t didx = j * bind_layout->stride;
 if (iview) {
@@ -536,7 +536,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_UpdateDescriptorSets(
 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
 for (uint32_t j = 0; j < write->descriptorCount; j++) {
-LVP_FROM_HANDLE(lvp_image_view, iview,
+VK_FROM_HANDLE(lvp_image_view, iview,
 write->pImageInfo[j].imageView);
 uint32_t didx = j * bind_layout->stride;
 if (iview) {
@@ -555,7 +555,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_UpdateDescriptorSets(
 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
 for (uint32_t j = 0; j < write->descriptorCount; j++) {
-LVP_FROM_HANDLE(lvp_buffer_view, bview,
+VK_FROM_HANDLE(lvp_buffer_view, bview,
 write->pTexelBufferView[j]);
 assert(bind_layout->stride == 1);
 if (bview) {
@@ -570,7 +570,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_UpdateDescriptorSets(
 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
 for (uint32_t j = 0; j < write->descriptorCount; j++) {
-LVP_FROM_HANDLE(lvp_buffer_view, bview,
+VK_FROM_HANDLE(lvp_buffer_view, bview,
 write->pTexelBufferView[j]);
 assert(bind_layout->stride == 1);
 if (bview) {
@@ -585,7 +585,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_UpdateDescriptorSets(
 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
 for (uint32_t j = 0; j < write->descriptorCount; j++) {
-LVP_FROM_HANDLE(lvp_buffer, buffer, write->pBufferInfo[j].buffer);
+VK_FROM_HANDLE(lvp_buffer, buffer, write->pBufferInfo[j].buffer);
 assert(bind_layout->stride == 1);
 if (buffer) {
 struct pipe_constant_buffer ubo = {
@@ -607,7 +607,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_UpdateDescriptorSets(
 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
 for (uint32_t j = 0; j < write->descriptorCount; j++) {
-LVP_FROM_HANDLE(lvp_buffer, buffer, write->pBufferInfo[j].buffer);
+VK_FROM_HANDLE(lvp_buffer, buffer, write->pBufferInfo[j].buffer);
 assert(bind_layout->stride == 1);
 if (buffer) {
 struct pipe_shader_buffer ubo = {
@@ -644,8 +644,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_UpdateDescriptorSets(
 for (uint32_t i = 0; i < descriptorCopyCount; i++) {
 const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
-LVP_FROM_HANDLE(lvp_descriptor_set, src, copy->srcSet);
-LVP_FROM_HANDLE(lvp_descriptor_set, dst, copy->dstSet);
+VK_FROM_HANDLE(lvp_descriptor_set, src, copy->srcSet);
+VK_FROM_HANDLE(lvp_descriptor_set, dst, copy->dstSet);
 const struct lvp_descriptor_set_binding_layout *src_layout =
 &src->layout->binding[copy->srcBinding];
@@ -677,7 +677,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateDescriptorPool(
 const VkAllocationCallbacks* pAllocator,
 VkDescriptorPool* pDescriptorPool)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 struct lvp_descriptor_pool *pool;
 size_t size = sizeof(struct lvp_descriptor_pool);
 pool = vk_zalloc2(&device->vk.alloc, pAllocator, size, 8,
@@ -708,8 +708,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyDescriptorPool(
 VkDescriptorPool _pool,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_descriptor_pool, pool, _pool);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_descriptor_pool, pool, _pool);
 if (!_pool)
 return;
@@ -724,8 +724,8 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_ResetDescriptorPool(
 VkDescriptorPool _pool,
 VkDescriptorPoolResetFlags flags)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_descriptor_pool, pool, _pool);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_descriptor_pool, pool, _pool);
 lvp_reset_descriptor_pool(device, pool);
 return VK_SUCCESS;
@@ -756,9 +756,9 @@ lvp_descriptor_set_update_with_template(VkDevice _device, VkDescriptorSet descri
 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
 const void *pData)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_descriptor_set, set, descriptorSet);
-LVP_FROM_HANDLE(vk_descriptor_update_template, templ, descriptorUpdateTemplate);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_descriptor_set, set, descriptorSet);
+VK_FROM_HANDLE(vk_descriptor_update_template, templ, descriptorUpdateTemplate);
 uint32_t i, j;
 for (i = 0; i < templ->entry_count; ++i) {
@@ -784,7 +784,7 @@ lvp_descriptor_set_update_with_template(VkDevice _device, VkDescriptorSet descri
 switch (entry->type) {
 case VK_DESCRIPTOR_TYPE_SAMPLER: {
 VkDescriptorImageInfo *info = (VkDescriptorImageInfo *)pSrc;
-LVP_FROM_HANDLE(lvp_sampler, sampler, info->sampler);
+VK_FROM_HANDLE(lvp_sampler, sampler, info->sampler);
 for (unsigned k = 0; k < bind_layout->stride; k++) {
 desc[idx + k].sampler = sampler->desc.sampler;
@@ -794,7 +794,7 @@ lvp_descriptor_set_update_with_template(VkDevice _device, VkDescriptorSet descri
 }
 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
 VkDescriptorImageInfo *info = (VkDescriptorImageInfo *)pSrc;
-LVP_FROM_HANDLE(lvp_image_view, iview, info->imageView);
+VK_FROM_HANDLE(lvp_image_view, iview, info->imageView);
 if (iview) {
 for (unsigned p = 0; p < iview->plane_count; p++) {
@@ -803,7 +803,7 @@ lvp_descriptor_set_update_with_template(VkDevice _device, VkDescriptorSet descri
 }
 if (!bind_layout->immutable_samplers) {
-LVP_FROM_HANDLE(lvp_sampler, sampler, info->sampler);
+VK_FROM_HANDLE(lvp_sampler, sampler, info->sampler);
 for (unsigned p = 0; p < iview->plane_count; p++) {
 desc[idx + p].sampler = sampler->desc.sampler;
@@ -820,7 +820,7 @@ lvp_descriptor_set_update_with_template(VkDevice _device, VkDescriptorSet descri
 }
 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
 VkDescriptorImageInfo *info = (VkDescriptorImageInfo *)pSrc;
-LVP_FROM_HANDLE(lvp_image_view, iview, info->imageView);
+VK_FROM_HANDLE(lvp_image_view, iview, info->imageView);
 if (iview) {
 for (unsigned p = 0; p < iview->plane_count; p++) {
@@ -837,7 +837,7 @@ lvp_descriptor_set_update_with_template(VkDevice _device, VkDescriptorSet descri
 }
 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
-LVP_FROM_HANDLE(lvp_image_view, iview,
+VK_FROM_HANDLE(lvp_image_view, iview,
 ((VkDescriptorImageInfo *)pSrc)->imageView);
 if (iview) {
@@ -852,7 +852,7 @@ lvp_descriptor_set_update_with_template(VkDevice _device, VkDescriptorSet descri
 break;
 }
 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
-LVP_FROM_HANDLE(lvp_buffer_view, bview,
+VK_FROM_HANDLE(lvp_buffer_view, bview,
 *(VkBufferView *)pSrc);
 assert(bind_layout->stride == 1);
 if (bview) {
@@ -865,7 +865,7 @@ lvp_descriptor_set_update_with_template(VkDevice _device, VkDescriptorSet descri
 break;
 }
 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
-LVP_FROM_HANDLE(lvp_buffer_view, bview,
+VK_FROM_HANDLE(lvp_buffer_view, bview,
 *(VkBufferView *)pSrc);
 assert(bind_layout->stride == 1);
 if (bview) {
@@ -880,7 +880,7 @@ lvp_descriptor_set_update_with_template(VkDevice _device, VkDescriptorSet descri
 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: {
 VkDescriptorBufferInfo *info = (VkDescriptorBufferInfo *)pSrc;
-LVP_FROM_HANDLE(lvp_buffer, buffer, info->buffer);
+VK_FROM_HANDLE(lvp_buffer, buffer, info->buffer);
 assert(bind_layout->stride == 1);
 if (buffer) {
 struct pipe_constant_buffer ubo = {
@@ -902,7 +902,7 @@ lvp_descriptor_set_update_with_template(VkDevice _device, VkDescriptorSet descri
 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
 VkDescriptorBufferInfo *info = (VkDescriptorBufferInfo *)pSrc;
-LVP_FROM_HANDLE(lvp_buffer, buffer, info->buffer);
+VK_FROM_HANDLE(lvp_buffer, buffer, info->buffer);
 assert(bind_layout->stride == 1);
 if (buffer) {
@@ -951,7 +951,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetDescriptorSetLayoutSizeEXT(
 VkDescriptorSetLayout _layout,
 VkDeviceSize* pSize)
 {
-LVP_FROM_HANDLE(lvp_descriptor_set_layout, layout, _layout);
+VK_FROM_HANDLE(lvp_descriptor_set_layout, layout, _layout);
 *pSize = layout->size * sizeof(struct lp_descriptor);
@@ -965,7 +965,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetDescriptorSetLayoutBindingOffsetEXT(
 uint32_t binding,
 VkDeviceSize* pOffset)
 {
-LVP_FROM_HANDLE(lvp_descriptor_set_layout, layout, _layout);
+VK_FROM_HANDLE(lvp_descriptor_set_layout, layout, _layout);
 assert(binding < layout->binding_count);
 const struct lvp_descriptor_set_binding_layout *bind_layout = &layout->binding[binding];
@@ -981,7 +981,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetDescriptorEXT(
 size_t size,
 void* pDescriptor)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 struct lp_descriptor *desc = pDescriptor;
@@ -997,7 +997,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetDescriptorEXT(
 }
 case VK_DESCRIPTOR_TYPE_SAMPLER: {
 if (pCreateInfo->data.pSampler) {
-LVP_FROM_HANDLE(lvp_sampler, sampler, pCreateInfo->data.pSampler[0]);
+VK_FROM_HANDLE(lvp_sampler, sampler, pCreateInfo->data.pSampler[0]);
 desc->sampler = sampler->desc.sampler;
 desc->texture.sampler_index = sampler->desc.texture.sampler_index;
 } else {
@@ -1010,7 +1010,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetDescriptorEXT(
 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
 const VkDescriptorImageInfo *info = pCreateInfo->data.pCombinedImageSampler;
 if (info && info->imageView) {
-LVP_FROM_HANDLE(lvp_image_view, iview, info->imageView);
+VK_FROM_HANDLE(lvp_image_view, iview, info->imageView);
 unsigned plane_count = iview->plane_count;
@@ -1019,7 +1019,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetDescriptorEXT(
 desc[p].functions = iview->planes[p].texture_handle->functions;
 if (info->sampler) {
-LVP_FROM_HANDLE(lvp_sampler, sampler, info->sampler);
+VK_FROM_HANDLE(lvp_sampler, sampler, info->sampler);
 desc[p].sampler = sampler->desc.sampler;
 desc[p].texture.sampler_index = sampler->desc.texture.sampler_index;
 } else {
@@ -1041,7 +1041,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetDescriptorEXT(
 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
 if (pCreateInfo->data.pSampledImage && pCreateInfo->data.pSampledImage->imageView) {
-LVP_FROM_HANDLE(lvp_image_view, iview, pCreateInfo->data.pSampledImage->imageView);
+VK_FROM_HANDLE(lvp_image_view, iview, pCreateInfo->data.pSampledImage->imageView);
 unsigned plane_count = iview->plane_count;
@@ -1064,7 +1064,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetDescriptorEXT(
 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
 if (pCreateInfo->data.pStorageImage && pCreateInfo->data.pStorageImage->imageView) {
-LVP_FROM_HANDLE(lvp_image_view, iview, pCreateInfo->data.pStorageImage->imageView);
+VK_FROM_HANDLE(lvp_image_view, iview, pCreateInfo->data.pStorageImage->imageView);
 unsigned plane_count = iview->plane_count;

View file

@@ -1506,7 +1506,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyInstance(
 VkInstance _instance,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_instance, instance, _instance);
+VK_FROM_HANDLE(lvp_instance, instance, _instance);
 if (!instance)
 return;
@@ -1820,7 +1820,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateDevice(
 const VkAllocationCallbacks* pAllocator,
 VkDevice* pDevice)
 {
-LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
+VK_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
 struct lvp_device *device;
 struct lvp_instance *instance = (struct lvp_instance *)physical_device->vk.instance;
@@ -1910,7 +1910,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyDevice(
 VkDevice _device,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 lvp_device_finish_accel_struct_state(device);
@@ -2015,7 +2015,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_AllocateMemory(
 const VkAllocationCallbacks* pAllocator,
 VkDeviceMemory* pMem)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 struct lvp_device_memory *mem;
 ASSERTED const VkImportMemoryFdInfoKHR *import_info = NULL;
 const VkMemoryAllocateFlagsInfo *mem_flags = NULL;
@@ -2145,8 +2145,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_FreeMemory(
 VkDeviceMemory _mem,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_device_memory, mem, _mem);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device_memory, mem, _mem);
 if (mem == NULL)
 return;
@@ -2179,7 +2179,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_MapMemory2KHR(
 const VkMemoryMapInfoKHR* pMemoryMapInfo,
 void** ppData)
 {
-LVP_FROM_HANDLE(lvp_device_memory, mem, pMemoryMapInfo->memory);
+VK_FROM_HANDLE(lvp_device_memory, mem, pMemoryMapInfo->memory);
 if (mem == NULL) {
 *ppData = NULL;
@@ -2253,7 +2253,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetDeviceImageMemoryRequirements(
 VkImage _image;
 if (lvp_CreateImage(_device, pInfo->pCreateInfo, NULL, &_image) != VK_SUCCESS)
 return;
-LVP_FROM_HANDLE(lvp_image, image, _image);
+VK_FROM_HANDLE(lvp_image, image, _image);
 /* Per spec VUs of VkImageMemoryRequirementsInfo2 */
 const bool need_plane_info =
@@ -2278,7 +2278,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetBufferMemoryRequirements(
 VkBuffer _buffer,
 VkMemoryRequirements* pMemoryRequirements)
 {
-LVP_FROM_HANDLE(lvp_buffer, buffer, _buffer);
+VK_FROM_HANDLE(lvp_buffer, buffer, _buffer);
 pMemoryRequirements->alignment = 64;
 if (buffer->vk.create_flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
@@ -2327,7 +2327,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetImageMemoryRequirements(
 VkImage _image,
 VkMemoryRequirements* pMemoryRequirements)
 {
-LVP_FROM_HANDLE(lvp_image, image, _image);
+VK_FROM_HANDLE(lvp_image, image, _image);
 pMemoryRequirements->memoryTypeBits = 1;
 pMemoryRequirements->size = image->size;
@@ -2369,10 +2369,10 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_BindBufferMemory2(VkDevice _device,
 uint32_t bindInfoCount,
 const VkBindBufferMemoryInfo *pBindInfos)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 for (uint32_t i = 0; i < bindInfoCount; ++i) {
-LVP_FROM_HANDLE(lvp_device_memory, mem, pBindInfos[i].memory);
-LVP_FROM_HANDLE(lvp_buffer, buffer, pBindInfos[i].buffer);
+VK_FROM_HANDLE(lvp_device_memory, mem, pBindInfos[i].memory);
+VK_FROM_HANDLE(lvp_buffer, buffer, pBindInfos[i].buffer);
 VkBindMemoryStatusKHR *status = (void*)vk_find_struct_const(&pBindInfos[i], BIND_MEMORY_STATUS_KHR);
 buffer->mem = mem;
@@ -2419,8 +2419,8 @@ static VkResult
 lvp_image_bind(struct lvp_device *device,
 const VkBindImageMemoryInfo *bind_info)
 {
-LVP_FROM_HANDLE(lvp_device_memory, mem, bind_info->memory);
-LVP_FROM_HANDLE(lvp_image, image, bind_info->image);
+VK_FROM_HANDLE(lvp_device_memory, mem, bind_info->memory);
+VK_FROM_HANDLE(lvp_image, image, bind_info->image);
 uint64_t mem_offset = bind_info->memoryOffset;
 VkResult result;
@@ -2467,7 +2467,7 @@ lvp_BindImageMemory2(VkDevice _device,
 uint32_t bindInfoCount,
 const VkBindImageMemoryInfo *pBindInfos)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 VkResult result = VK_SUCCESS;
 for (uint32_t i = 0; i < bindInfoCount; i++) {
@@ -2488,7 +2488,7 @@ lvp_BindImageMemory2(VkDevice _device,
 VkResult
 lvp_GetMemoryFdKHR(VkDevice _device, const VkMemoryGetFdInfoKHR *pGetFdInfo, int *pFD)
 {
-LVP_FROM_HANDLE(lvp_device_memory, memory, pGetFdInfo->memory);
+VK_FROM_HANDLE(lvp_device_memory, memory, pGetFdInfo->memory);
 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
 assert_memhandle_type(pGetFdInfo->handleType);
@@ -2504,7 +2504,7 @@ lvp_GetMemoryFdPropertiesKHR(VkDevice _device,
 int fd,
 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 assert(pMemoryFdProperties->sType == VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR);
@@ -2525,7 +2525,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateEvent(
 const VkAllocationCallbacks* pAllocator,
 VkEvent* pEvent)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 struct lvp_event *event = vk_alloc2(&device->vk.alloc, pAllocator,
 sizeof(*event), 8,
 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
@@ -2545,8 +2545,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyEvent(
 VkEvent _event,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_event, event, _event);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_event, event, _event);
 if (!event)
 return;
@@ -2559,7 +2559,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_GetEventStatus(
 VkDevice _device,
 VkEvent _event)
 {
-LVP_FROM_HANDLE(lvp_event, event, _event);
+VK_FROM_HANDLE(lvp_event, event, _event);
 if (event->event_storage == 1)
 return VK_EVENT_SET;
 return VK_EVENT_RESET;
@@ -2569,7 +2569,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_SetEvent(
 VkDevice _device,
 VkEvent _event)
 {
-LVP_FROM_HANDLE(lvp_event, event, _event);
+VK_FROM_HANDLE(lvp_event, event, _event);
 event->event_storage = 1;
 return VK_SUCCESS;
@@ -2579,7 +2579,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_ResetEvent(
 VkDevice _device,
 VkEvent _event)
 {
-LVP_FROM_HANDLE(lvp_event, event, _event);
+VK_FROM_HANDLE(lvp_event, event, _event);
 event->event_storage = 0;
 return VK_SUCCESS;
@@ -2631,7 +2631,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateSampler(
 const VkAllocationCallbacks* pAllocator,
 VkSampler* pSampler)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 struct lvp_sampler *sampler;
 sampler = vk_sampler_create(&device->vk, pCreateInfo,
@@ -2651,8 +2651,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroySampler(
 VkSampler _sampler,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_sampler, sampler, _sampler);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_sampler, sampler, _sampler);
 if (!_sampler)
 return;
@@ -2666,7 +2666,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreatePrivateDataSlot(
 const VkAllocationCallbacks* pAllocator,
 VkPrivateDataSlot* pPrivateDataSlot)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 return vk_private_data_slot_create(&device->vk, pCreateInfo, pAllocator,
 pPrivateDataSlot);
 }
@@ -2676,7 +2676,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyPrivateDataSlot(
 VkPrivateDataSlot privateDataSlot,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 vk_private_data_slot_destroy(&device->vk, privateDataSlot, pAllocator);
 }
@@ -2687,7 +2687,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_SetPrivateData(
 VkPrivateDataSlot privateDataSlot,
 uint64_t data)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 return vk_object_base_set_private_data(&device->vk, objectType,
 objectHandle, privateDataSlot,
 data);
@@ -2700,7 +2700,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetPrivateData(
 VkPrivateDataSlot privateDataSlot,
 uint64_t* pData)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 vk_object_base_get_private_data(&device->vk, objectType, objectHandle,
 privateDataSlot, pData);
 }
@@ -2710,7 +2710,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetPhysicalDeviceExternalFenceProperties(
 const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
 VkExternalFenceProperties *pExternalFenceProperties)
 {
-LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
+VK_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
 const VkExternalFenceHandleTypeFlagBits handle_type = pExternalFenceInfo->handleType;
 if (handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT &&
@@ -2734,7 +2734,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetPhysicalDeviceExternalSemaphoreProperties(
 const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
 VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
 {
-LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
+VK_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
 const VkSemaphoreTypeCreateInfo *type_info =
 vk_find_struct_const(pExternalSemaphoreInfo->pNext, SEMAPHORE_TYPE_CREATE_INFO);
 const VkSemaphoreType type = !type_info ? VK_SEMAPHORE_TYPE_BINARY : type_info->semaphoreType;
@@ -2834,7 +2834,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_SetDeviceMemoryPriorityEXT(
 VkDeviceMemory _memory,
 float priority)
 {
-LVP_FROM_HANDLE(lvp_device_memory, mem, _memory);
+VK_FROM_HANDLE(lvp_device_memory, mem, _memory);
 set_mem_priority(mem, get_mem_priority(priority));
 }

View file

@@ -29,7 +29,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateIndirectExecutionSetEXT(
 const VkAllocationCallbacks* pAllocator,
 VkIndirectExecutionSetEXT* pIndirectExecutionSet)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 bool is_shaders = pCreateInfo->type == VK_INDIRECT_EXECUTION_SET_INFO_TYPE_SHADER_OBJECTS_EXT;
 size_t size = 0;
 if (is_shaders) {
@@ -65,7 +65,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyIndirectExecutionSetEXT(
 VkIndirectExecutionSetEXT indirectExecutionSet,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 VK_FROM_HANDLE(lvp_indirect_execution_set, iset, indirectExecutionSet);
 if (!iset)
@@ -138,7 +138,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateIndirectCommandsLayoutEXT(
 const VkAllocationCallbacks* pAllocator,
 VkIndirectCommandsLayoutEXT* pIndirectCommandsLayout)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 struct lvp_indirect_command_layout_ext *elayout;
 size_t token_size = pCreateInfo->tokenCount * sizeof(VkIndirectCommandsLayoutTokenEXT);
@@ -203,7 +203,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyIndirectCommandsLayoutEXT(
 VkIndirectCommandsLayoutEXT indirectCommandsLayout,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 VK_FROM_HANDLE(lvp_indirect_command_layout_ext, elayout, indirectCommandsLayout);
 if (!elayout)

View file

@@ -558,7 +558,7 @@ handle_compute_shader(struct rendering_state *state, struct lvp_shader *shader)
 static void handle_compute_pipeline(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
-LVP_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline.pipeline);
+VK_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline.pipeline);
 handle_compute_shader(state, &pipeline->shaders[MESA_SHADER_COMPUTE]);
 }
@@ -566,7 +566,7 @@ static void handle_compute_pipeline(struct vk_cmd_queue_entry *cmd,
 static void handle_ray_tracing_pipeline(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
-LVP_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline.pipeline);
+VK_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline.pipeline);
 struct lvp_shader *shader = &pipeline->shaders[MESA_SHADER_RAYGEN];
@@ -1066,7 +1066,7 @@ static void handle_graphics_pipeline(struct lvp_pipeline *pipeline,
 static void handle_pipeline(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
-LVP_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline.pipeline);
+VK_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline.pipeline);
 pipeline->used = true;
 if (pipeline->type == LVP_PIPELINE_COMPUTE) {
 handle_compute_pipeline(cmd, state);
@@ -1195,7 +1195,7 @@ apply_dynamic_offsets(struct lvp_descriptor_set **out_set, const uint32_t *offse
 static void
 handle_descriptor_sets(VkBindDescriptorSetsInfoKHR *bds, struct rendering_state *state)
 {
-LVP_FROM_HANDLE(lvp_pipeline_layout, layout, bds->layout);
+VK_FROM_HANDLE(lvp_pipeline_layout, layout, bds->layout);
 uint32_t dynamic_offset_index = 0;
@@ -2113,7 +2113,7 @@ static void handle_copy_image_to_buffer2(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 const struct VkCopyImageToBufferInfo2 *copycmd = cmd->u.copy_image_to_buffer2.copy_image_to_buffer_info;
-LVP_FROM_HANDLE(lvp_image, src_image, copycmd->srcImage);
+VK_FROM_HANDLE(lvp_image, src_image, copycmd->srcImage);
 struct pipe_box box, dbox;
 struct pipe_transfer *src_t, *dst_t;
 uint8_t *src_data, *dst_data;
@@ -2191,7 +2191,7 @@ handle_copy_memory_to_image_indirect(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 const VkCopyMemoryToImageIndirectInfoKHR *copycmd = cmd->u.copy_memory_to_image_indirect_khr.copy_memory_to_image_indirect_info;
-LVP_FROM_HANDLE(lvp_image, image, copycmd->dstImage);
+VK_FROM_HANDLE(lvp_image, image, copycmd->dstImage);
 for (uint32_t i = 0; i < copycmd->copyCount; i++) {
 uint8_t *ptr = (void*)(uintptr_t)copycmd->copyAddressRange.address;
@@ -2232,7 +2232,7 @@ static void handle_copy_buffer_to_image(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 const struct VkCopyBufferToImageInfo2 *copycmd = cmd->u.copy_buffer_to_image2.copy_buffer_to_image_info;
-LVP_FROM_HANDLE(lvp_image, dst_image, copycmd->dstImage);
+VK_FROM_HANDLE(lvp_image, dst_image, copycmd->dstImage);
 for (uint32_t i = 0; i < copycmd->regionCount; i++) {
 const VkBufferImageCopy2 *region = &copycmd->pRegions[i];
@@ -2342,8 +2342,8 @@ static void handle_copy_image(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 const struct VkCopyImageInfo2 *copycmd = cmd->u.copy_image2.copy_image_info;
-LVP_FROM_HANDLE(lvp_image, src_image, copycmd->srcImage);
-LVP_FROM_HANDLE(lvp_image, dst_image, copycmd->dstImage);
+VK_FROM_HANDLE(lvp_image, src_image, copycmd->srcImage);
+VK_FROM_HANDLE(lvp_image, dst_image, copycmd->dstImage);
 for (uint32_t i = 0; i < copycmd->regionCount; i++) {
 const VkImageCopy2 *region = &copycmd->pRegions[i];
@@ -2456,8 +2456,8 @@ static void handle_blit_image(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 VkBlitImageInfo2 *blitcmd = cmd->u.blit_image2.blit_image_info;
-LVP_FROM_HANDLE(lvp_image, src_image, blitcmd->srcImage);
-LVP_FROM_HANDLE(lvp_image, dst_image, blitcmd->dstImage);
+VK_FROM_HANDLE(lvp_image, src_image, blitcmd->srcImage);
+VK_FROM_HANDLE(lvp_image, dst_image, blitcmd->dstImage);
 struct pipe_blit_info info = {
 .src.resource = src_image->planes[0].bo,
@@ -2784,7 +2784,7 @@ static void handle_execute_commands(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state, bool print_cmds)
 {
 for (unsigned i = 0; i < cmd->u.execute_commands.command_buffer_count; i++) {
-LVP_FROM_HANDLE(lvp_cmd_buffer, secondary_buf, cmd->u.execute_commands.command_buffers[i]);
+VK_FROM_HANDLE(lvp_cmd_buffer, secondary_buf, cmd->u.execute_commands.command_buffers[i]);
 lvp_execute_cmd_buffer(&secondary_buf->vk.cmd_queue.cmds, state, print_cmds);
 }
 }
@@ -2792,7 +2792,7 @@ static void handle_execute_commands(struct vk_cmd_queue_entry *cmd,
 static void handle_event_set2(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
-LVP_FROM_HANDLE(lvp_event, event, cmd->u.set_event2.event);
+VK_FROM_HANDLE(lvp_event, event, cmd->u.set_event2.event);
 VkPipelineStageFlags2 src_stage_mask =
 vk_collect_dependency_info_src_stages(cmd->u.set_event2.dependency_info);
@@ -2805,7 +2805,7 @@ static void handle_event_set2(struct vk_cmd_queue_entry *cmd,
 static void handle_event_reset2(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
-LVP_FROM_HANDLE(lvp_event, event, cmd->u.reset_event2.event);
+VK_FROM_HANDLE(lvp_event, event, cmd->u.reset_event2.event);
 if (cmd->u.reset_event2.stage_mask == VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)
 state->pctx->flush(state->pctx, NULL, 0);
@@ -2817,7 +2817,7 @@ static void handle_wait_events2(struct vk_cmd_queue_entry *cmd,
 {
 finish_fence(state);
 for (unsigned i = 0; i < cmd->u.wait_events2.event_count; i++) {
-LVP_FROM_HANDLE(lvp_event, event, cmd->u.wait_events2.events[i]);
+VK_FROM_HANDLE(lvp_event, event, cmd->u.wait_events2.events[i]);
 while (event->event_storage != true);
 }
@@ -2833,7 +2833,7 @@ static void handle_begin_query(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 struct vk_cmd_begin_query *qcmd = &cmd->u.begin_query;
-LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
+VK_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
 pool->pipeline_stats & VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT)
@@ -2859,7 +2859,7 @@ static void handle_end_query(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 struct vk_cmd_end_query *qcmd = &cmd->u.end_query;
-LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
+VK_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
 assert(pool->queries[qcmd->query]);
 state->pctx->end_query(state->pctx, pool->queries[qcmd->query]);
@@ -2870,7 +2870,7 @@ static void handle_begin_query_indexed_ext(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 struct vk_cmd_begin_query_indexed_ext *qcmd = &cmd->u.begin_query_indexed_ext;
-LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
+VK_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
 if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
 pool->pipeline_stats & VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT)
@@ -2896,7 +2896,7 @@ static void handle_end_query_indexed_ext(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 struct vk_cmd_end_query_indexed_ext *qcmd = &cmd->u.end_query_indexed_ext;
-LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
+VK_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
 assert(pool->queries[qcmd->query]);
 state->pctx->end_query(state->pctx, pool->queries[qcmd->query]);
@@ -2906,7 +2906,7 @@ static void handle_reset_query_pool(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 struct vk_cmd_reset_query_pool *qcmd = &cmd->u.reset_query_pool;
-LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
+VK_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
 if (pool->base_type >= PIPE_QUERY_TYPES)
 return;
@@ -2923,7 +2923,7 @@ static void handle_write_timestamp2(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 struct vk_cmd_write_timestamp2 *qcmd = &cmd->u.write_timestamp2;
-LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
+VK_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
 if (!(qcmd->stage == VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT))
 state->pctx->flush(state->pctx, NULL, 0);
@@ -2942,7 +2942,7 @@ static void handle_copy_query_pool_results(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 struct vk_cmd_copy_query_pool_results *copycmd = &cmd->u.copy_query_pool_results;
-LVP_FROM_HANDLE(lvp_query_pool, pool, copycmd->query_pool);
+VK_FROM_HANDLE(lvp_query_pool, pool, copycmd->query_pool);
 enum pipe_query_flags flags = (copycmd->flags & VK_QUERY_RESULT_WAIT_BIT) ? PIPE_QUERY_WAIT : 0;
 if (copycmd->flags & VK_QUERY_RESULT_PARTIAL_BIT)
@@ -3034,7 +3034,7 @@ static void handle_copy_query_pool_results(struct vk_cmd_queue_entry *cmd,
 static void handle_clear_color_image(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
-LVP_FROM_HANDLE(lvp_image, image, cmd->u.clear_color_image.image);
+VK_FROM_HANDLE(lvp_image, image, cmd->u.clear_color_image.image);
 enum pipe_format format = image->planes[0].bo->format;
 const struct util_format_description *desc = util_format_description(format);
@@ -3076,7 +3076,7 @@ static void handle_clear_color_image(struct vk_cmd_queue_entry *cmd,
 static void handle_clear_ds_image(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
-LVP_FROM_HANDLE(lvp_image, image, cmd->u.clear_depth_stencil_image.image);
+VK_FROM_HANDLE(lvp_image, image, cmd->u.clear_depth_stencil_image.image);
 for (unsigned i = 0; i < cmd->u.clear_depth_stencil_image.range_count; i++) {
 VkImageSubresourceRange *range = &cmd->u.clear_depth_stencil_image.ranges[i];
 uint32_t ds_clear_flags = 0;
@@ -3172,8 +3172,8 @@ static void handle_resolve_image(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 VkResolveImageInfo2 *resolvecmd = cmd->u.resolve_image2.resolve_image_info;
-LVP_FROM_HANDLE(lvp_image, src_image, resolvecmd->srcImage);
-LVP_FROM_HANDLE(lvp_image, dst_image, resolvecmd->dstImage);
+VK_FROM_HANDLE(lvp_image, src_image, resolvecmd->srcImage);
+VK_FROM_HANDLE(lvp_image, dst_image, resolvecmd->dstImage);
 struct pipe_blit_info info = {0};
 info.src.resource = src_image->planes[0].bo;
@@ -3251,7 +3251,7 @@ static void handle_push_descriptor_set(struct vk_cmd_queue_entry *cmd,
 struct rendering_state *state)
 {
 VkPushDescriptorSetInfoKHR *pds = cmd->u.push_descriptor_set2.push_descriptor_set_info;
-LVP_FROM_HANDLE(lvp_pipeline_layout, layout, pds->layout);
+VK_FROM_HANDLE(lvp_pipeline_layout, layout, pds->layout);
 struct lvp_descriptor_set_layout *set_layout = (struct lvp_descriptor_set_layout *)layout->vk.set_layouts[pds->set];
 struct lvp_descriptor_set *set;
@@ -3288,8 +3288,8 @@ static void handle_push_descriptor_set_with_template(struct vk_cmd_queue_entry *
 struct rendering_state *state)
 {
 VkPushDescriptorSetWithTemplateInfoKHR *pds = cmd->u.push_descriptor_set_with_template2.push_descriptor_set_with_template_info;
-LVP_FROM_HANDLE(vk_descriptor_update_template, templ, pds->descriptorUpdateTemplate);
-LVP_FROM_HANDLE(lvp_pipeline_layout, layout, pds->layout);
+VK_FROM_HANDLE(vk_descriptor_update_template, templ, pds->descriptorUpdateTemplate);
+VK_FROM_HANDLE(lvp_pipeline_layout, layout, pds->layout);
 struct lvp_descriptor_set_layout *set_layout = (struct lvp_descriptor_set_layout *)layout->vk.set_layouts[pds->set];
 struct lvp_descriptor_set *set;
@@ -3816,7 +3816,7 @@ handle_shaders(struct vk_cmd_queue_entry *cmd, struct rendering_state *state)
 for (unsigned i = 0; i < bind->stage_count; i++) {
 mesa_shader_stage stage = vk_to_mesa_shader_stage(bind->stages[i]);
 assert(stage != MESA_SHADER_NONE && stage <= MESA_SHADER_MESH);
-LVP_FROM_HANDLE(lvp_shader, shader, bind->shaders ? bind->shaders[i] : VK_NULL_HANDLE);
+VK_FROM_HANDLE(lvp_shader, shader, bind->shaders ? bind->shaders[i] : VK_NULL_HANDLE);
 if (stage == MESA_SHADER_FRAGMENT) {
 if (shader) {
 state->force_min_sample = shader->pipeline_nir->nir->info.fs.uses_sample_shading;
@@ -4301,7 +4301,7 @@ static void
 handle_descriptor_buffer_embedded_samplers(struct vk_cmd_queue_entry *cmd, struct rendering_state *state)
 {
 const VkBindDescriptorBufferEmbeddedSamplersInfoEXT *bind = cmd->u.bind_descriptor_buffer_embedded_samplers2_ext.bind_descriptor_buffer_embedded_samplers_info;
-LVP_FROM_HANDLE(lvp_pipeline_layout, layout, bind->layout);
+VK_FROM_HANDLE(lvp_pipeline_layout, layout, bind->layout);
 if (!layout->vk.set_layouts[bind->set])
 return;
@@ -4323,7 +4323,7 @@ handle_descriptor_buffer_offsets(struct vk_cmd_queue_entry *cmd, struct renderin
 uint32_t types = lvp_pipeline_types_from_shader_stages(dbo->stageFlags);
 u_foreach_bit(pipeline_type, types) {
 for (unsigned i = 0; i < dbo->setCount; i++) {
-LVP_FROM_HANDLE(lvp_pipeline_layout, layout, dbo->layout);
+VK_FROM_HANDLE(lvp_pipeline_layout, layout, dbo->layout);
 unsigned idx = dbo->firstSet + i;
 state->desc_buffer_offsets[pipeline_type][idx].buffer_index = dbo->pBufferIndices[i];
 state->desc_buffer_offsets[pipeline_type][idx].offset = dbo->pOffsets[i];

View file

@@ -224,7 +224,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetPhysicalDeviceFormatProperties2(
 VkFormat format,
 VkFormatProperties2* pFormatProperties)
 {
-LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
+VK_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
 VkFormatProperties3 format_props;
 lvp_physical_device_get_format_properties(physical_device,
@@ -412,7 +412,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_GetPhysicalDeviceImageFormatProperties2(
 const VkPhysicalDeviceImageFormatInfo2 *base_info,
 VkImageFormatProperties2 *base_props)
 {
-LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
+VK_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
 const VkPhysicalDeviceExternalImageFormatInfo *external_info = NULL;
 VkExternalImageFormatProperties *external_props = NULL;
 VkSamplerYcbcrConversionImageFormatProperties *ycbcr_props = NULL;
@@ -527,7 +527,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetPhysicalDeviceSparseImageFormatProperties2(
 uint32_t *pPropertyCount,
 VkSparseImageFormatProperties2 *pProperties)
 {
-LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
+VK_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
 VkResult result;
 if (pFormatInfo->samples > VK_SAMPLE_COUNT_1_BIT) {
@@ -572,7 +572,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetDeviceImageSparseMemoryRequirements(
 uint32_t* pSparseMemoryRequirementCount,
 VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 if (!(pInfo->pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
 *pSparseMemoryRequirementCount = 0;
@@ -601,8 +601,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetImageSparseMemoryRequirements2(
 uint32_t* pSparseMemoryRequirementCount,
 VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_image, image, pInfo->image);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_image, image, pInfo->image);
 if (!(image->vk.create_flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
 *pSparseMemoryRequirementCount = 0;
@@ -644,7 +644,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetPhysicalDeviceExternalBufferProperties(
 switch (pExternalBufferInfo->handleType) {
 #ifdef HAVE_LIBDRM
 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT: {
-LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
+VK_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
 int params = physical_device->pscreen->caps.dmabuf;
 flags = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT;
 if (params & DRM_PRIME_CAP_EXPORT)

View file

@@ -36,7 +36,7 @@ lvp_image_create(VkDevice _device,
 const VkAllocationCallbacks* alloc,
 VkImage *pImage)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 struct lvp_image *image;
 VkResult result = VK_SUCCESS;
 #ifdef HAVE_LIBDRM
@@ -224,7 +224,7 @@ lvp_CreateImage(VkDevice _device,
 VkImage *pImage)
 {
 #if !DETECT_OS_ANDROID
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 const VkImageSwapchainCreateInfoKHR *swapchain_info =
 vk_find_struct_const(pCreateInfo->pNext, IMAGE_SWAPCHAIN_CREATE_INFO_KHR);
 if (swapchain_info && swapchain_info->swapchain != VK_NULL_HANDLE) {
@@ -242,8 +242,8 @@ VKAPI_ATTR void VKAPI_CALL
 lvp_DestroyImage(VkDevice _device, VkImage _image,
 const VkAllocationCallbacks *pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_image, image, _image);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_image, image, _image);
 if (!_image)
 return;
@@ -374,8 +374,8 @@ lvp_CreateImageView(VkDevice _device,
 const VkAllocationCallbacks *pAllocator,
 VkImageView *pView)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_image, image, pCreateInfo->image);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_image, image, pCreateInfo->image);
 struct lvp_image_view *view;
 view = vk_image_view_create(&device->vk, pCreateInfo,
@@ -439,8 +439,8 @@ VKAPI_ATTR void VKAPI_CALL
 lvp_DestroyImageView(VkDevice _device, VkImageView _iview,
 const VkAllocationCallbacks *pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_image_view, iview, _iview);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_image_view, iview, _iview);
 if (!_iview)
 return;
@@ -464,8 +464,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetImageSubresourceLayout(
 const VkImageSubresource* pSubresource,
 VkSubresourceLayout* pLayout)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_image, image, _image);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_image, image, _image);
 uint64_t value;
 const uint8_t p = lvp_image_aspects_to_plane(image, pSubresource->aspectMask);
@@ -544,7 +544,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateBuffer(
 const VkAllocationCallbacks* pAllocator,
 VkBuffer* pBuffer)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 struct lvp_buffer *buffer;
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
@@ -607,8 +607,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyBuffer(
 VkBuffer _buffer,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_buffer, buffer, _buffer);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_buffer, buffer, _buffer);
 if (!_buffer)
 return;
@@ -631,8 +631,8 @@ VKAPI_ATTR VkDeviceAddress VKAPI_CALL lvp_GetBufferDeviceAddress(
 VkDevice _device,
 const VkBufferDeviceAddressInfo* pInfo)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_buffer, buffer, pInfo->buffer);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_buffer, buffer, pInfo->buffer);
 simple_mtx_lock(&device->bda_lock);
 _mesa_hash_table_insert(&device->bda, buffer->map, buffer);
 simple_mtx_unlock(&device->bda_lock);
@@ -695,8 +695,8 @@ lvp_CreateBufferView(VkDevice _device,
 const VkAllocationCallbacks *pAllocator,
 VkBufferView *pView)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_buffer, buffer, pCreateInfo->buffer);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_buffer, buffer, pCreateInfo->buffer);
 struct lvp_buffer_view *view;
 view = vk_buffer_view_create(&device->vk,
@@ -731,8 +731,8 @@ VKAPI_ATTR void VKAPI_CALL
 lvp_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
 const VkAllocationCallbacks *pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_buffer_view, view, bufferView);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_buffer_view, view, bufferView);
 if (!bufferView)
 return;
@@ -752,8 +752,8 @@ lvp_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
 VKAPI_ATTR VkResult VKAPI_CALL
 lvp_CopyMemoryToImageEXT(VkDevice _device, const VkCopyMemoryToImageInfoEXT *pCopyMemoryToImageInfo)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_image, image, pCopyMemoryToImageInfo->dstImage);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_image, image, pCopyMemoryToImageInfo->dstImage);
 for (unsigned i = 0; i < pCopyMemoryToImageInfo->regionCount; i++) {
 const VkMemoryToImageCopyEXT *copy = &pCopyMemoryToImageInfo->pRegions[i];
 const VkImageAspectFlagBits aspects = copy->imageSubresource.aspectMask;
@@ -818,8 +818,8 @@ lvp_CopyMemoryToImageEXT(VkDevice _device, const VkCopyMemoryToImageInfoEXT *pCo
 VKAPI_ATTR VkResult VKAPI_CALL
 lvp_CopyImageToMemoryEXT(VkDevice _device, const VkCopyImageToMemoryInfoEXT *pCopyImageToMemoryInfo)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_image, image, pCopyImageToMemoryInfo->srcImage);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_image, image, pCopyImageToMemoryInfo->srcImage);
 for (unsigned i = 0; i < pCopyImageToMemoryInfo->regionCount; i++) {
 const VkImageToMemoryCopyEXT *copy = &pCopyImageToMemoryInfo->pRegions[i];
@@ -870,9 +870,9 @@ lvp_CopyImageToMemoryEXT(VkDevice _device, const VkCopyImageToMemoryInfoEXT *pCo
 VKAPI_ATTR VkResult VKAPI_CALL
 lvp_CopyImageToImageEXT(VkDevice _device, const VkCopyImageToImageInfoEXT *pCopyImageToImageInfo)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_image, src_image, pCopyImageToImageInfo->srcImage);
-LVP_FROM_HANDLE(lvp_image, dst_image, pCopyImageToImageInfo->dstImage);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_image, src_image, pCopyImageToImageInfo->srcImage);
+VK_FROM_HANDLE(lvp_image, dst_image, pCopyImageToImageInfo->dstImage);
 /* basically the same as handle_copy_image() */
 for (unsigned i = 0; i < pCopyImageToImageInfo->regionCount; i++) {
@@ -922,10 +922,10 @@ lvp_buffer_bind_sparse(struct lvp_device *device,
 struct lvp_queue *queue,
 VkSparseBufferMemoryBindInfo *bind)
 {
-LVP_FROM_HANDLE(lvp_buffer, buffer, bind->buffer);
+VK_FROM_HANDLE(lvp_buffer, buffer, bind->buffer);
 for (uint32_t i = 0; i < bind->bindCount; i++) {
-LVP_FROM_HANDLE(lvp_device_memory, mem, bind->pBinds[i].memory);
+VK_FROM_HANDLE(lvp_device_memory, mem, bind->pBinds[i].memory);
 device->pscreen->resource_bind_backing(device->pscreen,
 buffer->bo,
 mem ? mem->pmem : NULL,
@@ -942,11 +942,11 @@ lvp_image_bind_opaque_sparse(struct lvp_device *device,
 struct lvp_queue *queue,
 VkSparseImageOpaqueMemoryBindInfo *bind_info)
 {
-LVP_FROM_HANDLE(lvp_image, image, bind_info->image);
+VK_FROM_HANDLE(lvp_image, image, bind_info->image);
 for (uint32_t i = 0; i < bind_info->bindCount; i++) {
 const VkSparseMemoryBind *bind = &bind_info->pBinds[i];
-LVP_FROM_HANDLE(lvp_device_memory, mem, bind->memory);
+VK_FROM_HANDLE(lvp_device_memory, mem, bind->memory);
 uint32_t plane_index;
 uint32_t offset;
@@ -977,13 +977,13 @@ lvp_image_bind_sparse(struct lvp_device *device,
 struct lvp_queue *queue,
 VkSparseImageMemoryBindInfo *bind_info)
 {
-LVP_FROM_HANDLE(lvp_image, image, bind_info->image);
+VK_FROM_HANDLE(lvp_image, image, bind_info->image);
 enum pipe_format format = vk_format_to_pipe_format(image->vk.format);
 for (uint32_t i = 0; i < bind_info->bindCount; i++) {
 const VkSparseImageMemoryBind *bind = &bind_info->pBinds[i];
-LVP_FROM_HANDLE(lvp_device_memory, mem, bind->memory);
+VK_FROM_HANDLE(lvp_device_memory, mem, bind->memory);
 uint8_t plane = lvp_image_aspects_to_plane(image, bind->subresource.aspectMask);

View file

@@ -86,7 +86,7 @@ lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline, b
 vk_pipeline_layout_unref(&device->vk, &pipeline->layout->vk);
 for (unsigned i = 0; i < pipeline->num_groups; i++) {
-LVP_FROM_HANDLE(lvp_pipeline, p, pipeline->groups[i]);
+VK_FROM_HANDLE(lvp_pipeline, p, pipeline->groups[i]);
 lvp_pipeline_destroy(device, p, locked);
 }
@@ -108,8 +108,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyPipeline(
 VkPipeline _pipeline,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_pipeline, pipeline, _pipeline);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_pipeline, pipeline, _pipeline);
 if (!_pipeline)
 return;
@@ -787,7 +787,7 @@ lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
 if (libstate) {
 for (unsigned i = 0; i < libstate->libraryCount; i++) {
-LVP_FROM_HANDLE(lvp_pipeline, p, libstate->pLibraries[i]);
+VK_FROM_HANDLE(lvp_pipeline, p, libstate->pLibraries[i]);
 vk_graphics_pipeline_state_merge(&pipeline->graphics_state,
 &p->graphics_state);
 if (p->stages & VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT) {
@@ -868,7 +868,7 @@ lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
 }
 if (libstate) {
 for (unsigned i = 0; i < libstate->libraryCount; i++) {
-LVP_FROM_HANDLE(lvp_pipeline, p, libstate->pLibraries[i]);
+VK_FROM_HANDLE(lvp_pipeline, p, libstate->pLibraries[i]);
 if (p->stages & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT) {
 if (p->shaders[MESA_SHADER_FRAGMENT].pipeline_nir)
 lvp_pipeline_nir_ref(&pipeline->shaders[MESA_SHADER_FRAGMENT].pipeline_nir, p->shaders[MESA_SHADER_FRAGMENT].pipeline_nir);
@@ -940,8 +940,8 @@ lvp_graphics_pipeline_create(
 VkPipeline *pPipeline,
 bool group)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
 struct lvp_pipeline *pipeline;
 VkResult result;
@@ -1045,8 +1045,8 @@ lvp_compute_pipeline_create(
 VkPipelineCreateFlagBits2KHR flags,
 VkPipeline *pPipeline)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
 struct lvp_pipeline *pipeline;
 VkResult result;
@@ -1120,8 +1120,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyShaderEXT(
 VkShaderEXT _shader,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_shader, shader, _shader);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_shader, shader, _shader);
 if (!shader)
 return;
@@ -1244,7 +1244,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateShadersEXT(
 const VkAllocationCallbacks* pAllocator,
 VkShaderEXT* pShaders)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 unsigned i;
 for (i = 0; i < createInfoCount; i++) {
 pShaders[i] = create_shader_object(device, &pCreateInfos[i], pAllocator);
@@ -1267,7 +1267,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_GetShaderBinaryDataEXT(
 size_t* pDataSize,
 void* pData)
 {
-LVP_FROM_HANDLE(lvp_shader, shader, _shader);
+VK_FROM_HANDLE(lvp_shader, shader, _shader);
 VkResult ret = VK_SUCCESS;
 if (pData) {
 if (*pDataSize < shader->blob.size + SHA1_DIGEST_LENGTH + VK_UUID_SIZE) {
@@ -1296,7 +1296,7 @@ lvp_exec_graph_pipeline_create(VkDevice _device, VkPipelineCache _cache,
 VkPipelineCreateFlagBits2KHR flags,
 VkPipeline *out_pipeline)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 struct lvp_pipeline *pipeline;
 VkResult result;

View file

@@ -29,7 +29,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreatePipelineCache(
 const VkAllocationCallbacks* pAllocator,
 VkPipelineCache* pPipelineCache)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 struct lvp_pipeline_cache *cache;
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
@@ -58,8 +58,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyPipelineCache(
 VkPipelineCache _cache,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
 if (!_cache)
 return;

View file

@@ -659,9 +659,6 @@ get_binding_layout(const struct lvp_pipeline_layout *layout,
 return &get_set_layout(layout, set)->binding[binding];
 }
-#define LVP_FROM_HANDLE(__lvp_type, __name, __handle) \
-struct __lvp_type *__name = __lvp_type ## _from_handle(__handle)
 VK_DEFINE_HANDLE_CASTS(lvp_cmd_buffer, vk.base, VkCommandBuffer,
 VK_OBJECT_TYPE_COMMAND_BUFFER)
 VK_DEFINE_HANDLE_CASTS(lvp_device, vk.base, VkDevice, VK_OBJECT_TYPE_DEVICE)
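The deletion above is safe because VK_DEFINE_HANDLE_CASTS, which stays, is what generates the <type>_from_handle()/<type>_to_handle() helpers that both macros token-paste. As a rough illustration of the pattern (a sketch only: the real runtime macro goes through the vk.base vk_object_base member and asserts on the object type, both omitted here), the casts generated for lvp_device behave like:

static inline struct lvp_device *
lvp_device_from_handle(VkDevice handle)
{
   /* Simplified; the actual expansion validates the object type. */
   return (struct lvp_device *)(uintptr_t)handle;
}

static inline VkDevice
lvp_device_to_handle(struct lvp_device *obj)
{
   return (VkDevice)(uintptr_t)obj;
}

With those helpers in place, VK_FROM_HANDLE(lvp_device, device, _device) resolves exactly as the old LVP_FROM_HANDLE did, so the rename needs no behavioral changes.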

View file

@@ -30,7 +30,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateQueryPool(
 const VkAllocationCallbacks* pAllocator,
 VkQueryPool* pQueryPool)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_device, device, _device);
 uint32_t query_size = sizeof(struct pipe_query *);
 enum pipe_query_type pipeq;
@@ -98,8 +98,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyQueryPool(
 VkQueryPool _pool,
 const VkAllocationCallbacks* pAllocator)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_query_pool, pool, _pool);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_query_pool, pool, _pool);
 if (!pool)
 return;
@@ -123,8 +123,8 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_GetQueryPoolResults(
 VkDeviceSize stride,
 VkQueryResultFlags flags)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_query_pool, pool, queryPool);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_query_pool, pool, queryPool);
 VkResult vk_result = VK_SUCCESS;
 device->vk.dispatch_table.DeviceWaitIdle(_device);
@@ -228,8 +228,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_ResetQueryPool(
 uint32_t firstQuery,
 uint32_t queryCount)
 {
-LVP_FROM_HANDLE(lvp_device, device, _device);
-LVP_FROM_HANDLE(lvp_query_pool, pool, queryPool);
+VK_FROM_HANDLE(lvp_device, device, _device);
+VK_FROM_HANDLE(lvp_query_pool, pool, queryPool);
 if (pool->base_type >= PIPE_QUERY_TYPES)
 return;

View file

@@ -26,7 +26,7 @@
 static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
 lvp_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
 {
-LVP_FROM_HANDLE(lvp_physical_device, pdevice, physicalDevice);
+VK_FROM_HANDLE(lvp_physical_device, pdevice, physicalDevice);
 return vk_instance_get_proc_addr_unchecked(pdevice->vk.instance, pName);
 }