venus: split out vn_pipeline.[ch]
Move VkShaderModule and VkPipeline{,Layout,Cache} functions to the new
files.
Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Yiwei Zhang <zzyiwei@chromium.org>
Acked-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Reviewed-by: Ryan Neph <ryanneph@google.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10117>
parent 0433952153
commit 8b6ed71cf1
5 changed files with 380 additions and 345 deletions
@@ -36,6 +36,7 @@ libvn_files = files(
   'vn_cs.c',
   'vn_device.c',
   'vn_icd.c',
+  'vn_pipeline.c',
   'vn_ring.c',
   'vn_renderer_virtgpu.c',
   'vn_renderer_vtest.c',
@@ -6540,316 +6540,3 @@ vn_GetQueryPoolResults(VkDevice device,
    vk_free(alloc, packed_data);
    return vn_result(dev->instance, result);
 }
-
-/* shader module commands */
-
-VkResult
-vn_CreateShaderModule(VkDevice device,
-                      const VkShaderModuleCreateInfo *pCreateInfo,
-                      const VkAllocationCallbacks *pAllocator,
-                      VkShaderModule *pShaderModule)
-{
-   struct vn_device *dev = vn_device_from_handle(device);
-   const VkAllocationCallbacks *alloc =
-      pAllocator ? pAllocator : &dev->base.base.alloc;
-
-   struct vn_shader_module *mod =
-      vk_zalloc(alloc, sizeof(*mod), VN_DEFAULT_ALIGN,
-                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-   if (!mod)
-      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
-
-   vn_object_base_init(&mod->base, VK_OBJECT_TYPE_SHADER_MODULE, &dev->base);
-
-   VkShaderModule mod_handle = vn_shader_module_to_handle(mod);
-   vn_async_vkCreateShaderModule(dev->instance, device, pCreateInfo, NULL,
-                                 &mod_handle);
-
-   *pShaderModule = mod_handle;
-
-   return VK_SUCCESS;
-}
-
-void
-vn_DestroyShaderModule(VkDevice device,
-                       VkShaderModule shaderModule,
-                       const VkAllocationCallbacks *pAllocator)
-{
-   struct vn_device *dev = vn_device_from_handle(device);
-   struct vn_shader_module *mod = vn_shader_module_from_handle(shaderModule);
-   const VkAllocationCallbacks *alloc =
-      pAllocator ? pAllocator : &dev->base.base.alloc;
-
-   if (!mod)
-      return;
-
-   vn_async_vkDestroyShaderModule(dev->instance, device, shaderModule, NULL);
-
-   vn_object_base_fini(&mod->base);
-   vk_free(alloc, mod);
-}
-
-/* pipeline layout commands */
-
-VkResult
-vn_CreatePipelineLayout(VkDevice device,
-                        const VkPipelineLayoutCreateInfo *pCreateInfo,
-                        const VkAllocationCallbacks *pAllocator,
-                        VkPipelineLayout *pPipelineLayout)
-{
-   struct vn_device *dev = vn_device_from_handle(device);
-   const VkAllocationCallbacks *alloc =
-      pAllocator ? pAllocator : &dev->base.base.alloc;
-
-   struct vn_pipeline_layout *layout =
-      vk_zalloc(alloc, sizeof(*layout), VN_DEFAULT_ALIGN,
-                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-   if (!layout)
-      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
-
-   vn_object_base_init(&layout->base, VK_OBJECT_TYPE_PIPELINE_LAYOUT,
-                       &dev->base);
-
-   VkPipelineLayout layout_handle = vn_pipeline_layout_to_handle(layout);
-   vn_async_vkCreatePipelineLayout(dev->instance, device, pCreateInfo, NULL,
-                                   &layout_handle);
-
-   *pPipelineLayout = layout_handle;
-
-   return VK_SUCCESS;
-}
-
-void
-vn_DestroyPipelineLayout(VkDevice device,
-                         VkPipelineLayout pipelineLayout,
-                         const VkAllocationCallbacks *pAllocator)
-{
-   struct vn_device *dev = vn_device_from_handle(device);
-   struct vn_pipeline_layout *layout =
-      vn_pipeline_layout_from_handle(pipelineLayout);
-   const VkAllocationCallbacks *alloc =
-      pAllocator ? pAllocator : &dev->base.base.alloc;
-
-   if (!layout)
-      return;
-
-   vn_async_vkDestroyPipelineLayout(dev->instance, device, pipelineLayout,
-                                    NULL);
-
-   vn_object_base_fini(&layout->base);
-   vk_free(alloc, layout);
-}
-
-/* pipeline cache commands */
-
-VkResult
-vn_CreatePipelineCache(VkDevice device,
-                       const VkPipelineCacheCreateInfo *pCreateInfo,
-                       const VkAllocationCallbacks *pAllocator,
-                       VkPipelineCache *pPipelineCache)
-{
-   struct vn_device *dev = vn_device_from_handle(device);
-   const VkAllocationCallbacks *alloc =
-      pAllocator ? pAllocator : &dev->base.base.alloc;
-
-   struct vn_pipeline_cache *cache =
-      vk_zalloc(alloc, sizeof(*cache), VN_DEFAULT_ALIGN,
-                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-   if (!cache)
-      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
-
-   vn_object_base_init(&cache->base, VK_OBJECT_TYPE_PIPELINE_CACHE,
-                       &dev->base);
-
-   VkPipelineCacheCreateInfo local_create_info;
-   if (pCreateInfo->initialDataSize) {
-      local_create_info = *pCreateInfo;
-      local_create_info.pInitialData +=
-         sizeof(struct vk_pipeline_cache_header);
-      pCreateInfo = &local_create_info;
-   }
-
-   VkPipelineCache cache_handle = vn_pipeline_cache_to_handle(cache);
-   vn_async_vkCreatePipelineCache(dev->instance, device, pCreateInfo, NULL,
-                                  &cache_handle);
-
-   *pPipelineCache = cache_handle;
-
-   return VK_SUCCESS;
-}
-
-void
-vn_DestroyPipelineCache(VkDevice device,
-                        VkPipelineCache pipelineCache,
-                        const VkAllocationCallbacks *pAllocator)
-{
-   struct vn_device *dev = vn_device_from_handle(device);
-   struct vn_pipeline_cache *cache =
-      vn_pipeline_cache_from_handle(pipelineCache);
-   const VkAllocationCallbacks *alloc =
-      pAllocator ? pAllocator : &dev->base.base.alloc;
-
-   if (!cache)
-      return;
-
-   vn_async_vkDestroyPipelineCache(dev->instance, device, pipelineCache,
-                                   NULL);
-
-   vn_object_base_fini(&cache->base);
-   vk_free(alloc, cache);
-}
-
-VkResult
-vn_GetPipelineCacheData(VkDevice device,
-                        VkPipelineCache pipelineCache,
-                        size_t *pDataSize,
-                        void *pData)
-{
-   struct vn_device *dev = vn_device_from_handle(device);
-   struct vn_physical_device *physical_dev = dev->physical_device;
-
-   struct vk_pipeline_cache_header *header = pData;
-   VkResult result;
-   if (!pData) {
-      result = vn_call_vkGetPipelineCacheData(dev->instance, device,
-                                              pipelineCache, pDataSize, NULL);
-      if (result != VK_SUCCESS)
-         return vn_error(dev->instance, result);
-
-      *pDataSize += sizeof(*header);
-      return VK_SUCCESS;
-   }
-
-   if (*pDataSize <= sizeof(*header)) {
-      *pDataSize = 0;
-      return VK_INCOMPLETE;
-   }
-
-   const VkPhysicalDeviceProperties *props =
-      &physical_dev->properties.properties;
-   header->header_size = sizeof(*header);
-   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
-   header->vendor_id = props->vendorID;
-   header->device_id = props->deviceID;
-   memcpy(header->uuid, props->pipelineCacheUUID, VK_UUID_SIZE);
-
-   *pDataSize -= header->header_size;
-   result =
-      vn_call_vkGetPipelineCacheData(dev->instance, device, pipelineCache,
-                                     pDataSize, pData + header->header_size);
-   if (result < VK_SUCCESS)
-      return vn_error(dev->instance, result);
-
-   *pDataSize += header->header_size;
-
-   return result;
-}
-
-VkResult
-vn_MergePipelineCaches(VkDevice device,
-                       VkPipelineCache dstCache,
-                       uint32_t srcCacheCount,
-                       const VkPipelineCache *pSrcCaches)
-{
-   struct vn_device *dev = vn_device_from_handle(device);
-
-   vn_async_vkMergePipelineCaches(dev->instance, device, dstCache,
-                                  srcCacheCount, pSrcCaches);
-
-   return VK_SUCCESS;
-}
-
-/* pipeline commands */
-
-VkResult
-vn_CreateGraphicsPipelines(VkDevice device,
-                           VkPipelineCache pipelineCache,
-                           uint32_t createInfoCount,
-                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
-                           const VkAllocationCallbacks *pAllocator,
-                           VkPipeline *pPipelines)
-{
-   struct vn_device *dev = vn_device_from_handle(device);
-   const VkAllocationCallbacks *alloc =
-      pAllocator ? pAllocator : &dev->base.base.alloc;
-
-   for (uint32_t i = 0; i < createInfoCount; i++) {
-      struct vn_pipeline *pipeline =
-         vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
-                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-      if (!pipeline) {
-         for (uint32_t j = 0; j < i; j++)
-            vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));
-         memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
-         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
-      }
-
-      vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
-                          &dev->base);
-
-      VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
-      pPipelines[i] = pipeline_handle;
-   }
-
-   vn_async_vkCreateGraphicsPipelines(dev->instance, device, pipelineCache,
-                                      createInfoCount, pCreateInfos, NULL,
-                                      pPipelines);
-
-   return VK_SUCCESS;
-}
-
-VkResult
-vn_CreateComputePipelines(VkDevice device,
-                          VkPipelineCache pipelineCache,
-                          uint32_t createInfoCount,
-                          const VkComputePipelineCreateInfo *pCreateInfos,
-                          const VkAllocationCallbacks *pAllocator,
-                          VkPipeline *pPipelines)
-{
-   struct vn_device *dev = vn_device_from_handle(device);
-   const VkAllocationCallbacks *alloc =
-      pAllocator ? pAllocator : &dev->base.base.alloc;
-
-   for (uint32_t i = 0; i < createInfoCount; i++) {
-      struct vn_pipeline *pipeline =
-         vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
-                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-      if (!pipeline) {
-         for (uint32_t j = 0; j < i; j++)
-            vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));
-         memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
-         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
-      }
-
-      vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
-                          &dev->base);
-
-      VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
-      pPipelines[i] = pipeline_handle;
-   }
-
-   vn_async_vkCreateComputePipelines(dev->instance, device, pipelineCache,
-                                     createInfoCount, pCreateInfos, NULL,
-                                     pPipelines);
-
-   return VK_SUCCESS;
-}
-
-void
-vn_DestroyPipeline(VkDevice device,
-                   VkPipeline _pipeline,
-                   const VkAllocationCallbacks *pAllocator)
-{
-   struct vn_device *dev = vn_device_from_handle(device);
-   struct vn_pipeline *pipeline = vn_pipeline_from_handle(_pipeline);
-   const VkAllocationCallbacks *alloc =
-      pAllocator ? pAllocator : &dev->base.base.alloc;
-
-   if (!pipeline)
-      return;
-
-   vn_async_vkDestroyPipeline(dev->instance, device, _pipeline, NULL);
-
-   vn_object_base_fini(&pipeline->base);
-   vk_free(alloc, pipeline);
-}
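A note on the pipeline cache entrypoints above (they are only moved by this commit, not changed): the guest driver prepends its own vk_pipeline_cache_header to whatever blob the host renderer returns, so the application sees this device's vendorID, deviceID and pipelineCacheUUID, and vn_CreatePipelineCache skips that guest header again before forwarding initial data to the host. A minimal sketch of the layout and size bookkeeping follows; the struct and helper are illustrative stand-ins (the driver uses struct vk_pipeline_cache_header from Mesa's common Vulkan code), not part of the diff.

#include <stddef.h>
#include <stdint.h>

/* Mirrors the Vulkan pipeline cache header layout filled in by
 * vn_GetPipelineCacheData() above: header_size, header_version, vendor_id,
 * device_id, uuid[VK_UUID_SIZE].  Illustrative only; venus uses
 * struct vk_pipeline_cache_header for this.
 */
struct example_cache_header {
   uint32_t header_size;    /* sizeof(struct example_cache_header) */
   uint32_t header_version; /* VK_PIPELINE_CACHE_HEADER_VERSION_ONE */
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t uuid[16];        /* VK_UUID_SIZE */
};

/* The size reported to the application is the host blob size plus the guest
 * header; the host blob itself is written at pData + sizeof(header). */
static size_t
example_reported_cache_size(size_t host_blob_size)
{
   return sizeof(struct example_cache_header) + host_blob_size;
}

Going the other way, vn_CreatePipelineCache() advances pInitialData by the same sizeof(struct vk_pipeline_cache_header), so the host only ever sees its own blob.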
@@ -355,38 +355,6 @@ VK_DEFINE_NONDISP_HANDLE_CASTS(vn_query_pool,
                                VkQueryPool,
                                VK_OBJECT_TYPE_QUERY_POOL)
 
-struct vn_shader_module {
-   struct vn_object_base base;
-};
-VK_DEFINE_NONDISP_HANDLE_CASTS(vn_shader_module,
-                               base.base,
-                               VkShaderModule,
-                               VK_OBJECT_TYPE_SHADER_MODULE)
-
-struct vn_pipeline_layout {
-   struct vn_object_base base;
-};
-VK_DEFINE_NONDISP_HANDLE_CASTS(vn_pipeline_layout,
-                               base.base,
-                               VkPipelineLayout,
-                               VK_OBJECT_TYPE_PIPELINE_LAYOUT)
-
-struct vn_pipeline_cache {
-   struct vn_object_base base;
-};
-VK_DEFINE_NONDISP_HANDLE_CASTS(vn_pipeline_cache,
-                               base.base,
-                               VkPipelineCache,
-                               VK_OBJECT_TYPE_PIPELINE_CACHE)
-
-struct vn_pipeline {
-   struct vn_object_base base;
-};
-VK_DEFINE_NONDISP_HANDLE_CASTS(vn_pipeline,
-                               base.base,
-                               VkPipeline,
-                               VK_OBJECT_TYPE_PIPELINE)
-
 VkResult
 vn_instance_submit_roundtrip(struct vn_instance *instance,
                              uint32_t *roundtrip_seqno);
src/virtio/vulkan/vn_pipeline.c (new file, 331 lines)
@@ -0,0 +1,331 @@
+/*
+ * Copyright 2019 Google LLC
+ * SPDX-License-Identifier: MIT
+ *
+ * based in part on anv and radv which are:
+ * Copyright © 2015 Intel Corporation
+ * Copyright © 2016 Red Hat.
+ * Copyright © 2016 Bas Nieuwenhuizen
+ */
+
+#include "vn_pipeline.h"
+
+#include "venus-protocol/vn_protocol_driver_pipeline.h"
+#include "venus-protocol/vn_protocol_driver_pipeline_cache.h"
+#include "venus-protocol/vn_protocol_driver_pipeline_layout.h"
+#include "venus-protocol/vn_protocol_driver_shader_module.h"
+
+#include "vn_device.h"
+
+/* shader module commands */
+
+VkResult
+vn_CreateShaderModule(VkDevice device,
+                      const VkShaderModuleCreateInfo *pCreateInfo,
+                      const VkAllocationCallbacks *pAllocator,
+                      VkShaderModule *pShaderModule)
+{
+   struct vn_device *dev = vn_device_from_handle(device);
+   const VkAllocationCallbacks *alloc =
+      pAllocator ? pAllocator : &dev->base.base.alloc;
+
+   struct vn_shader_module *mod =
+      vk_zalloc(alloc, sizeof(*mod), VN_DEFAULT_ALIGN,
+                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!mod)
+      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vn_object_base_init(&mod->base, VK_OBJECT_TYPE_SHADER_MODULE, &dev->base);
+
+   VkShaderModule mod_handle = vn_shader_module_to_handle(mod);
+   vn_async_vkCreateShaderModule(dev->instance, device, pCreateInfo, NULL,
+                                 &mod_handle);
+
+   *pShaderModule = mod_handle;
+
+   return VK_SUCCESS;
+}
+
+void
+vn_DestroyShaderModule(VkDevice device,
+                       VkShaderModule shaderModule,
+                       const VkAllocationCallbacks *pAllocator)
+{
+   struct vn_device *dev = vn_device_from_handle(device);
+   struct vn_shader_module *mod = vn_shader_module_from_handle(shaderModule);
+   const VkAllocationCallbacks *alloc =
+      pAllocator ? pAllocator : &dev->base.base.alloc;
+
+   if (!mod)
+      return;
+
+   vn_async_vkDestroyShaderModule(dev->instance, device, shaderModule, NULL);
+
+   vn_object_base_fini(&mod->base);
+   vk_free(alloc, mod);
+}
+
+/* pipeline layout commands */
+
+VkResult
+vn_CreatePipelineLayout(VkDevice device,
+                        const VkPipelineLayoutCreateInfo *pCreateInfo,
+                        const VkAllocationCallbacks *pAllocator,
+                        VkPipelineLayout *pPipelineLayout)
+{
+   struct vn_device *dev = vn_device_from_handle(device);
+   const VkAllocationCallbacks *alloc =
+      pAllocator ? pAllocator : &dev->base.base.alloc;
+
+   struct vn_pipeline_layout *layout =
+      vk_zalloc(alloc, sizeof(*layout), VN_DEFAULT_ALIGN,
+                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!layout)
+      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vn_object_base_init(&layout->base, VK_OBJECT_TYPE_PIPELINE_LAYOUT,
+                       &dev->base);
+
+   VkPipelineLayout layout_handle = vn_pipeline_layout_to_handle(layout);
+   vn_async_vkCreatePipelineLayout(dev->instance, device, pCreateInfo, NULL,
+                                   &layout_handle);
+
+   *pPipelineLayout = layout_handle;
+
+   return VK_SUCCESS;
+}
+
+void
+vn_DestroyPipelineLayout(VkDevice device,
+                         VkPipelineLayout pipelineLayout,
+                         const VkAllocationCallbacks *pAllocator)
+{
+   struct vn_device *dev = vn_device_from_handle(device);
+   struct vn_pipeline_layout *layout =
+      vn_pipeline_layout_from_handle(pipelineLayout);
+   const VkAllocationCallbacks *alloc =
+      pAllocator ? pAllocator : &dev->base.base.alloc;
+
+   if (!layout)
+      return;
+
+   vn_async_vkDestroyPipelineLayout(dev->instance, device, pipelineLayout,
+                                    NULL);
+
+   vn_object_base_fini(&layout->base);
+   vk_free(alloc, layout);
+}
+
+/* pipeline cache commands */
+
+VkResult
+vn_CreatePipelineCache(VkDevice device,
+                       const VkPipelineCacheCreateInfo *pCreateInfo,
+                       const VkAllocationCallbacks *pAllocator,
+                       VkPipelineCache *pPipelineCache)
+{
+   struct vn_device *dev = vn_device_from_handle(device);
+   const VkAllocationCallbacks *alloc =
+      pAllocator ? pAllocator : &dev->base.base.alloc;
+
+   struct vn_pipeline_cache *cache =
+      vk_zalloc(alloc, sizeof(*cache), VN_DEFAULT_ALIGN,
+                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!cache)
+      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vn_object_base_init(&cache->base, VK_OBJECT_TYPE_PIPELINE_CACHE,
+                       &dev->base);
+
+   VkPipelineCacheCreateInfo local_create_info;
+   if (pCreateInfo->initialDataSize) {
+      local_create_info = *pCreateInfo;
+      local_create_info.pInitialData +=
+         sizeof(struct vk_pipeline_cache_header);
+      pCreateInfo = &local_create_info;
+   }
+
+   VkPipelineCache cache_handle = vn_pipeline_cache_to_handle(cache);
+   vn_async_vkCreatePipelineCache(dev->instance, device, pCreateInfo, NULL,
+                                  &cache_handle);
+
+   *pPipelineCache = cache_handle;
+
+   return VK_SUCCESS;
+}
+
+void
+vn_DestroyPipelineCache(VkDevice device,
+                        VkPipelineCache pipelineCache,
+                        const VkAllocationCallbacks *pAllocator)
+{
+   struct vn_device *dev = vn_device_from_handle(device);
+   struct vn_pipeline_cache *cache =
+      vn_pipeline_cache_from_handle(pipelineCache);
+   const VkAllocationCallbacks *alloc =
+      pAllocator ? pAllocator : &dev->base.base.alloc;
+
+   if (!cache)
+      return;
+
+   vn_async_vkDestroyPipelineCache(dev->instance, device, pipelineCache,
+                                   NULL);
+
+   vn_object_base_fini(&cache->base);
+   vk_free(alloc, cache);
+}
+
+VkResult
+vn_GetPipelineCacheData(VkDevice device,
+                        VkPipelineCache pipelineCache,
+                        size_t *pDataSize,
+                        void *pData)
+{
+   struct vn_device *dev = vn_device_from_handle(device);
+   struct vn_physical_device *physical_dev = dev->physical_device;
+
+   struct vk_pipeline_cache_header *header = pData;
+   VkResult result;
+   if (!pData) {
+      result = vn_call_vkGetPipelineCacheData(dev->instance, device,
+                                              pipelineCache, pDataSize, NULL);
+      if (result != VK_SUCCESS)
+         return vn_error(dev->instance, result);
+
+      *pDataSize += sizeof(*header);
+      return VK_SUCCESS;
+   }
+
+   if (*pDataSize <= sizeof(*header)) {
+      *pDataSize = 0;
+      return VK_INCOMPLETE;
+   }
+
+   const VkPhysicalDeviceProperties *props =
+      &physical_dev->properties.properties;
+   header->header_size = sizeof(*header);
+   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
+   header->vendor_id = props->vendorID;
+   header->device_id = props->deviceID;
+   memcpy(header->uuid, props->pipelineCacheUUID, VK_UUID_SIZE);
+
+   *pDataSize -= header->header_size;
+   result =
+      vn_call_vkGetPipelineCacheData(dev->instance, device, pipelineCache,
+                                     pDataSize, pData + header->header_size);
+   if (result < VK_SUCCESS)
+      return vn_error(dev->instance, result);
+
+   *pDataSize += header->header_size;
+
+   return result;
+}
+
+VkResult
+vn_MergePipelineCaches(VkDevice device,
+                       VkPipelineCache dstCache,
+                       uint32_t srcCacheCount,
+                       const VkPipelineCache *pSrcCaches)
+{
+   struct vn_device *dev = vn_device_from_handle(device);
+
+   vn_async_vkMergePipelineCaches(dev->instance, device, dstCache,
+                                  srcCacheCount, pSrcCaches);
+
+   return VK_SUCCESS;
+}
+
+/* pipeline commands */
+
+VkResult
+vn_CreateGraphicsPipelines(VkDevice device,
+                           VkPipelineCache pipelineCache,
+                           uint32_t createInfoCount,
+                           const VkGraphicsPipelineCreateInfo *pCreateInfos,
+                           const VkAllocationCallbacks *pAllocator,
+                           VkPipeline *pPipelines)
+{
+   struct vn_device *dev = vn_device_from_handle(device);
+   const VkAllocationCallbacks *alloc =
+      pAllocator ? pAllocator : &dev->base.base.alloc;
+
+   for (uint32_t i = 0; i < createInfoCount; i++) {
+      struct vn_pipeline *pipeline =
+         vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
+                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      if (!pipeline) {
+         for (uint32_t j = 0; j < i; j++)
+            vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));
+         memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
+         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
+                          &dev->base);
+
+      VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
+      pPipelines[i] = pipeline_handle;
+   }
+
+   vn_async_vkCreateGraphicsPipelines(dev->instance, device, pipelineCache,
+                                      createInfoCount, pCreateInfos, NULL,
+                                      pPipelines);
+
+   return VK_SUCCESS;
+}
+
+VkResult
+vn_CreateComputePipelines(VkDevice device,
+                          VkPipelineCache pipelineCache,
+                          uint32_t createInfoCount,
+                          const VkComputePipelineCreateInfo *pCreateInfos,
+                          const VkAllocationCallbacks *pAllocator,
+                          VkPipeline *pPipelines)
+{
+   struct vn_device *dev = vn_device_from_handle(device);
+   const VkAllocationCallbacks *alloc =
+      pAllocator ? pAllocator : &dev->base.base.alloc;
+
+   for (uint32_t i = 0; i < createInfoCount; i++) {
+      struct vn_pipeline *pipeline =
+         vk_zalloc(alloc, sizeof(*pipeline), VN_DEFAULT_ALIGN,
+                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      if (!pipeline) {
+         for (uint32_t j = 0; j < i; j++)
+            vk_free(alloc, vn_pipeline_from_handle(pPipelines[j]));
+         memset(pPipelines, 0, sizeof(*pPipelines) * createInfoCount);
+         return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+      }
+
+      vn_object_base_init(&pipeline->base, VK_OBJECT_TYPE_PIPELINE,
+                          &dev->base);
+
+      VkPipeline pipeline_handle = vn_pipeline_to_handle(pipeline);
+      pPipelines[i] = pipeline_handle;
+   }
+
+   vn_async_vkCreateComputePipelines(dev->instance, device, pipelineCache,
+                                     createInfoCount, pCreateInfos, NULL,
+                                     pPipelines);
+
+   return VK_SUCCESS;
+}
+
+void
+vn_DestroyPipeline(VkDevice device,
+                   VkPipeline _pipeline,
+                   const VkAllocationCallbacks *pAllocator)
+{
+   struct vn_device *dev = vn_device_from_handle(device);
+   struct vn_pipeline *pipeline = vn_pipeline_from_handle(_pipeline);
+   const VkAllocationCallbacks *alloc =
+      pAllocator ? pAllocator : &dev->base.base.alloc;
+
+   if (!pipeline)
+      return;
+
+   vn_async_vkDestroyPipeline(dev->instance, device, _pipeline, NULL);
+
+   vn_object_base_fini(&pipeline->base);
+   vk_free(alloc, pipeline);
+}
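Nothing changes for applications: these entrypoints are still reached through the ordinary Vulkan API. The following application-side sketch (not driver code) exercises the create and destroy paths that now live in vn_pipeline.c; it assumes a valid VkDevice and a compute shader SPIR-V blob whose entry point is "main".

#include <vulkan/vulkan.h>

/* Creates a compute pipeline; the vkCreate and vkDestroy calls below land in
 * vn_CreateShaderModule, vn_CreatePipelineLayout, vn_CreateComputePipelines,
 * vn_DestroyShaderModule and vn_DestroyPipelineLayout above. */
static VkPipeline
example_create_compute_pipeline(VkDevice dev,
                                const uint32_t *spirv,
                                size_t spirv_size)
{
   const VkShaderModuleCreateInfo mod_info = {
      .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
      .codeSize = spirv_size,
      .pCode = spirv,
   };
   VkShaderModule mod;
   if (vkCreateShaderModule(dev, &mod_info, NULL, &mod) != VK_SUCCESS)
      return VK_NULL_HANDLE;

   /* no descriptor sets or push constants in this sketch */
   const VkPipelineLayoutCreateInfo layout_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
   };
   VkPipelineLayout layout;
   if (vkCreatePipelineLayout(dev, &layout_info, NULL, &layout) != VK_SUCCESS) {
      vkDestroyShaderModule(dev, mod, NULL);
      return VK_NULL_HANDLE;
   }

   const VkComputePipelineCreateInfo pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = {
         .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
         .stage = VK_SHADER_STAGE_COMPUTE_BIT,
         .module = mod,
         .pName = "main",
      },
      .layout = layout,
   };
   VkPipeline pipeline = VK_NULL_HANDLE;
   vkCreateComputePipelines(dev, VK_NULL_HANDLE, 1, &pipeline_info, NULL,
                            &pipeline);

   /* the shader module is no longer needed once the pipeline exists, and the
    * empty layout is not used again in this sketch */
   vkDestroyShaderModule(dev, mod, NULL);
   vkDestroyPipelineLayout(dev, layout, NULL);
   return pipeline;
}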
src/virtio/vulkan/vn_pipeline.h (new file, 48 lines)
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2019 Google LLC
+ * SPDX-License-Identifier: MIT
+ *
+ * based in part on anv and radv which are:
+ * Copyright © 2015 Intel Corporation
+ * Copyright © 2016 Red Hat.
+ * Copyright © 2016 Bas Nieuwenhuizen
+ */
+
+#ifndef VN_PIPELINE_H
+#define VN_PIPELINE_H
+
+#include "vn_common.h"
+
+struct vn_shader_module {
+   struct vn_object_base base;
+};
+VK_DEFINE_NONDISP_HANDLE_CASTS(vn_shader_module,
+                               base.base,
+                               VkShaderModule,
+                               VK_OBJECT_TYPE_SHADER_MODULE)
+
+struct vn_pipeline_layout {
+   struct vn_object_base base;
+};
+VK_DEFINE_NONDISP_HANDLE_CASTS(vn_pipeline_layout,
+                               base.base,
+                               VkPipelineLayout,
+                               VK_OBJECT_TYPE_PIPELINE_LAYOUT)
+
+struct vn_pipeline_cache {
+   struct vn_object_base base;
+};
+VK_DEFINE_NONDISP_HANDLE_CASTS(vn_pipeline_cache,
+                               base.base,
+                               VkPipelineCache,
+                               VK_OBJECT_TYPE_PIPELINE_CACHE)
+
+struct vn_pipeline {
+   struct vn_object_base base;
+};
+VK_DEFINE_NONDISP_HANDLE_CASTS(vn_pipeline,
+                               base.base,
+                               VkPipeline,
+                               VK_OBJECT_TYPE_PIPELINE)
+
+#endif /* VN_PIPELINE_H */
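The VK_DEFINE_NONDISP_HANDLE_CASTS() invocations above come from Mesa's common Vulkan object code; each one generates the vn_*_to_handle()/vn_*_from_handle() pair that vn_pipeline.c uses to convert between a driver struct (through its embedded base.base vk_object_base) and the opaque non-dispatchable handle. A minimal round-trip sketch; the function is illustrative, not part of the driver:

#include <assert.h>

#include "vn_pipeline.h"

static void
example_handle_round_trip(struct vn_pipeline *pipeline)
{
   /* driver struct to Vulkan handle, as when filling pPipelines[i] */
   VkPipeline handle = vn_pipeline_to_handle(pipeline);

   /* Vulkan handle back to driver struct, as in vn_DestroyPipeline() */
   struct vn_pipeline *same = vn_pipeline_from_handle(handle);
   assert(same == pipeline);
}

The same pattern applies to vn_shader_module, vn_pipeline_layout and vn_pipeline_cache.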