Mirror of https://gitlab.freedesktop.org/mesa/mesa.git (synced 2025-12-24 17:30:12 +01:00)
lavapipe: rename vallium to lavapipe
Just a cooler name, and a lot easier to search for. Thanks, Marek.

Acked-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6921>
parent 5e8791a0bf
commit e94fd4cc65
28 changed files with 1502 additions and 1502 deletions
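The diff below is a mechanical rename of the vallium prefixes (val_/VAL_) to the lavapipe ones (lvp_/LVP_), together with the matching file renames. The commit does not record how the rewrite was produced; the following is only a minimal Python sketch of how such a prefix rename could be reproduced over a driver tree. The directory path and the prefix table are assumptions for illustration, not taken from the commit.

#!/usr/bin/env python3
# Hypothetical sketch (not the script used for this commit): rewrite the
# val_/VAL_/vallium prefixes to lvp_/LVP_/lavapipe and rename the files.
import pathlib

ROOT = pathlib.Path("src/gallium/frontends/lavapipe")   # assumed location
RENAMES = {"val_": "lvp_", "VAL_": "LVP_", "vallium": "lavapipe"}

for path in list(ROOT.rglob("*")):
    if path.suffix not in {".c", ".h", ".py"}:
        continue
    text = path.read_text()
    for old, new in RENAMES.items():
        text = text.replace(old, new)
    path.write_text(text)
    # val_foo.c -> lvp_foo.c so includes like "lvp_private.h" keep resolving.
    if path.name.startswith("val_"):
        path.rename(path.with_name("lvp_" + path.name[len("val_"):]))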
(File diff suppressed because it is too large.)
@@ -21,18 +21,18 @@
 * IN THE SOFTWARE.
 */
-#include "val_private.h"
+#include "lvp_private.h"
 #include "vk_util.h"
 #include "u_math.h"
-VkResult val_CreateDescriptorSetLayout(
+VkResult lvp_CreateDescriptorSetLayout(
 VkDevice _device,
 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
 const VkAllocationCallbacks* pAllocator,
 VkDescriptorSetLayout* pSetLayout)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-struct val_descriptor_set_layout *set_layout;
+LVP_FROM_HANDLE(lvp_device, device, _device);
+struct lvp_descriptor_set_layout *set_layout;
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
 uint32_t max_binding = 0;
@@ -43,9 +43,9 @@ VkResult val_CreateDescriptorSetLayout(
 immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
 }
-size_t size = sizeof(struct val_descriptor_set_layout) +
+size_t size = sizeof(struct lvp_descriptor_set_layout) +
 (max_binding + 1) * sizeof(set_layout->binding[0]) +
-immutable_sampler_count * sizeof(struct val_sampler *);
+immutable_sampler_count * sizeof(struct lvp_sampler *);
 set_layout = vk_zalloc2(&device->alloc, pAllocator, size, 8,
 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
@@ -55,8 +55,8 @@ VkResult val_CreateDescriptorSetLayout(
 vk_object_base_init(&device->vk, &set_layout->base,
 VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);
 /* We just allocate all the samplers at the end of the struct */
-struct val_sampler **samplers =
-(struct val_sampler **)&set_layout->binding[max_binding + 1];
+struct lvp_sampler **samplers =
+(struct lvp_sampler **)&set_layout->binding[max_binding + 1];
 set_layout->binding_count = max_binding + 1;
 set_layout->shader_stages = 0;
@@ -90,7 +90,7 @@ VkResult val_CreateDescriptorSetLayout(
 switch (binding->descriptorType) {
 case VK_DESCRIPTOR_TYPE_SAMPLER:
 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
-val_foreach_stage(s, binding->stageFlags) {
+lvp_foreach_stage(s, binding->stageFlags) {
 set_layout->binding[b].stage[s].sampler_index = set_layout->stage[s].sampler_count;
 set_layout->stage[s].sampler_count += binding->descriptorCount;
 }
@@ -102,14 +102,14 @@ VkResult val_CreateDescriptorSetLayout(
 switch (binding->descriptorType) {
 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
-val_foreach_stage(s, binding->stageFlags) {
+lvp_foreach_stage(s, binding->stageFlags) {
 set_layout->binding[b].stage[s].const_buffer_index = set_layout->stage[s].const_buffer_count;
 set_layout->stage[s].const_buffer_count += binding->descriptorCount;
 }
 break;
 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
-val_foreach_stage(s, binding->stageFlags) {
+lvp_foreach_stage(s, binding->stageFlags) {
 set_layout->binding[b].stage[s].shader_buffer_index = set_layout->stage[s].shader_buffer_count;
 set_layout->stage[s].shader_buffer_count += binding->descriptorCount;
 }
@@ -118,7 +118,7 @@ VkResult val_CreateDescriptorSetLayout(
 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
-val_foreach_stage(s, binding->stageFlags) {
+lvp_foreach_stage(s, binding->stageFlags) {
 set_layout->binding[b].stage[s].image_index = set_layout->stage[s].image_count;
 set_layout->stage[s].image_count += binding->descriptorCount;
 }
@@ -126,7 +126,7 @@ VkResult val_CreateDescriptorSetLayout(
 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
-val_foreach_stage(s, binding->stageFlags) {
+lvp_foreach_stage(s, binding->stageFlags) {
 set_layout->binding[b].stage[s].sampler_view_index = set_layout->stage[s].sampler_view_count;
 set_layout->stage[s].sampler_view_count += binding->descriptorCount;
 }
@@ -141,7 +141,7 @@ VkResult val_CreateDescriptorSetLayout(
 for (uint32_t i = 0; i < binding->descriptorCount; i++)
 set_layout->binding[b].immutable_samplers[i] =
-val_sampler_from_handle(binding->pImmutableSamplers[i]);
+lvp_sampler_from_handle(binding->pImmutableSamplers[i]);
 } else {
 set_layout->binding[b].immutable_samplers = NULL;
 }
@@ -151,18 +151,18 @@ VkResult val_CreateDescriptorSetLayout(
 set_layout->dynamic_offset_count = dynamic_offset_count;
-*pSetLayout = val_descriptor_set_layout_to_handle(set_layout);
+*pSetLayout = lvp_descriptor_set_layout_to_handle(set_layout);
 return VK_SUCCESS;
 }
-void val_DestroyDescriptorSetLayout(
+void lvp_DestroyDescriptorSetLayout(
 VkDevice _device,
 VkDescriptorSetLayout _set_layout,
 const VkAllocationCallbacks* pAllocator)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_descriptor_set_layout, set_layout, _set_layout);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_descriptor_set_layout, set_layout, _set_layout);
 if (!_set_layout)
 return;
@@ -170,14 +170,14 @@ void val_DestroyDescriptorSetLayout(
 vk_free2(&device->alloc, pAllocator, set_layout);
 }
-VkResult val_CreatePipelineLayout(
+VkResult lvp_CreatePipelineLayout(
 VkDevice _device,
 const VkPipelineLayoutCreateInfo* pCreateInfo,
 const VkAllocationCallbacks* pAllocator,
 VkPipelineLayout* pPipelineLayout)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-struct val_pipeline_layout *layout;
+LVP_FROM_HANDLE(lvp_device, device, _device);
+struct lvp_pipeline_layout *layout;
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
@@ -191,7 +191,7 @@ VkResult val_CreatePipelineLayout(
 layout->num_sets = pCreateInfo->setLayoutCount;
 for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
-VAL_FROM_HANDLE(val_descriptor_set_layout, set_layout,
+LVP_FROM_HANDLE(lvp_descriptor_set_layout, set_layout,
 pCreateInfo->pSetLayouts[set]);
 layout->set[set].layout = set_layout;
 }
@@ -203,18 +203,18 @@ VkResult val_CreatePipelineLayout(
 range->offset + range->size);
 }
 layout->push_constant_size = align(layout->push_constant_size, 16);
-*pPipelineLayout = val_pipeline_layout_to_handle(layout);
+*pPipelineLayout = lvp_pipeline_layout_to_handle(layout);
 return VK_SUCCESS;
 }
-void val_DestroyPipelineLayout(
+void lvp_DestroyPipelineLayout(
 VkDevice _device,
 VkPipelineLayout _pipelineLayout,
 const VkAllocationCallbacks* pAllocator)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_pipeline_layout, pipeline_layout, _pipelineLayout);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_pipeline_layout, pipeline_layout, _pipelineLayout);
 if (!_pipelineLayout)
 return;
@@ -223,11 +223,11 @@ void val_DestroyPipelineLayout(
 }
 VkResult
-val_descriptor_set_create(struct val_device *device,
-const struct val_descriptor_set_layout *layout,
-struct val_descriptor_set **out_set)
+lvp_descriptor_set_create(struct lvp_device *device,
+const struct lvp_descriptor_set_layout *layout,
+struct lvp_descriptor_set **out_set)
 {
-struct val_descriptor_set *set;
+struct lvp_descriptor_set *set;
 size_t size = sizeof(*set) + layout->size * sizeof(set->descriptors[0]);
 set = vk_alloc(&device->alloc /* XXX: Use the pool */, size, 8,
@@ -245,7 +245,7 @@ val_descriptor_set_create(struct val_device *device,
 set->layout = layout;
 /* Go through and fill out immutable samplers if we have any */
-struct val_descriptor *desc = set->descriptors;
+struct lvp_descriptor *desc = set->descriptors;
 for (uint32_t b = 0; b < layout->binding_count; b++) {
 if (layout->binding[b].immutable_samplers) {
 for (uint32_t i = 0; i < layout->binding[b].array_size; i++)
@@ -260,62 +260,62 @@ val_descriptor_set_create(struct val_device *device,
 }
 void
-val_descriptor_set_destroy(struct val_device *device,
-struct val_descriptor_set *set)
+lvp_descriptor_set_destroy(struct lvp_device *device,
+struct lvp_descriptor_set *set)
 {
 vk_object_base_finish(&set->base);
 vk_free(&device->alloc, set);
 }
-VkResult val_AllocateDescriptorSets(
+VkResult lvp_AllocateDescriptorSets(
 VkDevice _device,
 const VkDescriptorSetAllocateInfo* pAllocateInfo,
 VkDescriptorSet* pDescriptorSets)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_descriptor_pool, pool, pAllocateInfo->descriptorPool);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_descriptor_pool, pool, pAllocateInfo->descriptorPool);
 VkResult result = VK_SUCCESS;
-struct val_descriptor_set *set;
+struct lvp_descriptor_set *set;
 uint32_t i;
 for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
-VAL_FROM_HANDLE(val_descriptor_set_layout, layout,
+LVP_FROM_HANDLE(lvp_descriptor_set_layout, layout,
 pAllocateInfo->pSetLayouts[i]);
-result = val_descriptor_set_create(device, layout, &set);
+result = lvp_descriptor_set_create(device, layout, &set);
 if (result != VK_SUCCESS)
 break;
 list_addtail(&set->link, &pool->sets);
-pDescriptorSets[i] = val_descriptor_set_to_handle(set);
+pDescriptorSets[i] = lvp_descriptor_set_to_handle(set);
 }
 if (result != VK_SUCCESS)
-val_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
+lvp_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
 i, pDescriptorSets);
 return result;
 }
-VkResult val_FreeDescriptorSets(
+VkResult lvp_FreeDescriptorSets(
 VkDevice _device,
 VkDescriptorPool descriptorPool,
 uint32_t count,
 const VkDescriptorSet* pDescriptorSets)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
+LVP_FROM_HANDLE(lvp_device, device, _device);
 for (uint32_t i = 0; i < count; i++) {
-VAL_FROM_HANDLE(val_descriptor_set, set, pDescriptorSets[i]);
+LVP_FROM_HANDLE(lvp_descriptor_set, set, pDescriptorSets[i]);
 if (!set)
 continue;
 list_del(&set->link);
-val_descriptor_set_destroy(device, set);
+lvp_descriptor_set_destroy(device, set);
 }
 return VK_SUCCESS;
 }
-void val_UpdateDescriptorSets(
+void lvp_UpdateDescriptorSets(
 VkDevice _device,
 uint32_t descriptorWriteCount,
 const VkWriteDescriptorSet* pDescriptorWrites,
@@ -324,20 +324,20 @@ void val_UpdateDescriptorSets(
 {
 for (uint32_t i = 0; i < descriptorWriteCount; i++) {
 const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
-VAL_FROM_HANDLE(val_descriptor_set, set, write->dstSet);
-const struct val_descriptor_set_binding_layout *bind_layout =
+LVP_FROM_HANDLE(lvp_descriptor_set, set, write->dstSet);
+const struct lvp_descriptor_set_binding_layout *bind_layout =
 &set->layout->binding[write->dstBinding];
-struct val_descriptor *desc =
+struct lvp_descriptor *desc =
 &set->descriptors[bind_layout->descriptor_index];
 desc += write->dstArrayElement;
 switch (write->descriptorType) {
 case VK_DESCRIPTOR_TYPE_SAMPLER:
 for (uint32_t j = 0; j < write->descriptorCount; j++) {
-VAL_FROM_HANDLE(val_sampler, sampler,
+LVP_FROM_HANDLE(lvp_sampler, sampler,
 write->pImageInfo[j].sampler);
-desc[j] = (struct val_descriptor) {
+desc[j] = (struct lvp_descriptor) {
 .type = VK_DESCRIPTOR_TYPE_SAMPLER,
 .sampler = sampler,
 };
@@ -346,9 +346,9 @@ void val_UpdateDescriptorSets(
 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
 for (uint32_t j = 0; j < write->descriptorCount; j++) {
-VAL_FROM_HANDLE(val_image_view, iview,
+LVP_FROM_HANDLE(lvp_image_view, iview,
 write->pImageInfo[j].imageView);
-VAL_FROM_HANDLE(val_sampler, sampler,
+LVP_FROM_HANDLE(lvp_sampler, sampler,
 write->pImageInfo[j].sampler);
 desc[j].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
@@ -366,10 +366,10 @@ void val_UpdateDescriptorSets(
 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
 for (uint32_t j = 0; j < write->descriptorCount; j++) {
-VAL_FROM_HANDLE(val_image_view, iview,
+LVP_FROM_HANDLE(lvp_image_view, iview,
 write->pImageInfo[j].imageView);
-desc[j] = (struct val_descriptor) {
+desc[j] = (struct lvp_descriptor) {
 .type = write->descriptorType,
 .image_view = iview,
 };
@@ -379,10 +379,10 @@ void val_UpdateDescriptorSets(
 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
 for (uint32_t j = 0; j < write->descriptorCount; j++) {
-VAL_FROM_HANDLE(val_buffer_view, bview,
+LVP_FROM_HANDLE(lvp_buffer_view, bview,
 write->pTexelBufferView[j]);
-desc[j] = (struct val_descriptor) {
+desc[j] = (struct lvp_descriptor) {
 .type = write->descriptorType,
 .buffer_view = bview,
 };
@@ -395,9 +395,9 @@ void val_UpdateDescriptorSets(
 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
 for (uint32_t j = 0; j < write->descriptorCount; j++) {
 assert(write->pBufferInfo[j].buffer);
-VAL_FROM_HANDLE(val_buffer, buffer, write->pBufferInfo[j].buffer);
+LVP_FROM_HANDLE(lvp_buffer, buffer, write->pBufferInfo[j].buffer);
 assert(buffer);
-desc[j] = (struct val_descriptor) {
+desc[j] = (struct lvp_descriptor) {
 .type = write->descriptorType,
 .buf.offset = write->pBufferInfo[j].offset,
 .buf.buffer = buffer,
@@ -413,18 +413,18 @@ void val_UpdateDescriptorSets(
 for (uint32_t i = 0; i < descriptorCopyCount; i++) {
 const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
-VAL_FROM_HANDLE(val_descriptor_set, src, copy->srcSet);
-VAL_FROM_HANDLE(val_descriptor_set, dst, copy->dstSet);
+LVP_FROM_HANDLE(lvp_descriptor_set, src, copy->srcSet);
+LVP_FROM_HANDLE(lvp_descriptor_set, dst, copy->dstSet);
-const struct val_descriptor_set_binding_layout *src_layout =
+const struct lvp_descriptor_set_binding_layout *src_layout =
 &src->layout->binding[copy->srcBinding];
-struct val_descriptor *src_desc =
+struct lvp_descriptor *src_desc =
 &src->descriptors[src_layout->descriptor_index];
 src_desc += copy->srcArrayElement;
-const struct val_descriptor_set_binding_layout *dst_layout =
+const struct lvp_descriptor_set_binding_layout *dst_layout =
 &dst->layout->binding[copy->dstBinding];
-struct val_descriptor *dst_desc =
+struct lvp_descriptor *dst_desc =
 &dst->descriptors[dst_layout->descriptor_index];
 dst_desc += copy->dstArrayElement;
@@ -433,15 +433,15 @@ void val_UpdateDescriptorSets(
 }
 }
-VkResult val_CreateDescriptorPool(
+VkResult lvp_CreateDescriptorPool(
 VkDevice _device,
 const VkDescriptorPoolCreateInfo* pCreateInfo,
 const VkAllocationCallbacks* pAllocator,
 VkDescriptorPool* pDescriptorPool)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-struct val_descriptor_pool *pool;
-size_t size = sizeof(struct val_descriptor_pool);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+struct lvp_descriptor_pool *pool;
+size_t size = sizeof(struct lvp_descriptor_pool);
 pool = vk_zalloc2(&device->alloc, pAllocator, size, 8,
 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 if (!pool)
@@ -451,49 +451,49 @@ VkResult val_CreateDescriptorPool(
 VK_OBJECT_TYPE_DESCRIPTOR_POOL);
 pool->flags = pCreateInfo->flags;
 list_inithead(&pool->sets);
-*pDescriptorPool = val_descriptor_pool_to_handle(pool);
+*pDescriptorPool = lvp_descriptor_pool_to_handle(pool);
 return VK_SUCCESS;
 }
-static void val_reset_descriptor_pool(struct val_device *device,
-struct val_descriptor_pool *pool)
+static void lvp_reset_descriptor_pool(struct lvp_device *device,
+struct lvp_descriptor_pool *pool)
 {
-struct val_descriptor_set *set, *tmp;
+struct lvp_descriptor_set *set, *tmp;
 LIST_FOR_EACH_ENTRY_SAFE(set, tmp, &pool->sets, link) {
 list_del(&set->link);
 vk_free(&device->alloc, set);
 }
 }
-void val_DestroyDescriptorPool(
+void lvp_DestroyDescriptorPool(
 VkDevice _device,
 VkDescriptorPool _pool,
 const VkAllocationCallbacks* pAllocator)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_descriptor_pool, pool, _pool);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_descriptor_pool, pool, _pool);
 if (!_pool)
 return;
-val_reset_descriptor_pool(device, pool);
+lvp_reset_descriptor_pool(device, pool);
 vk_object_base_finish(&pool->base);
 vk_free2(&device->alloc, pAllocator, pool);
 }
-VkResult val_ResetDescriptorPool(
+VkResult lvp_ResetDescriptorPool(
 VkDevice _device,
 VkDescriptorPool _pool,
 VkDescriptorPoolResetFlags flags)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_descriptor_pool, pool, _pool);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_descriptor_pool, pool, _pool);
-val_reset_descriptor_pool(device, pool);
+lvp_reset_descriptor_pool(device, pool);
 return VK_SUCCESS;
 }
-void val_GetDescriptorSetLayoutSupport(VkDevice device,
+void lvp_GetDescriptorSetLayoutSupport(VkDevice device,
 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
 VkDescriptorSetLayoutSupport* pSupport)
 {
(File diff suppressed because it is too large.)
@@ -31,23 +31,23 @@ import xml.etree.ElementTree as et
 from collections import OrderedDict, namedtuple
 from mako.template import Template
-from val_extensions import *
+from lvp_extensions import *
 # We generate a static hash table for entry point lookup
 # (vkGetProcAddress). We use a linear congruential generator for our hash
 # function and a power-of-two size table. The prime numbers are determined
 # experimentally.
-# We currently don't use layers in val, but keeping the ability for anv
+# We currently don't use layers in lvp, but keeping the ability for anv
 # anyways, so we can use it for device groups.
 LAYERS = [
-    'val'
+    'lvp'
 ]
 TEMPLATE_H = Template("""\
 /* This file generated from ${filename}, don't edit directly. */
-struct val_instance_dispatch_table {
+struct lvp_instance_dispatch_table {
 union {
 void *entrypoints[${len(instance_entrypoints)}];
 struct {
@@ -66,7 +66,7 @@ struct val_instance_dispatch_table {
 };
 };
-struct val_physical_device_dispatch_table {
+struct lvp_physical_device_dispatch_table {
 union {
 void *entrypoints[${len(physical_device_entrypoints)}];
 struct {
@@ -85,7 +85,7 @@ struct val_physical_device_dispatch_table {
 };
 };
-struct val_device_dispatch_table {
+struct lvp_device_dispatch_table {
 union {
 void *entrypoints[${len(device_entrypoints)}];
 struct {
@@ -104,12 +104,12 @@ struct val_device_dispatch_table {
 };
 };
-extern const struct val_instance_dispatch_table val_instance_dispatch_table;
+extern const struct lvp_instance_dispatch_table lvp_instance_dispatch_table;
 %for layer in LAYERS:
-extern const struct val_physical_device_dispatch_table ${layer}_physical_device_dispatch_table;
+extern const struct lvp_physical_device_dispatch_table ${layer}_physical_device_dispatch_table;
 %endfor
 %for layer in LAYERS:
-extern const struct val_device_dispatch_table ${layer}_device_dispatch_table;
+extern const struct lvp_device_dispatch_table ${layer}_device_dispatch_table;
 %endfor
 % for e in instance_entrypoints:
@@ -119,7 +119,7 @@ extern const struct val_device_dispatch_table ${layer}_device_dispatch_table;
 % if e.guard is not None:
 #ifdef ${e.guard}
 % endif
-${e.return_type} ${e.prefixed_name('val')}(${e.decl_params()});
+${e.return_type} ${e.prefixed_name('lvp')}(${e.decl_params()});
 % if e.guard is not None:
 #endif // ${e.guard}
 % endif
@@ -182,7 +182,7 @@ TEMPLATE_C = Template(u"""\
 /* This file generated from ${filename}, don't edit directly. */
-#include "val_private.h"
+#include "lvp_private.h"
 #include "util/macros.h"
 struct string_map_entry {
@@ -279,18 +279,18 @@ ${strmap(device_strmap, 'device')}
 % if e.guard is not None:
 #ifdef ${e.guard}
 % endif
-${e.return_type} ${e.prefixed_name('val')}(${e.decl_params()}) __attribute__ ((weak));
+${e.return_type} ${e.prefixed_name('lvp')}(${e.decl_params()}) __attribute__ ((weak));
 % if e.guard is not None:
 #endif // ${e.guard}
 % endif
 % endfor
-const struct val_instance_dispatch_table val_instance_dispatch_table = {
+const struct lvp_instance_dispatch_table lvp_instance_dispatch_table = {
 % for e in instance_entrypoints:
 % if e.guard is not None:
 #ifdef ${e.guard}
 % endif
-.${e.name} = ${e.prefixed_name('val')},
+.${e.name} = ${e.prefixed_name('lvp')},
 % if e.guard is not None:
 #endif // ${e.guard}
 % endif
@@ -304,18 +304,18 @@ const struct val_instance_dispatch_table val_instance_dispatch_table = {
 % if e.guard is not None:
 #ifdef ${e.guard}
 % endif
-${e.return_type} ${e.prefixed_name('val')}(${e.decl_params()}) __attribute__ ((weak));
+${e.return_type} ${e.prefixed_name('lvp')}(${e.decl_params()}) __attribute__ ((weak));
 % if e.guard is not None:
 #endif // ${e.guard}
 % endif
 % endfor
-const struct val_physical_device_dispatch_table val_physical_device_dispatch_table = {
+const struct lvp_physical_device_dispatch_table lvp_physical_device_dispatch_table = {
 % for e in physical_device_entrypoints:
 % if e.guard is not None:
 #ifdef ${e.guard}
 % endif
-.${e.name} = ${e.prefixed_name('val')},
+.${e.name} = ${e.prefixed_name('lvp')},
 % if e.guard is not None:
 #endif // ${e.guard}
 % endif
@@ -331,19 +331,19 @@ const struct val_physical_device_dispatch_table val_physical_device_dispatch_tab
 % if e.guard is not None:
 #ifdef ${e.guard}
 % endif
-% if layer == 'val':
+% if layer == 'lvp':
 ${e.return_type} __attribute__ ((weak))
-${e.prefixed_name('val')}(${e.decl_params()})
+${e.prefixed_name('lvp')}(${e.decl_params()})
 {
 % if e.params[0].type == 'VkDevice':
-VAL_FROM_HANDLE(val_device, val_device, ${e.params[0].name});
-return val_device->dispatch.${e.name}(${e.call_params()});
+LVP_FROM_HANDLE(lvp_device, lvp_device, ${e.params[0].name});
+return lvp_device->dispatch.${e.name}(${e.call_params()});
 % elif e.params[0].type == 'VkCommandBuffer':
-VAL_FROM_HANDLE(val_cmd_buffer, val_cmd_buffer, ${e.params[0].name});
-return val_cmd_buffer->device->dispatch.${e.name}(${e.call_params()});
+LVP_FROM_HANDLE(lvp_cmd_buffer, lvp_cmd_buffer, ${e.params[0].name});
+return lvp_cmd_buffer->device->dispatch.${e.name}(${e.call_params()});
 % elif e.params[0].type == 'VkQueue':
-VAL_FROM_HANDLE(val_queue, val_queue, ${e.params[0].name});
-return val_queue->device->dispatch.${e.name}(${e.call_params()});
+LVP_FROM_HANDLE(lvp_queue, lvp_queue, ${e.params[0].name});
+return lvp_queue->device->dispatch.${e.name}(${e.call_params()});
 % else:
 assert(!"Unhandled device child trampoline case: ${e.params[0].type}");
 % endif
@@ -356,7 +356,7 @@ const struct val_physical_device_dispatch_table val_physical_device_dispatch_tab
 % endif
 % endfor
-const struct val_device_dispatch_table ${layer}_device_dispatch_table = {
+const struct lvp_device_dispatch_table ${layer}_device_dispatch_table = {
 % for e in device_entrypoints:
 % if e.guard is not None:
 #ifdef ${e.guard}
@@ -376,8 +376,8 @@ const struct val_physical_device_dispatch_table val_physical_device_dispatch_tab
 * If device is NULL, all device extensions are considered enabled.
 */
 bool
-val_instance_entrypoint_is_enabled(int index, uint32_t core_version,
-const struct val_instance_extension_table *instance)
+lvp_instance_entrypoint_is_enabled(int index, uint32_t core_version,
+const struct lvp_instance_extension_table *instance)
 {
 switch (index) {
 % for e in instance_entrypoints:
@@ -410,8 +410,8 @@ val_instance_entrypoint_is_enabled(int index, uint32_t core_version,
 * If device is NULL, all device extensions are considered enabled.
 */
 bool
-val_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
-const struct val_instance_extension_table *instance)
+lvp_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
+const struct lvp_instance_extension_table *instance)
 {
 switch (index) {
 % for e in physical_device_entrypoints:
@@ -444,9 +444,9 @@ val_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
 * If device is NULL, all device extensions are considered enabled.
 */
 bool
-val_device_entrypoint_is_enabled(int index, uint32_t core_version,
-const struct val_instance_extension_table *instance,
-const struct val_device_extension_table *device)
+lvp_device_entrypoint_is_enabled(int index, uint32_t core_version,
+const struct lvp_instance_extension_table *instance,
+const struct lvp_device_extension_table *device)
 {
 switch (index) {
 % for e in device_entrypoints:
@@ -473,61 +473,61 @@ val_device_entrypoint_is_enabled(int index, uint32_t core_version,
 }
 int
-val_get_instance_entrypoint_index(const char *name)
+lvp_get_instance_entrypoint_index(const char *name)
 {
 return instance_string_map_lookup(name);
 }
 int
-val_get_physical_device_entrypoint_index(const char *name)
+lvp_get_physical_device_entrypoint_index(const char *name)
 {
 return physical_device_string_map_lookup(name);
 }
 int
-val_get_device_entrypoint_index(const char *name)
+lvp_get_device_entrypoint_index(const char *name)
 {
 return device_string_map_lookup(name);
 }
 const char *
-val_get_instance_entry_name(int index)
+lvp_get_instance_entry_name(int index)
 {
 return instance_entry_name(index);
 }
 const char *
-val_get_physical_device_entry_name(int index)
+lvp_get_physical_device_entry_name(int index)
 {
 return physical_device_entry_name(index);
 }
 const char *
-val_get_device_entry_name(int index)
+lvp_get_device_entry_name(int index)
 {
 return device_entry_name(index);
 }
 static void * __attribute__ ((noinline))
-val_resolve_device_entrypoint(uint32_t index)
+lvp_resolve_device_entrypoint(uint32_t index)
 {
-return val_device_dispatch_table.entrypoints[index];
+return lvp_device_dispatch_table.entrypoints[index];
 }
 void *
-val_lookup_entrypoint(const char *name)
+lvp_lookup_entrypoint(const char *name)
 {
-int idx = val_get_instance_entrypoint_index(name);
+int idx = lvp_get_instance_entrypoint_index(name);
 if (idx >= 0)
-return val_instance_dispatch_table.entrypoints[idx];
+return lvp_instance_dispatch_table.entrypoints[idx];
-idx = val_get_physical_device_entrypoint_index(name);
+idx = lvp_get_physical_device_entrypoint_index(name);
 if (idx >= 0)
-return val_physical_device_dispatch_table.entrypoints[idx];
+return lvp_physical_device_dispatch_table.entrypoints[idx];
-idx = val_get_device_entrypoint_index(name);
+idx = lvp_get_device_entrypoint_index(name);
 if (idx >= 0)
-return val_resolve_device_entrypoint(idx);
+return lvp_resolve_device_entrypoint(idx);
 return NULL;
 }""", output_encoding='utf-8')
@@ -781,16 +781,16 @@ def main():
 e.num = num
 instance_strmap.bake()
-# For outputting entrypoints.h we generate a val_EntryPoint() prototype
+# For outputting entrypoints.h we generate a lvp_EntryPoint() prototype
 # per entry point.
 try:
-with open(os.path.join(args.outdir, 'val_entrypoints.h'), 'wb') as f:
+with open(os.path.join(args.outdir, 'lvp_entrypoints.h'), 'wb') as f:
 f.write(TEMPLATE_H.render(instance_entrypoints=instance_entrypoints,
 physical_device_entrypoints=physical_device_entrypoints,
 device_entrypoints=device_entrypoints,
 LAYERS=LAYERS,
 filename=os.path.basename(__file__)))
-with open(os.path.join(args.outdir, 'val_entrypoints.c'), 'wb') as f:
+with open(os.path.join(args.outdir, 'lvp_entrypoints.c'), 'wb') as f:
 f.write(TEMPLATE_C.render(instance_entrypoints=instance_entrypoints,
 physical_device_entrypoints=physical_device_entrypoints,
 device_entrypoints=device_entrypoints,
(File diff suppressed because it is too large.)
@@ -79,10 +79,10 @@ EXTENSIONS = [
 Extension('VK_KHR_get_display_properties2', 1, 'VK_USE_PLATFORM_DISPLAY_KHR'),
 Extension('VK_KHR_get_memory_requirements2', 1, True),
 Extension('VK_KHR_get_physical_device_properties2', 1, True),
-Extension('VK_KHR_get_surface_capabilities2', 1, 'VAL_HAS_SURFACE'),
+Extension('VK_KHR_get_surface_capabilities2', 1, 'LVP_HAS_SURFACE'),
 Extension('VK_KHR_image_format_list', 1, False),
 Extension('VK_KHR_imageless_framebuffer', 1, False),
-Extension('VK_KHR_incremental_present', 1, 'VAL_HAS_SURFACE'),
+Extension('VK_KHR_incremental_present', 1, 'LVP_HAS_SURFACE'),
 Extension('VK_KHR_maintenance1', 1, True),
 Extension('VK_KHR_maintenance2', 1, False),
 Extension('VK_KHR_maintenance3', 1, False),
@@ -95,9 +95,9 @@ EXTENSIONS = [
 Extension('VK_KHR_shader_draw_parameters', 1, False),
 Extension('VK_KHR_shader_float16_int8', 1, False),
 Extension('VK_KHR_storage_buffer_storage_class', 1, True),
-Extension('VK_KHR_surface', 25, 'VAL_HAS_SURFACE'),
-Extension('VK_KHR_surface_protected_capabilities', 1, 'VAL_HAS_SURFACE'),
-Extension('VK_KHR_swapchain', 68, 'VAL_HAS_SURFACE'),
+Extension('VK_KHR_surface', 25, 'LVP_HAS_SURFACE'),
+Extension('VK_KHR_surface_protected_capabilities', 1, 'LVP_HAS_SURFACE'),
+Extension('VK_KHR_swapchain', 68, 'LVP_HAS_SURFACE'),
 Extension('VK_KHR_uniform_buffer_standard_layout', 1, False),
 Extension('VK_KHR_variable_pointers', 1, False),
 Extension('VK_KHR_wayland_surface', 6, 'VK_USE_PLATFORM_WAYLAND_KHR'),
@@ -163,4 +163,4 @@ if __name__ == '__main__':
 dest='xml_files')
 args = parser.parse_args()
-gen_extensions('val', args.xml_files, API_VERSIONS, MAX_API_VERSION, EXTENSIONS, args.out_c, args.out_h)
+gen_extensions('lvp', args.xml_files, API_VERSIONS, MAX_API_VERSION, EXTENSIONS, args.out_c, args.out_h)
@@ -21,7 +21,7 @@
 * IN THE SOFTWARE.
 */
-#include "val_private.h"
+#include "lvp_private.h"
 #include "util/format/u_format.h"
 #include "util/u_math.h"
 #define COMMON_NAME(x) [VK_FORMAT_##x] = PIPE_FORMAT_##x
@@ -148,7 +148,7 @@ enum pipe_format vk_format_to_pipe(VkFormat format)
 }
 static void
-val_physical_device_get_format_properties(struct val_physical_device *physical_device,
+lvp_physical_device_get_format_properties(struct lvp_physical_device *physical_device,
 VkFormat format,
 VkFormatProperties *out_properties)
 {
@@ -231,30 +231,30 @@ val_physical_device_get_format_properties(struct val_physical_device *physical_d
 return;
 }
-void val_GetPhysicalDeviceFormatProperties(
+void lvp_GetPhysicalDeviceFormatProperties(
 VkPhysicalDevice physicalDevice,
 VkFormat format,
 VkFormatProperties* pFormatProperties)
 {
-VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
-val_physical_device_get_format_properties(physical_device,
+lvp_physical_device_get_format_properties(physical_device,
 format,
 pFormatProperties);
 }
-void val_GetPhysicalDeviceFormatProperties2(
+void lvp_GetPhysicalDeviceFormatProperties2(
 VkPhysicalDevice physicalDevice,
 VkFormat format,
 VkFormatProperties2* pFormatProperties)
 {
-VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
-val_physical_device_get_format_properties(physical_device,
+lvp_physical_device_get_format_properties(physical_device,
 format,
 &pFormatProperties->formatProperties);
 }
-static VkResult val_get_image_format_properties(struct val_physical_device *physical_device,
+static VkResult lvp_get_image_format_properties(struct lvp_physical_device *physical_device,
 const VkPhysicalDeviceImageFormatInfo2 *info,
 VkImageFormatProperties *pImageFormatProperties)
 {
@@ -265,7 +265,7 @@ static VkResult val_get_image_format_properties(struct val_physical_device *phys
 uint32_t maxArraySize;
 VkSampleCountFlags sampleCounts = VK_SAMPLE_COUNT_1_BIT;
 enum pipe_format pformat = vk_format_to_pipe(info->format);
-val_physical_device_get_format_properties(physical_device, info->format,
+lvp_physical_device_get_format_properties(physical_device, info->format,
 &format_props);
 if (info->tiling == VK_IMAGE_TILING_LINEAR) {
 format_feature_flags = format_props.linearTilingFeatures;
@@ -377,7 +377,7 @@ static VkResult val_get_image_format_properties(struct val_physical_device *phys
 return VK_ERROR_FORMAT_NOT_SUPPORTED;
 }
-VkResult val_GetPhysicalDeviceImageFormatProperties(
+VkResult lvp_GetPhysicalDeviceImageFormatProperties(
 VkPhysicalDevice physicalDevice,
 VkFormat format,
 VkImageType type,
@@ -386,7 +386,7 @@ VkResult val_GetPhysicalDeviceImageFormatProperties(
 VkImageCreateFlags createFlags,
 VkImageFormatProperties* pImageFormatProperties)
 {
-VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
 const VkPhysicalDeviceImageFormatInfo2 info = {
 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
@@ -398,18 +398,18 @@ VkResult val_GetPhysicalDeviceImageFormatProperties(
 .flags = createFlags,
 };
-return val_get_image_format_properties(physical_device, &info,
+return lvp_get_image_format_properties(physical_device, &info,
 pImageFormatProperties);
 }
-VkResult val_GetPhysicalDeviceImageFormatProperties2(
+VkResult lvp_GetPhysicalDeviceImageFormatProperties2(
 VkPhysicalDevice physicalDevice,
 const VkPhysicalDeviceImageFormatInfo2 *base_info,
 VkImageFormatProperties2 *base_props)
 {
-VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
 VkResult result;
-result = val_get_image_format_properties(physical_device, base_info,
+result = lvp_get_image_format_properties(physical_device, base_info,
 &base_props->imageFormatProperties);
 if (result != VK_SUCCESS)
 return result;
@@ -417,7 +417,7 @@ VkResult val_GetPhysicalDeviceImageFormatProperties2(
 return VK_SUCCESS;
 }
-void val_GetPhysicalDeviceSparseImageFormatProperties(
+void lvp_GetPhysicalDeviceSparseImageFormatProperties(
 VkPhysicalDevice physicalDevice,
 VkFormat format,
 VkImageType type,
@@ -431,7 +431,7 @@ void val_GetPhysicalDeviceSparseImageFormatProperties(
 *pNumProperties = 0;
 }
-void val_GetPhysicalDeviceSparseImageFormatProperties2(
+void lvp_GetPhysicalDeviceSparseImageFormatProperties2(
 VkPhysicalDevice physicalDevice,
 const VkPhysicalDeviceSparseImageFormatInfo2 *pFormatInfo,
 uint32_t *pPropertyCount,
@@ -21,20 +21,20 @@
 * IN THE SOFTWARE.
 */
-#include "val_private.h"
+#include "lvp_private.h"
 #include "util/format/u_format.h"
 #include "util/u_inlines.h"
 #include "pipe/p_state.h"
 VkResult
-val_image_create(VkDevice _device,
-const struct val_image_create_info *create_info,
+lvp_image_create(VkDevice _device,
+const struct lvp_image_create_info *create_info,
 const VkAllocationCallbacks* alloc,
 VkImage *pImage)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
+LVP_FROM_HANDLE(lvp_device, device, _device);
 const VkImageCreateInfo *pCreateInfo = create_info->vk_info;
-struct val_image *image;
+struct lvp_image *image;
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO);
@@ -81,19 +81,19 @@ val_image_create(VkDevice _device,
 &template,
 &image->size);
 }
-*pImage = val_image_to_handle(image);
+*pImage = lvp_image_to_handle(image);
 return VK_SUCCESS;
 }
 VkResult
-val_CreateImage(VkDevice device,
+lvp_CreateImage(VkDevice device,
 const VkImageCreateInfo *pCreateInfo,
 const VkAllocationCallbacks *pAllocator,
 VkImage *pImage)
 {
-return val_image_create(device,
-&(struct val_image_create_info) {
+return lvp_image_create(device,
+&(struct lvp_image_create_info) {
 .vk_info = pCreateInfo,
 .bind_flags = 0,
 },
@@ -102,11 +102,11 @@ val_CreateImage(VkDevice device,
 }
 void
-val_DestroyImage(VkDevice _device, VkImage _image,
+lvp_DestroyImage(VkDevice _device, VkImage _image,
 const VkAllocationCallbacks *pAllocator)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_image, image, _image);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_image, image, _image);
 if (!_image)
 return;
@@ -116,14 +116,14 @@ val_DestroyImage(VkDevice _device, VkImage _image,
 }
 VkResult
-val_CreateImageView(VkDevice _device,
+lvp_CreateImageView(VkDevice _device,
 const VkImageViewCreateInfo *pCreateInfo,
 const VkAllocationCallbacks *pAllocator,
 VkImageView *pView)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_image, image, pCreateInfo->image);
-struct val_image_view *view;
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_image, image, pCreateInfo->image);
+struct lvp_image_view *view;
 view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
@@ -139,17 +139,17 @@ val_CreateImageView(VkDevice _device,
 view->subresourceRange = pCreateInfo->subresourceRange;
 view->image = image;
 view->surface = NULL;
-*pView = val_image_view_to_handle(view);
+*pView = lvp_image_view_to_handle(view);
 return VK_SUCCESS;
 }
 void
-val_DestroyImageView(VkDevice _device, VkImageView _iview,
+lvp_DestroyImageView(VkDevice _device, VkImageView _iview,
 const VkAllocationCallbacks *pAllocator)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_image_view, iview, _iview);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_image_view, iview, _iview);
 if (!_iview)
 return;
@@ -159,14 +159,14 @@ val_DestroyImageView(VkDevice _device, VkImageView _iview,
 vk_free2(&device->alloc, pAllocator, iview);
 }
-void val_GetImageSubresourceLayout(
+void lvp_GetImageSubresourceLayout(
 VkDevice _device,
 VkImage _image,
 const VkImageSubresource* pSubresource,
 VkSubresourceLayout* pLayout)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_image, image, _image);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_image, image, _image);
 uint32_t stride, offset;
 device->pscreen->resource_get_info(device->pscreen,
 image->bo,
@@ -187,14 +187,14 @@ void val_GetImageSubresourceLayout(
 }
 }
-VkResult val_CreateBuffer(
+VkResult lvp_CreateBuffer(
 VkDevice _device,
 const VkBufferCreateInfo* pCreateInfo,
 const VkAllocationCallbacks* pAllocator,
 VkBuffer* pBuffer)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-struct val_buffer *buffer;
+LVP_FROM_HANDLE(lvp_device, device, _device);
+struct lvp_buffer *buffer;
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
@@ -231,18 +231,18 @@ VkResult val_CreateBuffer(
 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
 }
 }
-*pBuffer = val_buffer_to_handle(buffer);
+*pBuffer = lvp_buffer_to_handle(buffer);
 return VK_SUCCESS;
 }
-void val_DestroyBuffer(
+void lvp_DestroyBuffer(
 VkDevice _device,
 VkBuffer _buffer,
 const VkAllocationCallbacks* pAllocator)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_buffer, buffer, _buffer);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_buffer, buffer, _buffer);
 if (!_buffer)
 return;
@@ -253,14 +253,14 @@ void val_DestroyBuffer(
 }
 VkResult
-val_CreateBufferView(VkDevice _device,
+lvp_CreateBufferView(VkDevice _device,
 const VkBufferViewCreateInfo *pCreateInfo,
 const VkAllocationCallbacks *pAllocator,
 VkBufferView *pView)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_buffer, buffer, pCreateInfo->buffer);
-struct val_buffer_view *view;
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_buffer, buffer, pCreateInfo->buffer);
+struct lvp_buffer_view *view;
 view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 if (!view)
@@ -273,17 +273,17 @@ val_CreateBufferView(VkDevice _device,
 view->pformat = vk_format_to_pipe(pCreateInfo->format);
 view->offset = pCreateInfo->offset;
 view->range = pCreateInfo->range;
-*pView = val_buffer_view_to_handle(view);
+*pView = lvp_buffer_view_to_handle(view);
 return VK_SUCCESS;
 }
 void
-val_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
+lvp_DestroyBufferView(VkDevice _device, VkBufferView bufferView,
 const VkAllocationCallbacks *pAllocator)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_buffer_view, view, bufferView);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_buffer_view, view, bufferView);
 if (!bufferView)
 return;
@@ -23,7 +23,7 @@
 #include "nir.h"
 #include "nir_builder.h"
-#include "val_lower_vulkan_resource.h"
+#include "lvp_lower_vulkan_resource.h"
 static nir_ssa_def *
 load_frag_coord(nir_builder *b)
@@ -80,7 +80,7 @@ try_lower_input_load(nir_function_impl *impl, nir_intrinsic_instr *load,
 }
 bool
-val_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval)
+lvp_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval)
 {
 assert(shader->info.stage == MESA_SHADER_FRAGMENT);
 bool progress = false;
@@ -21,10 +21,10 @@
 * IN THE SOFTWARE.
 */
-#include "val_private.h"
+#include "lvp_private.h"
 #include "nir.h"
 #include "nir_builder.h"
-#include "val_lower_vulkan_resource.h"
+#include "lvp_lower_vulkan_resource.h"
 static bool
 lower_vulkan_resource_index(const nir_instr *instr, const void *data_cb)
@@ -53,8 +53,8 @@ static nir_ssa_def *lower_vri_intrin_vri(struct nir_builder *b,
 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 unsigned desc_set_idx = nir_intrinsic_desc_set(intrin);
 unsigned binding_idx = nir_intrinsic_binding(intrin);
-struct val_pipeline_layout *layout = data_cb;
-struct val_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
+struct lvp_pipeline_layout *layout = data_cb;
+struct lvp_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
 int value = 0;
 bool is_ubo = (binding->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
 binding->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
@@ -103,7 +103,7 @@ static nir_ssa_def *lower_vri_intrin_lvd(struct nir_builder *b,
 static int lower_vri_instr_tex_deref(nir_tex_instr *tex,
 nir_tex_src_type deref_src_type,
 gl_shader_stage stage,
-struct val_pipeline_layout *layout)
+struct lvp_pipeline_layout *layout)
 {
 int deref_src_idx = nir_tex_instr_src_index(tex, deref_src_type);
@@ -115,7 +115,7 @@ static int lower_vri_instr_tex_deref(nir_tex_instr *tex,
 unsigned desc_set_idx = var->data.descriptor_set;
 unsigned binding_idx = var->data.binding;
 int value = 0;
-struct val_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
+struct lvp_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
 nir_tex_instr_remove_src(tex, deref_src_idx);
 for (unsigned s = 0; s < desc_set_idx; s++) {
 if (deref_src_type == nir_tex_src_sampler_deref)
@@ -148,7 +148,7 @@ static int lower_vri_instr_tex_deref(nir_tex_instr *tex,
 static void lower_vri_instr_tex(struct nir_builder *b,
 nir_tex_instr *tex, void *data_cb)
 {
-struct val_pipeline_layout *layout = data_cb;
+struct lvp_pipeline_layout *layout = data_cb;
 int tex_value = 0;
 lower_vri_instr_tex_deref(tex, nir_tex_src_sampler_deref, b->shader->info.stage, layout);
@@ -192,8 +192,8 @@ static nir_ssa_def *lower_vri_instr(struct nir_builder *b,
 return NULL;
 }
-void val_lower_pipeline_layout(const struct val_device *device,
-struct val_pipeline_layout *layout,
+void lvp_lower_pipeline_layout(const struct lvp_device *device,
+struct lvp_pipeline_layout *layout,
 nir_shader *shader)
 {
 nir_shader_lower_instructions(shader, lower_vulkan_resource_index, lower_vri_instr, layout);
@@ -203,7 +203,7 @@ void val_lower_pipeline_layout(const struct val_device *device,
 glsl_get_base_type(glsl_without_array(type));
 unsigned desc_set_idx = var->data.descriptor_set;
 unsigned binding_idx = var->data.binding;
-struct val_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
+struct lvp_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
 int value = 0;
 var->data.descriptor_set = 0;
 if (base_type == GLSL_TYPE_SAMPLER) {
@@ -21,16 +21,16 @@
 * IN THE SOFTWARE.
 */
-#ifndef VAL_LOWER_VULKAN_RESOURCE_H
-#define VAL_LOWER_VULKAN_RESOURCE_H
+#ifndef LVP_LOWER_VULKAN_RESOURCE_H
+#define LVP_LOWER_VULKAN_RESOURCE_H
-struct val_pipeline_layout;
-struct val_device;
-void val_lower_pipeline_layout(const struct val_device *device,
-struct val_pipeline_layout *layout,
+struct lvp_pipeline_layout;
+struct lvp_device;
+void lvp_lower_pipeline_layout(const struct lvp_device *device,
+struct lvp_pipeline_layout *layout,
 nir_shader *shader);
 bool
-val_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval);
+lvp_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval);
 #endif
@@ -21,21 +21,21 @@
 * IN THE SOFTWARE.
 */
-#include "val_private.h"
+#include "lvp_private.h"
 static void
-val_render_pass_compile(struct val_render_pass *pass)
+lvp_render_pass_compile(struct lvp_render_pass *pass)
 {
 for (uint32_t i = 0; i < pass->subpass_count; i++) {
-struct val_subpass *subpass = &pass->subpasses[i];
+struct lvp_subpass *subpass = &pass->subpasses[i];
 for (uint32_t j = 0; j < subpass->attachment_count; j++) {
-struct val_subpass_attachment *subpass_att =
+struct lvp_subpass_attachment *subpass_att =
 &subpass->attachments[j];
 if (subpass_att->attachment == VK_ATTACHMENT_UNUSED)
 continue;
-struct val_render_pass_attachment *pass_att =
+struct lvp_render_pass_attachment *pass_att =
 &pass->attachments[subpass_att->attachment];
 pass_att->first_subpass_idx = UINT32_MAX;
@@ -43,7 +43,7 @@ val_render_pass_compile(struct val_render_pass *pass)
 }
 for (uint32_t i = 0; i < pass->subpass_count; i++) {
-struct val_subpass *subpass = &pass->subpasses[i];
+struct lvp_subpass *subpass = &pass->subpasses[i];
 uint32_t color_sample_count = 1, depth_sample_count = 1;
 /* We don't allow depth_stencil_attachment to be non-NULL and
@@ -60,12 +60,12 @@ val_render_pass_compile(struct val_render_pass *pass)
 subpass->ds_resolve_attachment = NULL;
 for (uint32_t j = 0; j < subpass->attachment_count; j++) {
-struct val_subpass_attachment *subpass_att =
+struct lvp_subpass_attachment *subpass_att =
 &subpass->attachments[j];
 if (subpass_att->attachment == VK_ATTACHMENT_UNUSED)
 continue;
-struct val_render_pass_attachment *pass_att =
+struct lvp_render_pass_attachment *pass_att =
 &pass->attachments[subpass_att->attachment];
 if (i < pass_att->first_subpass_idx)
@@ -75,14 +75,14 @@ val_render_pass_compile(struct val_render_pass *pass)
 subpass->has_color_att = false;
 for (uint32_t j = 0; j < subpass->color_count; j++) {
-struct val_subpass_attachment *subpass_att =
+struct lvp_subpass_attachment *subpass_att =
 &subpass->color_attachments[j];
 if (subpass_att->attachment == VK_ATTACHMENT_UNUSED)
 continue;
 subpass->has_color_att = true;
-struct val_render_pass_attachment *pass_att =
+struct lvp_render_pass_attachment *pass_att =
 &pass->attachments[subpass_att->attachment];
 color_sample_count = pass_att->samples;
@@ -91,7 +91,7 @@ val_render_pass_compile(struct val_render_pass *pass)
 if (subpass->depth_stencil_attachment) {
 const uint32_t a =
 subpass->depth_stencil_attachment->attachment;
-struct val_render_pass_attachment *pass_att =
+struct lvp_render_pass_attachment *pass_att =
 &pass->attachments[a];
 depth_sample_count = pass_att->samples;
 }
@@ -103,7 +103,7 @@ val_render_pass_compile(struct val_render_pass *pass)
 subpass->has_color_resolve = false;
 if (subpass->resolve_attachments) {
 for (uint32_t j = 0; j < subpass->color_count; j++) {
-struct val_subpass_attachment *resolve_att =
+struct lvp_subpass_attachment *resolve_att =
 &subpass->resolve_attachments[j];
 if (resolve_att->attachment == VK_ATTACHMENT_UNUSED)
@@ -134,7 +134,7 @@ val_render_pass_compile(struct val_render_pass *pass)
 }
 static unsigned
-val_num_subpass_attachments(const VkSubpassDescription *desc)
+lvp_num_subpass_attachments(const VkSubpassDescription *desc)
 {
 return desc->inputAttachmentCount +
 desc->colorAttachmentCount +
@@ -142,14 +142,14 @@ val_num_subpass_attachments(const VkSubpassDescription *desc)
 (desc->pDepthStencilAttachment != NULL);
 }
-VkResult val_CreateRenderPass(
+VkResult lvp_CreateRenderPass(
 VkDevice _device,
 const VkRenderPassCreateInfo* pCreateInfo,
 const VkAllocationCallbacks* pAllocator,
 VkRenderPass* pRenderPass)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-struct val_render_pass *pass;
+LVP_FROM_HANDLE(lvp_device, device, _device);
+struct lvp_render_pass *pass;
 size_t size;
 size_t attachments_offset;
@@ -166,7 +166,7 @@ VkResult val_CreateRenderPass(
 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 /* Clear the subpasses along with the parent pass. This required because
-* each array member of val_subpass must be a valid pointer if not NULL.
+* each array member of lvp_subpass must be a valid pointer if not NULL.
 */
 memset(pass, 0, size);
@@ -177,7 +177,7 @@ VkResult val_CreateRenderPass(
 pass->attachments = (void *) pass + attachments_offset;
 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
-struct val_render_pass_attachment *att = &pass->attachments[i];
+struct lvp_render_pass_attachment *att = &pass->attachments[i];
 att->format = pCreateInfo->pAttachments[i].format;
 att->samples = pCreateInfo->pAttachments[i].samples;
@@ -189,13 +189,13 @@ VkResult val_CreateRenderPass(
 uint32_t subpass_attachment_count = 0;
 for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
-subpass_attachment_count += val_num_subpass_attachments(&pCreateInfo->pSubpasses[i]);
+subpass_attachment_count += lvp_num_subpass_attachments(&pCreateInfo->pSubpasses[i]);
 }
 if (subpass_attachment_count) {
 pass->subpass_attachments =
 vk_alloc2(&device->alloc, pAllocator,
-subpass_attachment_count * sizeof(struct val_subpass_attachment), 8,
+subpass_attachment_count * sizeof(struct lvp_subpass_attachment), 8,
 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 if (pass->subpass_attachments == NULL) {
 vk_free2(&device->alloc, pAllocator, pass);
@@ -204,14 +204,14 @@ VkResult val_CreateRenderPass(
 } else
 pass->subpass_attachments = NULL;
-struct val_subpass_attachment *p = pass->subpass_attachments;
+struct lvp_subpass_attachment *p = pass->subpass_attachments;
 for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
 const VkSubpassDescription *desc = &pCreateInfo->pSubpasses[i];
-struct val_subpass *subpass = &pass->subpasses[i];
+struct lvp_subpass *subpass = &pass->subpasses[i];
 subpass->input_count = desc->inputAttachmentCount;
 subpass->color_count = desc->colorAttachmentCount;
-subpass->attachment_count = val_num_subpass_attachments(desc);
+subpass->attachment_count = lvp_num_subpass_attachments(desc);
 subpass->attachments = p;
 if (desc->inputAttachmentCount > 0) {
@@ -219,7 +219,7 @@ VkResult val_CreateRenderPass(
 p += desc->inputAttachmentCount;
 for (uint32_t j = 0; j < desc->inputAttachmentCount; j++) {
-subpass->input_attachments[j] = (struct val_subpass_attachment) {
+subpass->input_attachments[j] = (struct lvp_subpass_attachment) {
 .attachment = desc->pInputAttachments[j].attachment,
 .layout = desc->pInputAttachments[j].layout,
 };
@@ -231,7 +231,7 @@ VkResult val_CreateRenderPass(
 p += desc->colorAttachmentCount;
 for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
-subpass->color_attachments[j] = (struct val_subpass_attachment) {
+subpass->color_attachments[j] = (struct lvp_subpass_attachment) {
 .attachment = desc->pColorAttachments[j].attachment,
 .layout = desc->pColorAttachments[j].layout,
 };
@@ -243,7 +243,7 @@ VkResult val_CreateRenderPass(
 p += desc->colorAttachmentCount;
 for (uint32_t j = 0; j < desc->colorAttachmentCount; j++) {
-subpass->resolve_attachments[j] = (struct val_subpass_attachment) {
+subpass->resolve_attachments[j] = (struct lvp_subpass_attachment) {
 .attachment = desc->pResolveAttachments[j].attachment,
 .layout = desc->pResolveAttachments[j].layout,
 };
@@ -253,26 +253,26 @@ VkResult val_CreateRenderPass(
 if (desc->pDepthStencilAttachment) {
 subpass->depth_stencil_attachment = p++;
-*subpass->depth_stencil_attachment = (struct val_subpass_attachment) {
+*subpass->depth_stencil_attachment = (struct lvp_subpass_attachment) {
 .attachment = desc->pDepthStencilAttachment->attachment,
|
||||
.layout = desc->pDepthStencilAttachment->layout,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
val_render_pass_compile(pass);
|
||||
*pRenderPass = val_render_pass_to_handle(pass);
|
||||
lvp_render_pass_compile(pass);
|
||||
*pRenderPass = lvp_render_pass_to_handle(pass);
|
||||
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
|
||||
void val_DestroyRenderPass(
|
||||
void lvp_DestroyRenderPass(
|
||||
VkDevice _device,
|
||||
VkRenderPass _pass,
|
||||
const VkAllocationCallbacks* pAllocator)
|
||||
{
|
||||
VAL_FROM_HANDLE(val_device, device, _device);
|
||||
VAL_FROM_HANDLE(val_render_pass, pass, _pass);
|
||||
LVP_FROM_HANDLE(lvp_device, device, _device);
|
||||
LVP_FROM_HANDLE(lvp_render_pass, pass, _pass);
|
||||
|
||||
if (!_pass)
|
||||
return;
|
||||
|
|
@ -281,7 +281,7 @@ void val_DestroyRenderPass(
|
|||
vk_free2(&device->alloc, pAllocator, pass);
|
||||
}
|
||||
|
||||
void val_GetRenderAreaGranularity(
|
||||
void lvp_GetRenderAreaGranularity(
|
||||
VkDevice device,
|
||||
VkRenderPass renderPass,
|
||||
VkExtent2D* pGranularity)
|
||||
|
|
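Every hunk in this diff swaps the VAL_FROM_HANDLE / val_*_to_handle helpers for their LVP_ equivalents. As background for readers who do not have lvp_private.h in front of them, the following is a minimal sketch of the handle-cast pattern such macros usually expand to; the names LVP_FROM_HANDLE and lvp_render_pass_from_handle are taken from the hunks above, but the bodies here are an illustration, not the exact Mesa definitions.

/* Minimal sketch of the driver handle-cast pattern used throughout this diff.
 * The real macros live in lvp_private.h and carry more machinery (object-type
 * tracking, dispatchable vs. non-dispatchable handles); this only shows the
 * core idea: a Vulkan handle is reinterpreted as a driver struct pointer. */
struct lvp_render_pass;                        /* driver-internal object */
typedef struct VkRenderPass_T *VkRenderPass;   /* opaque API handle (64-bit build) */

static inline struct lvp_render_pass *
lvp_render_pass_from_handle(VkRenderPass handle)
{
   return (struct lvp_render_pass *)handle;
}

static inline VkRenderPass
lvp_render_pass_to_handle(struct lvp_render_pass *pass)
{
   return (VkRenderPass)pass;
}

/* With helpers of that shape, LVP_FROM_HANDLE(lvp_render_pass, pass, _pass)
 * reads roughly as: */
#define LVP_FROM_HANDLE(type, name, handle) \
   struct type *name = type##_from_handle(handle)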
@@ -21,25 +21,25 @@
 * IN THE SOFTWARE.
 */
 
-#include "val_private.h"
+#include "lvp_private.h"
 
 #include "glsl_types.h"
 #include "spirv/nir_spirv.h"
 #include "nir/nir_builder.h"
-#include "val_lower_vulkan_resource.h"
+#include "lvp_lower_vulkan_resource.h"
 #include "pipe/p_state.h"
 #include "pipe/p_context.h"
 
 #define SPIR_V_MAGIC_NUMBER 0x07230203
 
-VkResult val_CreateShaderModule(
+VkResult lvp_CreateShaderModule(
 VkDevice _device,
 const VkShaderModuleCreateInfo* pCreateInfo,
 const VkAllocationCallbacks* pAllocator,
 VkShaderModule* pShaderModule)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-struct val_shader_module *module;
+LVP_FROM_HANDLE(lvp_device, device, _device);
+struct lvp_shader_module *module;
 
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
 assert(pCreateInfo->flags == 0);
@@ -55,19 +55,19 @@ VkResult val_CreateShaderModule(
 module->size = pCreateInfo->codeSize;
 memcpy(module->data, pCreateInfo->pCode, module->size);
 
-*pShaderModule = val_shader_module_to_handle(module);
+*pShaderModule = lvp_shader_module_to_handle(module);
 
 return VK_SUCCESS;
 
 }
 
-void val_DestroyShaderModule(
+void lvp_DestroyShaderModule(
 VkDevice _device,
 VkShaderModule _module,
 const VkAllocationCallbacks* pAllocator)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_shader_module, module, _module);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_shader_module, module, _module);
 
 if (!_module)
 return;
@@ -75,13 +75,13 @@ void val_DestroyShaderModule(
 vk_free2(&device->alloc, pAllocator, module);
 }
 
-void val_DestroyPipeline(
+void lvp_DestroyPipeline(
 VkDevice _device,
 VkPipeline _pipeline,
 const VkAllocationCallbacks* pAllocator)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_pipeline, pipeline, _pipeline);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_pipeline, pipeline, _pipeline);
 
 if (!_pipeline)
 return;
@@ -463,8 +463,8 @@ shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
 })
 
 static void
-val_shader_compile_to_ir(struct val_pipeline *pipeline,
-struct val_shader_module *module,
+lvp_shader_compile_to_ir(struct lvp_pipeline *pipeline,
+struct lvp_shader_module *module,
 const char *entrypoint_name,
 gl_shader_stage stage,
 const VkSpecializationInfo *spec_info)
@@ -508,7 +508,7 @@ val_shader_compile_to_ir(struct val_pipeline *pipeline,
 }
 }
 }
-struct val_device *pdevice = pipeline->device;
+struct lvp_device *pdevice = pipeline->device;
 const struct spirv_to_nir_options spirv_options = {
 .environment = NIR_SPIRV_VULKAN,
 .caps = {
@@ -559,14 +559,14 @@ val_shader_compile_to_ir(struct val_pipeline *pipeline,
 nir_var_shader_in | nir_var_shader_out | nir_var_system_value, NULL);
 
 if (stage == MESA_SHADER_FRAGMENT)
-val_lower_input_attachments(nir, false);
+lvp_lower_input_attachments(nir, false);
 NIR_PASS_V(nir, nir_lower_system_values);
 NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);
 
 NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
 nir_remove_dead_variables(nir, nir_var_uniform, NULL);
 
-val_lower_pipeline_layout(pipeline->device, pipeline->layout, nir);
+lvp_lower_pipeline_layout(pipeline->device, pipeline->layout, nir);
 
 NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
 NIR_PASS_V(nir, nir_split_var_copies);
@@ -633,7 +633,7 @@ val_shader_compile_to_ir(struct val_pipeline *pipeline,
 pipeline->pipeline_nir[stage] = nir;
 }
 
-static void fill_shader_prog(struct pipe_shader_state *state, gl_shader_stage stage, struct val_pipeline *pipeline)
+static void fill_shader_prog(struct pipe_shader_state *state, gl_shader_stage stage, struct lvp_pipeline *pipeline)
 {
 state->type = PIPE_SHADER_IR_NIR;
 state->ir.nir = pipeline->pipeline_nir[stage];
@@ -679,7 +679,7 @@ merge_tess_info(struct shader_info *tes_info,
 }
 
 static gl_shader_stage
-val_shader_stage(VkShaderStageFlagBits stage)
+lvp_shader_stage(VkShaderStageFlagBits stage)
 {
 switch (stage) {
 case VK_SHADER_STAGE_VERTEX_BIT:
@@ -701,10 +701,10 @@ val_shader_stage(VkShaderStageFlagBits stage)
 }
 
 static VkResult
-val_pipeline_compile(struct val_pipeline *pipeline,
+lvp_pipeline_compile(struct lvp_pipeline *pipeline,
 gl_shader_stage stage)
 {
-struct val_device *device = pipeline->device;
+struct lvp_device *device = pipeline->device;
 device->physical_device->pscreen->finalize_nir(device->physical_device->pscreen, pipeline->pipeline_nir[stage], true);
 if (stage == MESA_SHADER_COMPUTE) {
 struct pipe_compute_state shstate = {};
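The val_shader_stage to lvp_shader_stage hunks above only show the first case label of the switch. For context, a helper of this kind maps the Vulkan stage bit onto Mesa's gl_shader_stage enum, roughly as in the sketch below (an illustration of the usual mapping, not the verbatim lavapipe function).

/* Illustrative VkShaderStageFlagBits -> gl_shader_stage mapping of the kind
 * lvp_shader_stage() performs.  Assumes Mesa's shader_enums.h for the
 * MESA_SHADER_* values; the real function in lvp_pipeline.c may differ. */
#include <vulkan/vulkan.h>
#include "compiler/shader_enums.h"

static gl_shader_stage
example_shader_stage(VkShaderStageFlagBits stage)
{
   switch (stage) {
   case VK_SHADER_STAGE_VERTEX_BIT:                  return MESA_SHADER_VERTEX;
   case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:    return MESA_SHADER_TESS_CTRL;
   case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return MESA_SHADER_TESS_EVAL;
   case VK_SHADER_STAGE_GEOMETRY_BIT:                return MESA_SHADER_GEOMETRY;
   case VK_SHADER_STAGE_FRAGMENT_BIT:                return MESA_SHADER_FRAGMENT;
   case VK_SHADER_STAGE_COMPUTE_BIT:                 return MESA_SHADER_COMPUTE;
   default:                                          return MESA_SHADER_NONE;
   }
}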
@@ -740,16 +740,16 @@ val_pipeline_compile(struct val_pipeline *pipeline,
 }
 
 static VkResult
-val_graphics_pipeline_init(struct val_pipeline *pipeline,
-struct val_device *device,
-struct val_pipeline_cache *cache,
+lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
+struct lvp_device *device,
+struct lvp_pipeline_cache *cache,
 const VkGraphicsPipelineCreateInfo *pCreateInfo,
 const VkAllocationCallbacks *alloc)
 {
 if (alloc == NULL)
 alloc = &device->alloc;
 pipeline->device = device;
-pipeline->layout = val_pipeline_layout_from_handle(pCreateInfo->layout);
+pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
 pipeline->force_min_sample = false;
 
 /* recreate createinfo */
@@ -757,10 +757,10 @@ val_graphics_pipeline_init(struct val_pipeline *pipeline,
 pipeline->is_compute_pipeline = false;
 
 for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
-VAL_FROM_HANDLE(val_shader_module, module,
+LVP_FROM_HANDLE(lvp_shader_module, module,
 pCreateInfo->pStages[i].module);
-gl_shader_stage stage = val_shader_stage(pCreateInfo->pStages[i].stage);
-val_shader_compile_to_ir(pipeline, module,
+gl_shader_stage stage = lvp_shader_stage(pCreateInfo->pStages[i].stage);
+lvp_shader_compile_to_ir(pipeline, module,
 pCreateInfo->pStages[i].pName,
 stage,
 pCreateInfo->pStages[i].pSpecializationInfo);
@@ -781,8 +781,8 @@ val_graphics_pipeline_init(struct val_pipeline *pipeline,
 
 bool has_fragment_shader = false;
 for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
-gl_shader_stage stage = val_shader_stage(pCreateInfo->pStages[i].stage);
-val_pipeline_compile(pipeline, stage);
+gl_shader_stage stage = lvp_shader_stage(pCreateInfo->pStages[i].stage);
+lvp_pipeline_compile(pipeline, stage);
 if (stage == MESA_SHADER_FRAGMENT)
 has_fragment_shader = true;
 }
@@ -804,16 +804,16 @@ val_graphics_pipeline_init(struct val_pipeline *pipeline,
 }
 
 static VkResult
-val_graphics_pipeline_create(
+lvp_graphics_pipeline_create(
 VkDevice _device,
 VkPipelineCache _cache,
 const VkGraphicsPipelineCreateInfo *pCreateInfo,
 const VkAllocationCallbacks *pAllocator,
 VkPipeline *pPipeline)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_pipeline_cache, cache, _cache);
-struct val_pipeline *pipeline;
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
+struct lvp_pipeline *pipeline;
 VkResult result;
 
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
@@ -825,19 +825,19 @@ val_graphics_pipeline_create(
 
 vk_object_base_init(&device->vk, &pipeline->base,
 VK_OBJECT_TYPE_PIPELINE);
-result = val_graphics_pipeline_init(pipeline, device, cache, pCreateInfo,
+result = lvp_graphics_pipeline_init(pipeline, device, cache, pCreateInfo,
 pAllocator);
 if (result != VK_SUCCESS) {
 vk_free2(&device->alloc, pAllocator, pipeline);
 return result;
 }
 
-*pPipeline = val_pipeline_to_handle(pipeline);
+*pPipeline = lvp_pipeline_to_handle(pipeline);
 
 return VK_SUCCESS;
 }
 
-VkResult val_CreateGraphicsPipelines(
+VkResult lvp_CreateGraphicsPipelines(
 VkDevice _device,
 VkPipelineCache pipelineCache,
 uint32_t count,
@@ -850,7 +850,7 @@ VkResult val_CreateGraphicsPipelines(
 
 for (; i < count; i++) {
 VkResult r;
-r = val_graphics_pipeline_create(_device,
+r = lvp_graphics_pipeline_create(_device,
 pipelineCache,
 &pCreateInfos[i],
 pAllocator, &pPipelines[i]);
@@ -864,42 +864,42 @@ VkResult val_CreateGraphicsPipelines(
 }
 
 static VkResult
-val_compute_pipeline_init(struct val_pipeline *pipeline,
-struct val_device *device,
-struct val_pipeline_cache *cache,
+lvp_compute_pipeline_init(struct lvp_pipeline *pipeline,
+struct lvp_device *device,
+struct lvp_pipeline_cache *cache,
 const VkComputePipelineCreateInfo *pCreateInfo,
 const VkAllocationCallbacks *alloc)
 {
-VAL_FROM_HANDLE(val_shader_module, module,
+LVP_FROM_HANDLE(lvp_shader_module, module,
 pCreateInfo->stage.module);
 if (alloc == NULL)
 alloc = &device->alloc;
 pipeline->device = device;
-pipeline->layout = val_pipeline_layout_from_handle(pCreateInfo->layout);
+pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
 pipeline->force_min_sample = false;
 
 deep_copy_compute_create_info(&pipeline->compute_create_info, pCreateInfo);
 pipeline->is_compute_pipeline = true;
 
-val_shader_compile_to_ir(pipeline, module,
+lvp_shader_compile_to_ir(pipeline, module,
 pCreateInfo->stage.pName,
 MESA_SHADER_COMPUTE,
 pCreateInfo->stage.pSpecializationInfo);
-val_pipeline_compile(pipeline, MESA_SHADER_COMPUTE);
+lvp_pipeline_compile(pipeline, MESA_SHADER_COMPUTE);
 return VK_SUCCESS;
 }
 
 static VkResult
-val_compute_pipeline_create(
+lvp_compute_pipeline_create(
 VkDevice _device,
 VkPipelineCache _cache,
 const VkComputePipelineCreateInfo *pCreateInfo,
 const VkAllocationCallbacks *pAllocator,
 VkPipeline *pPipeline)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_pipeline_cache, cache, _cache);
-struct val_pipeline *pipeline;
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
+struct lvp_pipeline *pipeline;
 VkResult result;
 
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
@@ -911,19 +911,19 @@ val_compute_pipeline_create(
 
 vk_object_base_init(&device->vk, &pipeline->base,
 VK_OBJECT_TYPE_PIPELINE);
-result = val_compute_pipeline_init(pipeline, device, cache, pCreateInfo,
+result = lvp_compute_pipeline_init(pipeline, device, cache, pCreateInfo,
 pAllocator);
 if (result != VK_SUCCESS) {
 vk_free2(&device->alloc, pAllocator, pipeline);
 return result;
 }
 
-*pPipeline = val_pipeline_to_handle(pipeline);
+*pPipeline = lvp_pipeline_to_handle(pipeline);
 
 return VK_SUCCESS;
 }
 
-VkResult val_CreateComputePipelines(
+VkResult lvp_CreateComputePipelines(
 VkDevice _device,
 VkPipelineCache pipelineCache,
 uint32_t count,
@@ -936,7 +936,7 @@ VkResult val_CreateComputePipelines(
 
 for (; i < count; i++) {
 VkResult r;
-r = val_compute_pipeline_create(_device,
+r = lvp_compute_pipeline_create(_device,
 pipelineCache,
 &pCreateInfos[i],
 pAllocator, &pPipelines[i]);
@@ -21,16 +21,16 @@
 * IN THE SOFTWARE.
 */
 
-#include "val_private.h"
+#include "lvp_private.h"
 
-VkResult val_CreatePipelineCache(
+VkResult lvp_CreatePipelineCache(
 VkDevice _device,
 const VkPipelineCacheCreateInfo* pCreateInfo,
 const VkAllocationCallbacks* pAllocator,
 VkPipelineCache* pPipelineCache)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-struct val_pipeline_cache *cache;
+LVP_FROM_HANDLE(lvp_device, device, _device);
+struct lvp_pipeline_cache *cache;
 
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
 assert(pCreateInfo->flags == 0);
@@ -49,27 +49,27 @@ VkResult val_CreatePipelineCache(
 cache->alloc = device->alloc;
 
 cache->device = device;
-*pPipelineCache = val_pipeline_cache_to_handle(cache);
+*pPipelineCache = lvp_pipeline_cache_to_handle(cache);
 
 return VK_SUCCESS;
 }
 
-void val_DestroyPipelineCache(
+void lvp_DestroyPipelineCache(
 VkDevice _device,
 VkPipelineCache _cache,
 const VkAllocationCallbacks* pAllocator)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_pipeline_cache, cache, _cache);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
 
 if (!_cache)
 return;
-// val_pipeline_cache_finish(cache);
+// lvp_pipeline_cache_finish(cache);
 vk_object_base_finish(&cache->base);
 vk_free2(&device->alloc, pAllocator, cache);
 }
 
-VkResult val_GetPipelineCacheData(
+VkResult lvp_GetPipelineCacheData(
 VkDevice _device,
 VkPipelineCache _cache,
 size_t* pDataSize,
@@ -86,14 +86,14 @@ VkResult val_GetPipelineCacheData(
 hdr[1] = 1;
 hdr[2] = VK_VENDOR_ID_MESA;
 hdr[3] = 0;
-val_device_get_cache_uuid(&hdr[4]);
+lvp_device_get_cache_uuid(&hdr[4]);
 }
 } else
 *pDataSize = 32;
 return result;
 }
 
-VkResult val_MergePipelineCaches(
+VkResult lvp_MergePipelineCaches(
 VkDevice _device,
 VkPipelineCache destCache,
 uint32_t srcCacheCount,
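The hdr[] stores in the lvp_GetPipelineCacheData hunk above fill in the pipeline-cache header that the Vulkan spec requires at the start of vkGetPipelineCacheData output (hdr[0], the total size, is written outside the visible hunk). Laid out as a struct, the 32 bytes correspond roughly to the sketch below; this is the generic spec layout, not lavapipe-specific code.

/* Generic layout of the Vulkan pipeline cache header; the hdr[1..4] writes in
 * the hunk above map onto header_version, vendor_id, device_id and uuid.
 * VK_UUID_SIZE is 16, so the whole header is the 32 bytes reported through
 * *pDataSize in the same function. */
#include <stdint.h>

#define EXAMPLE_VK_UUID_SIZE 16   /* matches VK_UUID_SIZE from vulkan_core.h */

struct example_pipeline_cache_header {
   uint32_t header_size;      /* hdr[0]: length of this header, 32 here */
   uint32_t header_version;   /* hdr[1]: VK_PIPELINE_CACHE_HEADER_VERSION_ONE == 1 */
   uint32_t vendor_id;        /* hdr[2]: VK_VENDOR_ID_MESA */
   uint32_t device_id;        /* hdr[3]: 0 for this software driver */
   uint8_t  uuid[EXAMPLE_VK_UUID_SIZE]; /* hdr[4..]: lvp_device_get_cache_uuid() */
};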
File diff suppressed because it is too large.
@@ -21,16 +21,16 @@
 * IN THE SOFTWARE.
 */
 
-#include "val_private.h"
+#include "lvp_private.h"
 #include "pipe/p_context.h"
 
-VkResult val_CreateQueryPool(
+VkResult lvp_CreateQueryPool(
 VkDevice _device,
 const VkQueryPoolCreateInfo* pCreateInfo,
 const VkAllocationCallbacks* pAllocator,
 VkQueryPool* pQueryPool)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
+LVP_FROM_HANDLE(lvp_device, device, _device);
 
 enum pipe_query_type pipeq;
 switch (pCreateInfo->queryType) {
@@ -43,7 +43,7 @@ VkResult val_CreateQueryPool(
 default:
 return VK_ERROR_FEATURE_NOT_PRESENT;
 }
-struct val_query_pool *pool;
+struct lvp_query_pool *pool;
 uint32_t pool_size = sizeof(*pool) + pCreateInfo->queryCount * sizeof(struct pipe_query *);
 
 pool = vk_zalloc2(&device->alloc, pAllocator,
@@ -58,17 +58,17 @@ VkResult val_CreateQueryPool(
 pool->count = pCreateInfo->queryCount;
 pool->base_type = pipeq;
 
-*pQueryPool = val_query_pool_to_handle(pool);
+*pQueryPool = lvp_query_pool_to_handle(pool);
 return VK_SUCCESS;
 }
 
-void val_DestroyQueryPool(
+void lvp_DestroyQueryPool(
 VkDevice _device,
 VkQueryPool _pool,
 const VkAllocationCallbacks* pAllocator)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_query_pool, pool, _pool);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_query_pool, pool, _pool);
 
 if (!pool)
 return;
@@ -80,7 +80,7 @@ void val_DestroyQueryPool(
 vk_free2(&device->alloc, pAllocator, pool);
 }
 
-VkResult val_GetQueryPoolResults(
+VkResult lvp_GetQueryPoolResults(
 VkDevice _device,
 VkQueryPool queryPool,
 uint32_t firstQuery,
@@ -90,11 +90,11 @@ VkResult val_GetQueryPoolResults(
 VkDeviceSize stride,
 VkQueryResultFlags flags)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-VAL_FROM_HANDLE(val_query_pool, pool, queryPool);
+LVP_FROM_HANDLE(lvp_device, device, _device);
+LVP_FROM_HANDLE(lvp_query_pool, pool, queryPool);
 VkResult vk_result = VK_SUCCESS;
 
-val_DeviceWaitIdle(_device);
+lvp_DeviceWaitIdle(_device);
 
 for (unsigned i = firstQuery; i < firstQuery + queryCount; i++) {
 uint8_t *dptr = (uint8_t *)((char *)pData + (stride * (i - firstQuery)));
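lvp_GetQueryPoolResults above waits for the device to go idle and then copies each query slot into the caller's buffer, honouring the requested stride and flags. From the application side the same path is reached through the standard entry point; a generic usage sketch (plain Vulkan API, nothing lavapipe-specific):

/* Generic Vulkan usage sketch: read back query_count results as 64-bit
 * values, waiting until they are available.  Assumes `device` and
 * `query_pool` were created earlier by the application. */
#include <vulkan/vulkan.h>

static VkResult
read_query_results(VkDevice device, VkQueryPool query_pool,
                   uint32_t first_query, uint32_t query_count,
                   uint64_t *results /* query_count elements */)
{
   return vkGetQueryPoolResults(device, query_pool,
                                first_query, query_count,
                                query_count * sizeof(uint64_t), results,
                                sizeof(uint64_t),
                                VK_QUERY_RESULT_64_BIT |
                                VK_QUERY_RESULT_WAIT_BIT);
}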
@@ -21,10 +21,10 @@
 * IN THE SOFTWARE.
 */
 
-#include "val_private.h"
+#include "lvp_private.h"
 #include "vk_enum_to_str.h"
-void val_printflike(3, 4)
-__val_finishme(const char *file, int line, const char *format, ...)
+void lvp_printflike(3, 4)
+__lvp_finishme(const char *file, int line, const char *format, ...)
 {
 va_list ap;
 char buffer[256];
@@ -37,7 +37,7 @@ __val_finishme(const char *file, int line, const char *format, ...)
 }
 
 VkResult
-__vk_errorf(struct val_instance *instance, VkResult error, const char *file, int line, const char *format, ...)
+__vk_errorf(struct lvp_instance *instance, VkResult error, const char *file, int line, const char *format, ...)
 {
 va_list ap;
 char buffer[256];
@@ -21,49 +21,49 @@
 * IN THE SOFTWARE.
 */
 
-#include "val_wsi.h"
+#include "lvp_wsi.h"
 
 static PFN_vkVoidFunction
-val_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
+lvp_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
 {
-return val_lookup_entrypoint(pName);
+return lvp_lookup_entrypoint(pName);
 }
 
 VkResult
-val_init_wsi(struct val_physical_device *physical_device)
+lvp_init_wsi(struct lvp_physical_device *physical_device)
 {
 return wsi_device_init(&physical_device->wsi_device,
-val_physical_device_to_handle(physical_device),
-val_wsi_proc_addr,
+lvp_physical_device_to_handle(physical_device),
+lvp_wsi_proc_addr,
 &physical_device->instance->alloc,
 -1, NULL, true);
 }
 
 void
-val_finish_wsi(struct val_physical_device *physical_device)
+lvp_finish_wsi(struct lvp_physical_device *physical_device)
 {
 wsi_device_finish(&physical_device->wsi_device,
 &physical_device->instance->alloc);
 }
 
-void val_DestroySurfaceKHR(
+void lvp_DestroySurfaceKHR(
 VkInstance _instance,
 VkSurfaceKHR _surface,
 const VkAllocationCallbacks* pAllocator)
 {
-VAL_FROM_HANDLE(val_instance, instance, _instance);
+LVP_FROM_HANDLE(lvp_instance, instance, _instance);
 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
 
 vk_free2(&instance->alloc, pAllocator, surface);
 }
 
-VkResult val_GetPhysicalDeviceSurfaceSupportKHR(
+VkResult lvp_GetPhysicalDeviceSurfaceSupportKHR(
 VkPhysicalDevice physicalDevice,
 uint32_t queueFamilyIndex,
 VkSurfaceKHR surface,
 VkBool32* pSupported)
 {
-VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
 
 return wsi_common_get_surface_support(&device->wsi_device,
 queueFamilyIndex,
@@ -71,62 +71,62 @@ VkResult val_GetPhysicalDeviceSurfaceSupportKHR(
 pSupported);
 }
 
-VkResult val_GetPhysicalDeviceSurfaceCapabilitiesKHR(
+VkResult lvp_GetPhysicalDeviceSurfaceCapabilitiesKHR(
 VkPhysicalDevice physicalDevice,
 VkSurfaceKHR surface,
 VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
 {
-VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
 
 return wsi_common_get_surface_capabilities(&device->wsi_device,
 surface,
 pSurfaceCapabilities);
 }
 
-VkResult val_GetPhysicalDeviceSurfaceCapabilities2KHR(
+VkResult lvp_GetPhysicalDeviceSurfaceCapabilities2KHR(
 VkPhysicalDevice physicalDevice,
 const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
 VkSurfaceCapabilities2KHR* pSurfaceCapabilities)
 {
-VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
 
 return wsi_common_get_surface_capabilities2(&device->wsi_device,
 pSurfaceInfo,
 pSurfaceCapabilities);
 }
 
-VkResult val_GetPhysicalDeviceSurfaceCapabilities2EXT(
+VkResult lvp_GetPhysicalDeviceSurfaceCapabilities2EXT(
 VkPhysicalDevice physicalDevice,
 VkSurfaceKHR surface,
 VkSurfaceCapabilities2EXT* pSurfaceCapabilities)
 {
-VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
 
 return wsi_common_get_surface_capabilities2ext(&device->wsi_device,
 surface,
 pSurfaceCapabilities);
 }
 
-VkResult val_GetPhysicalDeviceSurfaceFormatsKHR(
+VkResult lvp_GetPhysicalDeviceSurfaceFormatsKHR(
 VkPhysicalDevice physicalDevice,
 VkSurfaceKHR surface,
 uint32_t* pSurfaceFormatCount,
 VkSurfaceFormatKHR* pSurfaceFormats)
 {
-VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
 return wsi_common_get_surface_formats(&device->wsi_device,
 surface,
 pSurfaceFormatCount,
 pSurfaceFormats);
 }
 
-VkResult val_GetPhysicalDeviceSurfacePresentModesKHR(
+VkResult lvp_GetPhysicalDeviceSurfacePresentModesKHR(
 VkPhysicalDevice physicalDevice,
 VkSurfaceKHR surface,
 uint32_t* pPresentModeCount,
 VkPresentModeKHR* pPresentModes)
 {
-VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
 
 return wsi_common_get_surface_present_modes(&device->wsi_device,
 surface,
@@ -134,13 +134,13 @@ VkResult val_GetPhysicalDeviceSurfacePresentModesKHR(
 pPresentModes);
 }
 
-VkResult val_CreateSwapchainKHR(
+VkResult lvp_CreateSwapchainKHR(
 VkDevice _device,
 const VkSwapchainCreateInfoKHR* pCreateInfo,
 const VkAllocationCallbacks* pAllocator,
 VkSwapchainKHR* pSwapchain)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
+LVP_FROM_HANDLE(lvp_device, device, _device);
 const VkAllocationCallbacks *alloc;
 if (pAllocator)
 alloc = pAllocator;
@@ -148,18 +148,18 @@ VkResult val_CreateSwapchainKHR(
 alloc = &device->alloc;
 
 return wsi_common_create_swapchain(&device->physical_device->wsi_device,
-val_device_to_handle(device),
+lvp_device_to_handle(device),
 pCreateInfo,
 alloc,
 pSwapchain);
 }
 
-void val_DestroySwapchainKHR(
+void lvp_DestroySwapchainKHR(
 VkDevice _device,
 VkSwapchainKHR swapchain,
 const VkAllocationCallbacks* pAllocator)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
+LVP_FROM_HANDLE(lvp_device, device, _device);
 const VkAllocationCallbacks *alloc;
 
 if (pAllocator)
@@ -170,7 +170,7 @@ void val_DestroySwapchainKHR(
 wsi_common_destroy_swapchain(_device, swapchain, alloc);
 }
 
-VkResult val_GetSwapchainImagesKHR(
+VkResult lvp_GetSwapchainImagesKHR(
 VkDevice device,
 VkSwapchainKHR swapchain,
 uint32_t* pSwapchainImageCount,
@@ -181,7 +181,7 @@ VkResult val_GetSwapchainImagesKHR(
 pSwapchainImages);
 }
 
-VkResult val_AcquireNextImageKHR(
+VkResult lvp_AcquireNextImageKHR(
 VkDevice device,
 VkSwapchainKHR swapchain,
 uint64_t timeout,
@@ -198,23 +198,23 @@ VkResult val_AcquireNextImageKHR(
 .deviceMask = 0,
 };
 
-return val_AcquireNextImage2KHR(device, &acquire_info, pImageIndex);
+return lvp_AcquireNextImage2KHR(device, &acquire_info, pImageIndex);
 }
 
-VkResult val_AcquireNextImage2KHR(
+VkResult lvp_AcquireNextImage2KHR(
 VkDevice _device,
 const VkAcquireNextImageInfoKHR* pAcquireInfo,
 uint32_t* pImageIndex)
 {
-VAL_FROM_HANDLE(val_device, device, _device);
-struct val_physical_device *pdevice = device->physical_device;
+LVP_FROM_HANDLE(lvp_device, device, _device);
+struct lvp_physical_device *pdevice = device->physical_device;
 
 VkResult result = wsi_common_acquire_next_image2(&pdevice->wsi_device,
 _device,
 pAcquireInfo,
 pImageIndex);
 #if 0
-VAL_FROM_HANDLE(val_fence, fence, pAcquireInfo->fence);
+LVP_FROM_HANDLE(lvp_fence, fence, pAcquireInfo->fence);
 
 if (fence && (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR)) {
 if (fence->fence)
@@ -229,19 +229,19 @@ VkResult val_AcquireNextImage2KHR(
 return result;
 }
 
-VkResult val_QueuePresentKHR(
+VkResult lvp_QueuePresentKHR(
 VkQueue _queue,
 const VkPresentInfoKHR* pPresentInfo)
 {
-VAL_FROM_HANDLE(val_queue, queue, _queue);
+LVP_FROM_HANDLE(lvp_queue, queue, _queue);
 return wsi_common_queue_present(&queue->device->physical_device->wsi_device,
-val_device_to_handle(queue->device),
+lvp_device_to_handle(queue->device),
 _queue, 0,
 pPresentInfo);
 }
 
 
-VkResult val_GetDeviceGroupPresentCapabilitiesKHR(
+VkResult lvp_GetDeviceGroupPresentCapabilitiesKHR(
 VkDevice device,
 VkDeviceGroupPresentCapabilitiesKHR* pCapabilities)
 {
@@ -253,7 +253,7 @@ VkResult val_GetDeviceGroupPresentCapabilitiesKHR(
 return VK_SUCCESS;
 }
 
-VkResult val_GetDeviceGroupSurfacePresentModesKHR(
+VkResult lvp_GetDeviceGroupSurfacePresentModesKHR(
 VkDevice device,
 VkSurfaceKHR surface,
 VkDeviceGroupPresentModeFlagsKHR* pModes)
@@ -263,13 +263,13 @@ VkResult val_GetDeviceGroupSurfacePresentModesKHR(
 return VK_SUCCESS;
 }
 
-VkResult val_GetPhysicalDevicePresentRectanglesKHR(
+VkResult lvp_GetPhysicalDevicePresentRectanglesKHR(
 VkPhysicalDevice physicalDevice,
 VkSurfaceKHR surface,
 uint32_t* pRectCount,
 VkRect2D* pRects)
 {
-VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
 
 return wsi_common_get_present_rectangles(&device->wsi_device,
 surface,
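lvp_wsi_proc_addr in the hunks above resolves WSI callbacks through lvp_lookup_entrypoint, whose table is generated into lvp_entrypoints.c by lvp_entrypoints_gen.py (see the meson.build changes later in this diff). The sketch below shows the general idea of such a name-to-function lookup; the generated code is considerably more elaborate, and the demo_* functions here are stand-ins, not real entry points.

/* Minimal illustration of a name -> function lookup of the kind
 * lvp_lookup_entrypoint() provides; the real table is generated from vk.xml
 * and differs in structure and scale. */
#include <string.h>

typedef void (*PFN_vkVoidFunction)(void);

struct entrypoint {
   const char *name;
   PFN_vkVoidFunction func;
};

/* hypothetical stand-in implementations, for illustration only */
static void demo_create(void)  {}
static void demo_destroy(void) {}

static const struct entrypoint entrypoints[] = {
   { "vkCreateRenderPass",  (PFN_vkVoidFunction)demo_create },
   { "vkDestroyRenderPass", (PFN_vkVoidFunction)demo_destroy },
};

static PFN_vkVoidFunction
example_lookup_entrypoint(const char *name)
{
   for (unsigned i = 0; i < sizeof(entrypoints) / sizeof(entrypoints[0]); i++) {
      if (!strcmp(entrypoints[i].name, name))
         return entrypoints[i].func;
   }
   return NULL;
}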
@@ -23,52 +23,52 @@
 
 #pragma once
 
-#include "val_private.h"
+#include "lvp_private.h"
 
-struct val_swapchain;
+struct lvp_swapchain;
 
-struct val_wsi_interface {
+struct lvp_wsi_interface {
 VkResult (*get_support)(VkIcdSurfaceBase *surface,
-struct val_physical_device *device,
+struct lvp_physical_device *device,
 uint32_t queueFamilyIndex,
 VkBool32* pSupported);
 VkResult (*get_capabilities)(VkIcdSurfaceBase *surface,
-struct val_physical_device *device,
+struct lvp_physical_device *device,
 VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
 VkResult (*get_formats)(VkIcdSurfaceBase *surface,
-struct val_physical_device *device,
+struct lvp_physical_device *device,
 uint32_t* pSurfaceFormatCount,
 VkSurfaceFormatKHR* pSurfaceFormats);
 VkResult (*get_present_modes)(VkIcdSurfaceBase *surface,
-struct val_physical_device *device,
+struct lvp_physical_device *device,
 uint32_t* pPresentModeCount,
 VkPresentModeKHR* pPresentModes);
 VkResult (*create_swapchain)(VkIcdSurfaceBase *surface,
-struct val_device *device,
+struct lvp_device *device,
 const VkSwapchainCreateInfoKHR* pCreateInfo,
 const VkAllocationCallbacks* pAllocator,
-struct val_swapchain **swapchain);
+struct lvp_swapchain **swapchain);
 };
 
-struct val_swapchain {
-struct val_device *device;
+struct lvp_swapchain {
+struct lvp_device *device;
 
-VkResult (*destroy)(struct val_swapchain *swapchain,
+VkResult (*destroy)(struct lvp_swapchain *swapchain,
 const VkAllocationCallbacks *pAllocator);
-VkResult (*get_images)(struct val_swapchain *swapchain,
+VkResult (*get_images)(struct lvp_swapchain *swapchain,
 uint32_t *pCount, VkImage *pSwapchainImages);
-VkResult (*acquire_next_image)(struct val_swapchain *swap_chain,
+VkResult (*acquire_next_image)(struct lvp_swapchain *swap_chain,
 uint64_t timeout, VkSemaphore semaphore,
 uint32_t *image_index);
-VkResult (*queue_present)(struct val_swapchain *swap_chain,
-struct val_queue *queue,
+VkResult (*queue_present)(struct lvp_swapchain *swap_chain,
+struct lvp_queue *queue,
 uint32_t image_index);
 };
 
-VAL_DEFINE_NONDISP_HANDLE_CASTS(_VkIcdSurfaceBase, VkSurfaceKHR)
-VAL_DEFINE_NONDISP_HANDLE_CASTS(val_swapchain, VkSwapchainKHR)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(_VkIcdSurfaceBase, VkSurfaceKHR)
+LVP_DEFINE_NONDISP_HANDLE_CASTS(lvp_swapchain, VkSwapchainKHR)
 
-VkResult val_x11_init_wsi(struct val_instance *instance);
-void val_x11_finish_wsi(struct val_instance *instance);
-VkResult val_wl_init_wsi(struct val_instance *instance);
-void val_wl_finish_wsi(struct val_instance *instance);
+VkResult lvp_x11_init_wsi(struct lvp_instance *instance);
+void lvp_x11_finish_wsi(struct lvp_instance *instance);
+VkResult lvp_wl_init_wsi(struct lvp_instance *instance);
+void lvp_wl_finish_wsi(struct lvp_instance *instance);
@@ -24,25 +24,25 @@
 */
 
 #include "wsi_common_wayland.h"
-#include "val_private.h"
+#include "lvp_private.h"
 
-VkBool32 val_GetPhysicalDeviceWaylandPresentationSupportKHR(
+VkBool32 lvp_GetPhysicalDeviceWaylandPresentationSupportKHR(
 VkPhysicalDevice physicalDevice,
 uint32_t queueFamilyIndex,
 struct wl_display* display)
 {
-VAL_FROM_HANDLE(val_physical_device, physical_device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, physical_device, physicalDevice);
 
 return wsi_wl_get_presentation_support(&physical_device->wsi_device, display);
 }
 
-VkResult val_CreateWaylandSurfaceKHR(
+VkResult lvp_CreateWaylandSurfaceKHR(
 VkInstance _instance,
 const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
 const VkAllocationCallbacks* pAllocator,
 VkSurfaceKHR* pSurface)
 {
-VAL_FROM_HANDLE(val_instance, instance, _instance);
+LVP_FROM_HANDLE(lvp_instance, instance, _instance);
 const VkAllocationCallbacks *alloc;
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);
 
@@ -25,15 +25,15 @@
 #include <xcb/xcb.h>
 
 #include "wsi_common_x11.h"
-#include "val_private.h"
+#include "lvp_private.h"
 
-VkBool32 val_GetPhysicalDeviceXcbPresentationSupportKHR(
+VkBool32 lvp_GetPhysicalDeviceXcbPresentationSupportKHR(
 VkPhysicalDevice physicalDevice,
 uint32_t queueFamilyIndex,
 xcb_connection_t* connection,
 xcb_visualid_t visual_id)
 {
-VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
 
 return wsi_get_physical_device_xcb_presentation_support(
 &device->wsi_device,
@@ -41,13 +41,13 @@ VkBool32 val_GetPhysicalDeviceXcbPresentationSupportKHR(
 connection, visual_id);
 }
 
-VkBool32 val_GetPhysicalDeviceXlibPresentationSupportKHR(
+VkBool32 lvp_GetPhysicalDeviceXlibPresentationSupportKHR(
 VkPhysicalDevice physicalDevice,
 uint32_t queueFamilyIndex,
 Display* dpy,
 VisualID visualID)
 {
-VAL_FROM_HANDLE(val_physical_device, device, physicalDevice);
+LVP_FROM_HANDLE(lvp_physical_device, device, physicalDevice);
 
 return wsi_get_physical_device_xcb_presentation_support(
 &device->wsi_device,
@@ -55,13 +55,13 @@ VkBool32 val_GetPhysicalDeviceXlibPresentationSupportKHR(
 XGetXCBConnection(dpy), visualID);
 }
 
-VkResult val_CreateXcbSurfaceKHR(
+VkResult lvp_CreateXcbSurfaceKHR(
 VkInstance _instance,
 const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
 const VkAllocationCallbacks* pAllocator,
 VkSurfaceKHR* pSurface)
 {
-VAL_FROM_HANDLE(val_instance, instance, _instance);
+LVP_FROM_HANDLE(lvp_instance, instance, _instance);
 const VkAllocationCallbacks *alloc;
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);
 
@@ -73,13 +73,13 @@ VkResult val_CreateXcbSurfaceKHR(
 return wsi_create_xcb_surface(alloc, pCreateInfo, pSurface);
 }
 
-VkResult val_CreateXlibSurfaceKHR(
+VkResult lvp_CreateXlibSurfaceKHR(
 VkInstance _instance,
 const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
 const VkAllocationCallbacks* pAllocator,
 VkSurfaceKHR* pSurface)
 {
-VAL_FROM_HANDLE(val_instance, instance, _instance);
+LVP_FROM_HANDLE(lvp_instance, instance, _instance);
 const VkAllocationCallbacks *alloc;
 
 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);
src/gallium/frontends/lavapipe/meson.build (new file, 66 lines)
@@ -0,0 +1,66 @@
+
+lvp_entrypoints = custom_target(
+'lvp_entrypoints.[ch]',
+input : ['lvp_entrypoints_gen.py', vk_api_xml],
+output : ['lvp_entrypoints.h', 'lvp_entrypoints.c'],
+command : [
+prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--outdir',
+meson.current_build_dir()
+],
+depend_files : files('lvp_extensions.py'),
+)
+
+lvp_extensions_c = custom_target(
+'lvp_extensions.c',
+input : ['lvp_extensions.py', vk_api_xml],
+output : ['lvp_extensions.c', 'lvp_extensions.h'],
+command : [
+prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--out-c', '@OUTPUT0@',
+'--out-h', '@OUTPUT1@'
+],
+)
+
+liblvp_files = files(
+'lvp_device.c',
+'lvp_cmd_buffer.c',
+'lvp_descriptor_set.c',
+'lvp_execute.c',
+'lvp_util.c',
+'lvp_image.c',
+'lvp_formats.c',
+'lvp_lower_vulkan_resource.c',
+'lvp_lower_vulkan_resource.h',
+'lvp_lower_input_attachments.c',
+'lvp_pass.c',
+'lvp_pipeline.c',
+'lvp_pipeline_cache.c',
+'lvp_query.c',
+'lvp_wsi.c')
+
+lvp_deps = []
+lvp_flags = []
+
+if with_platform_x11
+lvp_deps += dep_xcb_dri3
+lvp_flags += [
+'-DVK_USE_PLATFORM_XCB_KHR',
+'-DVK_USE_PLATFORM_XLIB_KHR',
+]
+liblvp_files += files('lvp_wsi_x11.c')
+endif
+
+if with_platform_wayland
+lvp_deps += dep_wayland_client
+lvp_flags += '-DVK_USE_PLATFORM_WAYLAND_KHR'
+liblvp_files += files('lvp_wsi_wayland.c')
+endif
+
+liblavapipe_st = static_library(
+'lavapipe_st',
+[liblvp_files, lvp_entrypoints, lvp_extensions_c ],
+link_with : [ libvulkan_wsi ],
+c_args : [ lvp_flags ],
+gnu_symbol_visibility : 'hidden',
+include_directories : [ inc_include, inc_src, inc_util, inc_gallium, inc_compiler, inc_gallium_aux, inc_vulkan_wsi ],
+dependencies : [ idep_nir, idep_mesautil, idep_vulkan_util ]
+)
@@ -1,66 +0,0 @@
-
-val_entrypoints = custom_target(
-'val_entrypoints.[ch]',
-input : ['val_entrypoints_gen.py', vk_api_xml],
-output : ['val_entrypoints.h', 'val_entrypoints.c'],
-command : [
-prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--outdir',
-meson.current_build_dir()
-],
-depend_files : files('val_extensions.py'),
-)
-
-val_extensions_c = custom_target(
-'val_extensions.c',
-input : ['val_extensions.py', vk_api_xml],
-output : ['val_extensions.c', 'val_extensions.h'],
-command : [
-prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--out-c', '@OUTPUT0@',
-'--out-h', '@OUTPUT1@'
-],
-)
-
-libval_files = files(
-'val_device.c',
-'val_cmd_buffer.c',
-'val_descriptor_set.c',
-'val_execute.c',
-'val_util.c',
-'val_image.c',
-'val_formats.c',
-'val_lower_vulkan_resource.c',
-'val_lower_vulkan_resource.h',
-'val_lower_input_attachments.c',
-'val_pass.c',
-'val_pipeline.c',
-'val_pipeline_cache.c',
-'val_query.c',
-'val_wsi.c')
-
-val_deps = []
-val_flags = []
-
-if with_platform_x11
-val_deps += dep_xcb_dri3
-val_flags += [
-'-DVK_USE_PLATFORM_XCB_KHR',
-'-DVK_USE_PLATFORM_XLIB_KHR',
-]
-libval_files += files('val_wsi_x11.c')
-endif
-
-if with_platform_wayland
-val_deps += dep_wayland_client
-val_flags += '-DVK_USE_PLATFORM_WAYLAND_KHR'
-libval_files += files('val_wsi_wayland.c')
-endif
-
-libvallium_st = static_library(
-'vallium_st',
-[libval_files, val_entrypoints, val_extensions_c ],
-link_with : [ libvulkan_wsi ],
-c_args : [ val_flags ],
-gnu_symbol_visibility : 'hidden',
-include_directories : [ inc_include, inc_src, inc_util, inc_gallium, inc_compiler, inc_gallium_aux, inc_vulkan_wsi ],
-dependencies : [ idep_nir, idep_mesautil, idep_vulkan_util ]
-)
@@ -226,6 +226,6 @@ if with_tests
 subdir('tests')
 endif
 if with_swrast_vk
-subdir('frontends/vallium')
-subdir('targets/vallium')
+subdir('frontends/lavapipe')
+subdir('targets/lavapipe')
 endif
@@ -28,10 +28,10 @@ import argparse
 if __name__ == '__main__':
 parser = argparse.ArgumentParser()
 parser.add_argument('--out', help='Output json file.', required=True)
-parser.add_argument('--lib-path', help='Path to libvulkan_val.so')
+parser.add_argument('--lib-path', help='Path to libvulkan_lvp.so')
 args = parser.parse_args()
 
-path = 'libvulkan_val.so'
+path = 'libvulkan_lvp.so'
 if args.lib_path:
 path = os.path.join(args.lib_path, path)
 
@@ -1,8 +1,8 @@
-libvulkan_val = shared_library(
-'vulkan_val',
+libvulkan_lvp = shared_library(
+'vulkan_lvp',
 [ 'target.c' ],
 include_directories : [ inc_src, inc_util, inc_include, inc_gallium, inc_gallium_aux, inc_gallium_winsys, inc_gallium_drivers ],
-link_whole : [ libvallium_st ],
+link_whole : [ liblavapipe_st ],
 link_with : [libpipe_loader_static, libmegadriver_stub, libdri, libdricommon ,libgallium, libwsw, libswdri, libws_null, libswkmsdri ],
 gnu_symbol_visibility : 'hidden',
 link_args : [ld_args_bsymbolic, ld_args_gc_sections],
@@ -11,16 +11,16 @@ libvulkan_val = shared_library(
 name_suffix : 'so',
 )
 
-val_icd = custom_target(
-'val_icd',
-input : 'val_icd.py',
-output : 'val_icd.@0@.json'.format(host_machine.cpu()),
+lvp_icd = custom_target(
+'lvp_icd',
+input : 'lvp_icd.py',
+output : 'lvp_icd.@0@.json'.format(host_machine.cpu()),
 command : [
 prog_python, '@INPUT@',
 '--lib-path', join_paths(get_option('prefix'), get_option('libdir')),
 '--out', '@OUTPUT@',
 ],
-depend_files : files('../../frontends/vallium/val_extensions.py'),
+depend_files : files('../../frontends/lavapipe/lvp_extensions.py'),
 build_by_default : true,
 install_dir : with_vulkan_icd_dir,
 install : true,