lavapipe: unsupport NV_device_generated_commands

this is no longer useful now that VK_EXT_device_generated_commands is supported

Reviewed-by: Konstantin Seurer <konstantin.seurer@gmail.com>

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/32931>
Mike Blumenkrantz 2024-10-25 13:14:09 -04:00 committed by Marge Bot
parent 3f90303eeb
commit f6ff8bdf74
4 changed files with 4 additions and 404 deletions


@@ -275,7 +275,6 @@ static const struct vk_device_extension_table lvp_device_extensions_supported =
#endif
.GOOGLE_decorate_string = true,
.GOOGLE_hlsl_functionality1 = true,
.NV_device_generated_commands = true,
};
static bool
@@ -649,9 +648,6 @@ lvp_get_features(const struct lvp_physical_device *pdevice,
.robustImageAccess2 = true,
.nullDescriptor = true,
/* VK_NV_device_generated_commands */
.deviceGeneratedCommandsNV = true,
/* VK_EXT_device_generated_commands */
.deviceGeneratedCommands = true,
.dynamicGeneratedPipelineLayout = true,
@@ -1056,20 +1052,12 @@ lvp_get_properties(const struct lvp_physical_device *device, struct vk_propertie
/* VK_EXT_extended_dynamic_state3 */
.dynamicPrimitiveTopologyUnrestricted = VK_TRUE,
/* VK_NV_device_generated_commands */
.maxGraphicsShaderGroupCount = 1<<12,
.maxIndirectSequenceCount = 1<<20,
.maxIndirectCommandsTokenCount = MAX_DGC_TOKENS,
.maxIndirectCommandsStreamCount = MAX_DGC_STREAMS,
.maxIndirectCommandsTokenOffset = 2047,
.maxIndirectCommandsStreamStride = 2048,
.minSequencesCountBufferOffsetAlignment = 4,
.minSequencesIndexBufferOffsetAlignment = 4,
.minIndirectCommandsBufferOffsetAlignment = 4,
/* VK_EXT_device_generated_commands */
.maxIndirectPipelineCount = 1<<12,
.maxIndirectShaderObjectCount = 1<<12,
.maxIndirectSequenceCount = 1<<20,
.maxIndirectCommandsTokenCount = MAX_DGC_TOKENS,
.maxIndirectCommandsTokenOffset = 2047,
.maxIndirectCommandsIndirectStride = 2048,
.supportedIndirectCommandsInputModes = VK_INDIRECT_COMMANDS_INPUT_MODE_VULKAN_INDEX_BUFFER_EXT | VK_INDIRECT_COMMANDS_INPUT_MODE_DXGI_INDEX_BUFFER_EXT,
.supportedIndirectCommandsShaderStages = VK_SHADER_STAGE_ALL,


@@ -23,128 +23,6 @@
#include "lvp_private.h"
VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateIndirectCommandsLayoutNV(
VkDevice _device,
const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkIndirectCommandsLayoutNV* pIndirectCommandsLayout)
{
LVP_FROM_HANDLE(lvp_device, device, _device);
struct lvp_indirect_command_layout_nv *dlayout;
size_t size = sizeof(*dlayout) + pCreateInfo->tokenCount * sizeof(VkIndirectCommandsLayoutTokenNV);
dlayout =
vk_zalloc2(&device->vk.alloc, pAllocator, size, alignof(struct lvp_indirect_command_layout_nv),
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!dlayout)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &dlayout->base, VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV);
dlayout->stream_count = pCreateInfo->streamCount;
dlayout->token_count = pCreateInfo->tokenCount;
for (unsigned i = 0; i < pCreateInfo->streamCount; i++)
dlayout->stream_strides[i] = pCreateInfo->pStreamStrides[i];
typed_memcpy(dlayout->tokens, pCreateInfo->pTokens, pCreateInfo->tokenCount);
*pIndirectCommandsLayout = lvp_indirect_command_layout_nv_to_handle(dlayout);
return VK_SUCCESS;
}
VKAPI_ATTR void VKAPI_CALL lvp_DestroyIndirectCommandsLayoutNV(
VkDevice _device,
VkIndirectCommandsLayoutNV indirectCommandsLayout,
const VkAllocationCallbacks* pAllocator)
{
LVP_FROM_HANDLE(lvp_device, device, _device);
VK_FROM_HANDLE(lvp_indirect_command_layout_nv, layout, indirectCommandsLayout);
if (!layout)
return;
vk_object_base_finish(&layout->base);
vk_free2(&device->vk.alloc, pAllocator, layout);
}
enum vk_cmd_type
lvp_nv_dgc_token_to_cmd_type(const VkIndirectCommandsLayoutTokenNV *token)
{
switch (token->tokenType) {
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_SHADER_GROUP_NV:
return VK_CMD_BIND_PIPELINE_SHADER_GROUP_NV;
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_STATE_FLAGS_NV:
if (token->indirectStateFlags & VK_INDIRECT_STATE_FLAG_FRONTFACE_BIT_NV) {
return VK_CMD_SET_FRONT_FACE;
}
assert(!"unknown token type!");
break;
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV:
return VK_CMD_PUSH_CONSTANTS2;
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NV:
return VK_CMD_BIND_INDEX_BUFFER;
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NV:
return VK_CMD_BIND_VERTEX_BUFFERS2;
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV:
return VK_CMD_DRAW_INDEXED_INDIRECT;
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV:
return VK_CMD_DRAW_INDIRECT;
// only available if VK_EXT_mesh_shader is supported
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_MESH_TASKS_NV:
return VK_CMD_DRAW_MESH_TASKS_INDIRECT_EXT;
// only available if VK_NV_mesh_shader is supported
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV:
unreachable("NV_mesh_shader unsupported!");
default:
unreachable("unknown token type");
}
return UINT32_MAX;
}
VKAPI_ATTR void VKAPI_CALL lvp_GetGeneratedCommandsMemoryRequirementsNV(
VkDevice device,
const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo,
VkMemoryRequirements2* pMemoryRequirements)
{
VK_FROM_HANDLE(lvp_indirect_command_layout_nv, dlayout, pInfo->indirectCommandsLayout);
size_t size = sizeof(struct list_head);
for (unsigned i = 0; i < dlayout->token_count; i++) {
const VkIndirectCommandsLayoutTokenNV *token = &dlayout->tokens[i];
UNUSED struct vk_cmd_queue_entry *cmd;
enum vk_cmd_type type = lvp_nv_dgc_token_to_cmd_type(token);
size += vk_cmd_queue_type_sizes[type];
switch (token->tokenType) {
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NV:
size += sizeof(*cmd->u.bind_vertex_buffers.buffers);
size += sizeof(*cmd->u.bind_vertex_buffers.offsets);
size += sizeof(*cmd->u.bind_vertex_buffers2.sizes) + sizeof(*cmd->u.bind_vertex_buffers2.strides);
break;
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV:
size += token->pushconstantSize + sizeof(VkPushConstantsInfoKHR);
break;
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_SHADER_GROUP_NV:
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NV:
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_STATE_FLAGS_NV:
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV:
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV:
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV:
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_MESH_TASKS_NV:
break;
default:
unreachable("unknown type!");
}
}
size *= pInfo->maxSequencesCount;
pMemoryRequirements->memoryRequirements.memoryTypeBits = 1;
pMemoryRequirements->memoryRequirements.alignment = 4;
pMemoryRequirements->memoryRequirements.size = align(size, pMemoryRequirements->memoryRequirements.alignment);
}
VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateIndirectExecutionSetEXT(
VkDevice _device,
const VkIndirectExecutionSetCreateInfoEXT* pCreateInfo,


@@ -1139,17 +1139,6 @@ static void handle_pipeline(struct vk_cmd_queue_entry *cmd,
state->push_size[pipeline->type] = pipeline->layout->push_constant_size;
}
static void
handle_graphics_pipeline_group(struct vk_cmd_queue_entry *cmd, struct rendering_state *state)
{
assert(cmd->u.bind_pipeline_shader_group_nv.pipeline_bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
LVP_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline_shader_group_nv.pipeline);
if (cmd->u.bind_pipeline_shader_group_nv.group_index)
pipeline = lvp_pipeline_from_handle(pipeline->groups[cmd->u.bind_pipeline_shader_group_nv.group_index - 1]);
handle_graphics_pipeline(pipeline, state);
state->push_size[pipeline->type] = pipeline->layout->push_constant_size;
}
static void handle_vertex_buffers2(struct vk_cmd_queue_entry *cmd,
struct rendering_state *state)
{
@@ -3942,228 +3931,6 @@ get_buffer(struct rendering_state *state, const uint8_t *ptr, size_t *offset)
abort();
}
static size_t
process_sequence(struct rendering_state *state,
VkPipeline pipeline, struct lvp_indirect_command_layout_nv *dlayout,
struct list_head *list, uint8_t *pbuf, size_t max_size,
uint8_t **map_streams, const VkIndirectCommandsStreamNV *pstreams, uint32_t seq, bool print_cmds)
{
size_t size = 0;
for (uint32_t t = 0; t < dlayout->token_count; t++){
const VkIndirectCommandsLayoutTokenNV *token = &dlayout->tokens[t];
uint32_t stride = dlayout->stream_strides[token->stream];
uint8_t *stream = map_streams[token->stream];
uint32_t offset = stride * seq + token->offset;
uint32_t draw_offset = offset + pstreams[token->stream].offset;
void *input = stream + offset;
struct vk_cmd_queue_entry *cmd = (struct vk_cmd_queue_entry*)(pbuf + size);
size_t cmd_size = vk_cmd_queue_type_sizes[lvp_nv_dgc_token_to_cmd_type(token)];
uint8_t *cmdptr = (void*)(pbuf + size + cmd_size);
if (print_cmds)
fprintf(stderr, "DGC %s\n", vk_IndirectCommandsTokenTypeNV_to_str(token->tokenType));
if (max_size < size + cmd_size)
abort();
cmd->type = lvp_nv_dgc_token_to_cmd_type(token);
switch (token->tokenType) {
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_SHADER_GROUP_NV: {
VkBindShaderGroupIndirectCommandNV *bind = input;
cmd->u.bind_pipeline_shader_group_nv.pipeline_bind_point = VK_PIPELINE_BIND_POINT_GRAPHICS;
cmd->u.bind_pipeline_shader_group_nv.pipeline = pipeline;
cmd->u.bind_pipeline_shader_group_nv.group_index = bind->groupIndex;
break;
}
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_STATE_FLAGS_NV: {
VkSetStateFlagsIndirectCommandNV *state = input;
if (token->indirectStateFlags & VK_INDIRECT_STATE_FLAG_FRONTFACE_BIT_NV) {
if (state->data & BITFIELD_BIT(VK_FRONT_FACE_CLOCKWISE)) {
cmd->u.set_front_face.front_face = VK_FRONT_FACE_CLOCKWISE;
} else {
cmd->u.set_front_face.front_face = VK_FRONT_FACE_COUNTER_CLOCKWISE;
}
} else {
/* skip this if unrecognized state flag */
continue;
}
break;
}
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV: {
uint32_t *data = input;
cmd_size += token->pushconstantSize + sizeof(VkPushConstantsInfoKHR);
if (max_size < size + cmd_size)
abort();
cmd->u.push_constants2.push_constants_info = (void*)cmdptr;
VkPushConstantsInfoKHR *pci = cmd->u.push_constants2.push_constants_info;
pci->layout = token->pushconstantPipelineLayout;
pci->stageFlags = token->pushconstantShaderStageFlags;
pci->offset = token->pushconstantOffset;
pci->size = token->pushconstantSize;
pci->pValues = (void*)((uint8_t*)cmdptr + sizeof(VkPushConstantsInfoKHR));
memcpy((void*)pci->pValues, data, token->pushconstantSize);
break;
}
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NV: {
VkBindIndexBufferIndirectCommandNV *data = input;
cmd->u.bind_index_buffer.offset = 0;
if (data->bufferAddress)
cmd->u.bind_index_buffer.buffer = get_buffer(state, (void*)(uintptr_t)data->bufferAddress, (size_t*)&cmd->u.bind_index_buffer.offset);
else
cmd->u.bind_index_buffer.buffer = VK_NULL_HANDLE;
cmd->u.bind_index_buffer.index_type = data->indexType;
for (unsigned i = 0; i < token->indexTypeCount; i++) {
if (data->indexType == token->pIndexTypeValues[i]) {
cmd->u.bind_index_buffer.index_type = token->pIndexTypes[i];
break;
}
}
break;
}
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NV: {
VkBindVertexBufferIndirectCommandNV *data = input;
cmd_size += sizeof(*cmd->u.bind_vertex_buffers2.buffers) + sizeof(*cmd->u.bind_vertex_buffers2.offsets);
cmd_size += sizeof(*cmd->u.bind_vertex_buffers2.sizes) + sizeof(*cmd->u.bind_vertex_buffers2.strides);
if (max_size < size + cmd_size)
abort();
cmd->u.bind_vertex_buffers2.first_binding = token->vertexBindingUnit;
cmd->u.bind_vertex_buffers2.binding_count = 1;
cmd->u.bind_vertex_buffers2.buffers = (void*)cmdptr;
uint32_t alloc_offset = sizeof(*cmd->u.bind_vertex_buffers2.buffers);
cmd->u.bind_vertex_buffers2.offsets = (void*)(cmdptr + alloc_offset);
alloc_offset += sizeof(*cmd->u.bind_vertex_buffers2.offsets);
cmd->u.bind_vertex_buffers2.sizes = (void*)(cmdptr + alloc_offset);
alloc_offset += sizeof(*cmd->u.bind_vertex_buffers2.sizes);
cmd->u.bind_vertex_buffers2.offsets[0] = 0;
cmd->u.bind_vertex_buffers2.buffers[0] = data->bufferAddress ? get_buffer(state, (void*)(uintptr_t)data->bufferAddress, (size_t*)&cmd->u.bind_vertex_buffers2.offsets[0]) : VK_NULL_HANDLE;
cmd->u.bind_vertex_buffers2.sizes[0] = data->size;
if (token->vertexDynamicStride) {
cmd->u.bind_vertex_buffers2.strides = (void*)(cmdptr + alloc_offset);
cmd->u.bind_vertex_buffers2.strides[0] = data->stride;
} else {
cmd->u.bind_vertex_buffers2.strides = NULL;
}
break;
}
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV: {
cmd->u.draw_indexed_indirect.buffer = pstreams[token->stream].buffer;
cmd->u.draw_indexed_indirect.offset = draw_offset;
cmd->u.draw_indexed_indirect.draw_count = 1;
cmd->u.draw_indexed_indirect.stride = 0;
break;
}
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV: {
cmd->u.draw_indirect.buffer = pstreams[token->stream].buffer;
cmd->u.draw_indirect.offset = draw_offset;
cmd->u.draw_indirect.draw_count = 1;
cmd->u.draw_indirect.stride = 0;
break;
}
// only available if VK_EXT_mesh_shader is supported
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_MESH_TASKS_NV: {
cmd->u.draw_mesh_tasks_indirect_ext.buffer = pstreams[token->stream].buffer;
cmd->u.draw_mesh_tasks_indirect_ext.offset = draw_offset;
cmd->u.draw_mesh_tasks_indirect_ext.draw_count = 1;
cmd->u.draw_mesh_tasks_indirect_ext.stride = 0;
break;
}
// only available if VK_NV_mesh_shader is supported
case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV:
unreachable("NV_mesh_shader unsupported!");
default:
unreachable("unknown token type");
break;
}
size += cmd_size;
list_addtail(&cmd->cmd_link, list);
}
return size;
}
static void
handle_preprocess_generated_commands(struct vk_cmd_queue_entry *cmd, struct rendering_state *state, bool print_cmds)
{
VkGeneratedCommandsInfoNV *pre = cmd->u.preprocess_generated_commands_nv.generated_commands_info;
VK_FROM_HANDLE(lvp_indirect_command_layout_nv, dlayout, pre->indirectCommandsLayout);
struct pipe_transfer *stream_maps[16];
uint8_t *streams[16];
for (unsigned i = 0; i < pre->streamCount; i++) {
struct lvp_buffer *buf = lvp_buffer_from_handle(pre->pStreams[i].buffer);
streams[i] = pipe_buffer_map(state->pctx, buf->bo, PIPE_MAP_READ, &stream_maps[i]);
streams[i] += pre->pStreams[i].offset;
}
LVP_FROM_HANDLE(lvp_buffer, pbuf, pre->preprocessBuffer);
LVP_FROM_HANDLE(lvp_buffer, seqc, pre->sequencesCountBuffer);
LVP_FROM_HANDLE(lvp_buffer, seqi, pre->sequencesIndexBuffer);
unsigned seq_count = pre->sequencesCount;
if (seqc) {
unsigned count = 0;
pipe_buffer_read(state->pctx, seqc->bo, pre->sequencesCountOffset, sizeof(uint32_t), &count);
seq_count = MIN2(count, seq_count);
}
uint32_t *seq = NULL;
struct pipe_transfer *seq_map = NULL;
if (seqi) {
seq = pipe_buffer_map(state->pctx, seqi->bo, PIPE_MAP_READ, &seq_map);
seq = (uint32_t*)(((uint8_t*)seq) + pre->sequencesIndexOffset);
}
struct pipe_transfer *pmap;
uint8_t *p = pipe_buffer_map(state->pctx, pbuf->bo, PIPE_MAP_WRITE, &pmap);
p += pre->preprocessOffset;
struct list_head *list = (void*)p;
size_t size = sizeof(struct list_head);
size_t max_size = pre->preprocessSize;
if (size > max_size)
abort();
list_inithead(list);
size_t offset = size;
for (unsigned i = 0; i < seq_count; i++) {
uint32_t s = seq ? seq[i] : i;
offset += process_sequence(state, pre->pipeline, dlayout, list, p + offset, max_size, streams, pre->pStreams, s, print_cmds);
}
/* vk_cmd_queue will copy the binary and break the list, so null the tail pointer */
list->prev->next = NULL;
for (unsigned i = 0; i < pre->streamCount; i++)
state->pctx->buffer_unmap(state->pctx, stream_maps[i]);
state->pctx->buffer_unmap(state->pctx, pmap);
if (seq_map)
state->pctx->buffer_unmap(state->pctx, seq_map);
}
static void
handle_execute_generated_commands(struct vk_cmd_queue_entry *cmd, struct rendering_state *state, bool print_cmds)
{
VkGeneratedCommandsInfoNV *gen = cmd->u.execute_generated_commands_nv.generated_commands_info;
struct vk_cmd_execute_generated_commands_nv *exec = &cmd->u.execute_generated_commands_nv;
if (!exec->is_preprocessed) {
struct vk_cmd_queue_entry pre;
pre.u.preprocess_generated_commands_nv.generated_commands_info = exec->generated_commands_info;
handle_preprocess_generated_commands(&pre, state, print_cmds);
}
LVP_FROM_HANDLE(lvp_buffer, pbuf, gen->preprocessBuffer);
struct pipe_transfer *pmap;
uint8_t *p = pipe_buffer_map(state->pctx, pbuf->bo, PIPE_MAP_WRITE, &pmap);
p += gen->preprocessOffset;
struct list_head *list = (void*)p;
lvp_execute_cmd_buffer(list, state, print_cmds);
state->pctx->buffer_unmap(state->pctx, pmap);
}
static size_t
process_sequence_ext(struct rendering_state *state,
struct lvp_indirect_execution_set *iset, struct lvp_indirect_command_layout_ext *elayout,
@@ -5414,15 +5181,6 @@ static void lvp_execute_cmd_buffer(struct list_head *cmds,
emit_state(state);
handle_draw_mesh_tasks_indirect_count(cmd, state);
break;
case VK_CMD_BIND_PIPELINE_SHADER_GROUP_NV:
handle_graphics_pipeline_group(cmd, state);
break;
case VK_CMD_PREPROCESS_GENERATED_COMMANDS_NV:
handle_preprocess_generated_commands(cmd, state, print_cmds);
break;
case VK_CMD_EXECUTE_GENERATED_COMMANDS_NV:
handle_execute_generated_commands(cmd, state, print_cmds);
break;
case VK_CMD_PREPROCESS_GENERATED_COMMANDS_EXT:
handle_preprocess_generated_commands_ext(cmd, state, print_cmds);
break;


@@ -968,12 +968,7 @@ lvp_graphics_pipeline_create(
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
size_t size = 0;
const VkGraphicsPipelineShaderGroupsCreateInfoNV *groupinfo = vk_find_struct_const(pCreateInfo, GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV);
if (!group && groupinfo)
size += (groupinfo->groupCount + groupinfo->pipelineCount) * sizeof(VkPipeline);
-   pipeline = vk_zalloc(&device->vk.alloc, sizeof(*pipeline) + size, 8,
+   pipeline = vk_zalloc(&device->vk.alloc, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pipeline == NULL)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -986,25 +981,6 @@
vk_free(&device->vk.alloc, pipeline);
return result;
}
if (!group && groupinfo) {
VkGraphicsPipelineCreateInfo pci = *pCreateInfo;
for (unsigned i = 0; i < groupinfo->groupCount; i++) {
const VkGraphicsShaderGroupCreateInfoNV *g = &groupinfo->pGroups[i];
pci.pVertexInputState = g->pVertexInputState;
pci.pTessellationState = g->pTessellationState;
pci.pStages = g->pStages;
pci.stageCount = g->stageCount;
result = lvp_graphics_pipeline_create(_device, _cache, &pci, flags, &pipeline->groups[i], true);
if (result != VK_SUCCESS) {
lvp_pipeline_destroy(device, pipeline, false);
return result;
}
pipeline->num_groups++;
}
for (unsigned i = 0; i < groupinfo->pipelineCount; i++)
pipeline->groups[pipeline->num_groups + i] = groupinfo->pPipelines[i];
pipeline->num_groups_total = groupinfo->groupCount + groupinfo->pipelineCount;
}
VkPipelineCreationFeedbackCreateInfo *feedback = (void*)vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO);
if (feedback && !group) {