anv: move over to common descriptor set & pipeline layouts

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Ivan Briano <ivan.briano@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36512>
This commit is contained in:
Lionel Landwerlin 2024-08-05 23:20:20 +03:00 committed by Marge Bot
parent cea714329c
commit 5f1cd42456
5 changed files with 159 additions and 210 deletions

View file

@ -145,6 +145,7 @@ astc_emu_init_flush_denorm_shader(nir_builder *b)
static VkResult
astc_emu_init_flush_denorm_pipeline_locked(struct anv_device *device)
{
const struct vk_device_dispatch_table *disp = &device->vk.dispatch_table;
struct anv_device_astc_emu *astc_emu = &device->astc_emu;
VkDevice _device = anv_device_to_handle(device);
VkResult result = VK_SUCCESS;
@ -169,8 +170,8 @@ astc_emu_init_flush_denorm_pipeline_locked(struct anv_device *device)
},
},
};
result = anv_CreateDescriptorSetLayout(_device, &ds_layout_create_info,
NULL, &astc_emu->ds_layout);
result = disp->CreateDescriptorSetLayout(_device, &ds_layout_create_info,
NULL, &astc_emu->ds_layout);
if (result != VK_SUCCESS)
goto out;
}
@ -186,8 +187,8 @@ astc_emu_init_flush_denorm_pipeline_locked(struct anv_device *device)
.size = sizeof(uint32_t) * 4,
},
};
result = anv_CreatePipelineLayout(_device, &pipeline_layout_create_info,
NULL, &astc_emu->pipeline_layout);
result = disp->CreatePipelineLayout(_device, &pipeline_layout_create_info,
NULL, &astc_emu->pipeline_layout);
if (result != VK_SUCCESS)
goto out;
}
@ -210,9 +211,9 @@ astc_emu_init_flush_denorm_pipeline_locked(struct anv_device *device)
},
.layout = astc_emu->pipeline_layout,
};
result = anv_CreateComputePipelines(_device, VK_NULL_HANDLE, 1,
&pipeline_create_info, NULL,
&astc_emu->pipeline);
result = disp->CreateComputePipelines(_device, VK_NULL_HANDLE, 1,
&pipeline_create_info, NULL,
&astc_emu->pipeline);
ralloc_free(b.shader);
if (result != VK_SUCCESS)
@ -498,14 +499,15 @@ anv_device_init_astc_emu(struct anv_device *device)
void
anv_device_finish_astc_emu(struct anv_device *device)
{
const struct vk_device_dispatch_table *disp = &device->vk.dispatch_table;
struct anv_device_astc_emu *astc_emu = &device->astc_emu;
if (device->physical->flush_astc_ldr_void_extent_denorms) {
VkDevice _device = anv_device_to_handle(device);
anv_DestroyPipeline(_device, astc_emu->pipeline, NULL);
anv_DestroyPipelineLayout(_device, astc_emu->pipeline_layout, NULL);
anv_DestroyDescriptorSetLayout(_device, astc_emu->ds_layout, NULL);
disp->DestroyPipeline(_device, astc_emu->pipeline, NULL);
disp->DestroyPipelineLayout(_device, astc_emu->pipeline_layout, NULL);
disp->DestroyDescriptorSetLayout(_device, astc_emu->ds_layout, NULL);
simple_mtx_destroy(&astc_emu->mutex);
}

View file

@ -683,7 +683,7 @@ void anv_CmdBindPipeline(
continue;
assert(layout->set[s].dynamic_offset_start < MAX_DYNAMIC_BUFFERS);
if (layout->set[s].layout->dynamic_offset_count > 0 &&
if (layout->set[s].layout->vk.dynamic_descriptor_count > 0 &&
(push->desc_surface_offsets[s] & ANV_DESCRIPTOR_SET_DYNAMIC_INDEX_MASK) !=
layout->set[s].dynamic_offset_start) {
push->desc_surface_offsets[s] &= ~ANV_DESCRIPTOR_SET_DYNAMIC_INDEX_MASK;
@ -773,7 +773,7 @@ anv_cmd_buffer_maybe_dirty_descriptor_mode(struct anv_cmd_buffer *cmd_buffer,
static void
anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
VkPipelineBindPoint bind_point,
struct anv_pipeline_sets_layout *layout,
struct vk_pipeline_layout *layout,
uint32_t set_index,
struct anv_descriptor_set *set,
uint32_t *dynamic_offset_count,
@ -864,25 +864,25 @@ anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
}
if (dynamic_offsets) {
if (set_layout->dynamic_offset_count > 0) {
if (set_layout->vk.dynamic_descriptor_count > 0) {
struct anv_push_constants *push = &pipe_state->push_constants;
assert(layout != NULL);
uint32_t dynamic_offset_start =
layout->set[set_index].dynamic_offset_start;
layout->dynamic_descriptor_offset[set_index];
uint32_t *push_offsets =
&push->dynamic_offsets[dynamic_offset_start];
memcpy(pipe_state->dynamic_offsets[set_index].offsets,
*dynamic_offsets,
sizeof(uint32_t) * MIN2(*dynamic_offset_count,
set_layout->dynamic_offset_count));
set_layout->vk.dynamic_descriptor_count));
/* Assert that everything is in range */
assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
assert(set_layout->vk.dynamic_descriptor_count <= *dynamic_offset_count);
assert(dynamic_offset_start + set_layout->vk.dynamic_descriptor_count <=
ARRAY_SIZE(push->dynamic_offsets));
for (uint32_t i = 0; i < set_layout->dynamic_offset_count; i++) {
for (uint32_t i = 0; i < set_layout->vk.dynamic_descriptor_count; i++) {
if (push_offsets[i] != (*dynamic_offsets)[i]) {
pipe_state->dynamic_offsets[set_index].offsets[i] =
push_offsets[i] = (*dynamic_offsets)[i];
@ -894,8 +894,8 @@ anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
}
}
*dynamic_offsets += set_layout->dynamic_offset_count;
*dynamic_offset_count -= set_layout->dynamic_offset_count;
*dynamic_offsets += set_layout->vk.dynamic_descriptor_count;
*dynamic_offset_count -= set_layout->vk.dynamic_descriptor_count;
}
}
@ -917,8 +917,7 @@ void anv_CmdBindDescriptorSets2KHR(
const VkBindDescriptorSetsInfoKHR* pInfo)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, pInfo->layout);
struct anv_pipeline_sets_layout *layout = &pipeline_layout->sets_layout;
VK_FROM_HANDLE(vk_pipeline_layout, layout, pInfo->layout);
assert(pInfo->firstSet + pInfo->descriptorSetCount <= MAX_SETS);
@ -1000,7 +999,7 @@ void anv_CmdBindDescriptorBuffersEXT(
static void
anv_cmd_buffer_set_descriptor_buffer_offsets(struct anv_cmd_buffer *cmd_buffer,
VkPipelineBindPoint bind_point,
struct anv_pipeline_layout *layout,
struct vk_pipeline_layout *layout,
uint32_t first_set,
uint32_t set_count,
const VkDeviceSize *buffer_offsets,
@ -1010,7 +1009,8 @@ anv_cmd_buffer_set_descriptor_buffer_offsets(struct anv_cmd_buffer *cmd_buffer,
const uint32_t set_index = first_set + i;
const struct anv_descriptor_set_layout *set_layout =
layout->sets_layout.set[set_index].layout;
container_of(layout->set_layouts[set_index],
const struct anv_descriptor_set_layout, vk);
VkShaderStageFlags stages;
struct anv_cmd_pipeline_state *pipe_state =
anv_cmd_buffer_get_pipeline_layout_state(cmd_buffer, bind_point,
@ -1033,7 +1033,7 @@ void anv_CmdSetDescriptorBufferOffsets2EXT(
const VkSetDescriptorBufferOffsetsInfoEXT* pSetDescriptorBufferOffsetsInfo)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, pSetDescriptorBufferOffsetsInfo->layout);
VK_FROM_HANDLE(vk_pipeline_layout, layout, pSetDescriptorBufferOffsetsInfo->layout);
if (pSetDescriptorBufferOffsetsInfo->stageFlags & VK_SHADER_STAGE_COMPUTE_BIT) {
anv_cmd_buffer_set_descriptor_buffer_offsets(cmd_buffer,
@ -1379,12 +1379,13 @@ anv_cmd_buffer_push_descriptor_sets(struct anv_cmd_buffer *cmd_buffer,
VkPipelineBindPoint bind_point,
const VkPushDescriptorSetInfoKHR *pInfo)
{
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, pInfo->layout);
struct anv_pipeline_sets_layout *layout = &pipeline_layout->sets_layout;
VK_FROM_HANDLE(vk_pipeline_layout, layout, pInfo->layout);
assert(pInfo->set < MAX_SETS);
struct anv_descriptor_set_layout *set_layout = layout->set[pInfo->set].layout;
struct anv_descriptor_set_layout *set_layout =
container_of(layout->set_layouts[pInfo->set],
struct anv_descriptor_set_layout, vk);
struct anv_push_descriptor_set *push_set =
&anv_cmd_buffer_get_pipe_state(cmd_buffer,
bind_point)->push_descriptor;
@ -1427,12 +1428,13 @@ void anv_CmdPushDescriptorSetWithTemplate2KHR(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
VK_FROM_HANDLE(vk_descriptor_update_template, template,
pInfo->descriptorUpdateTemplate);
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, pInfo->layout);
struct anv_pipeline_sets_layout *layout = &pipeline_layout->sets_layout;
VK_FROM_HANDLE(vk_pipeline_layout, layout, pInfo->layout);
assert(pInfo->set < MAX_PUSH_DESCRIPTORS);
struct anv_descriptor_set_layout *set_layout = layout->set[pInfo->set].layout;
struct anv_descriptor_set_layout *set_layout =
container_of(layout->set_layouts[pInfo->set],
struct anv_descriptor_set_layout, vk);
UNUSED VkShaderStageFlags stages;
struct anv_cmd_pipeline_state *pipe_state =
anv_cmd_buffer_get_pipeline_layout_state(cmd_buffer, template->bind_point,

View file

@ -27,7 +27,7 @@
#include <unistd.h>
#include <fcntl.h>
#include "util/mesa-sha1.h"
#include "util/mesa-blake3.h"
#include "vk_util.h"
#include "anv_private.h"
@ -627,6 +627,68 @@ void anv_GetDescriptorSetLayoutSupport(
pSupport->supported = supported;
}
#define BLAKE3_UPDATE_VALUE(ctx, x) _mesa_blake3_update(ctx, &(x), sizeof(x));
/* Fold one descriptor set binding layout into a BLAKE3 hashing context.
 *
 * Only fields are hashed (flags, data, plane count, array size, the
 * various indices/offsets), plus per-sampler state when samplers are
 * present. The resulting digest ultimately feeds the shader hash key
 * (see the embedded-samplers comment below).
 *
 * ctx:               BLAKE3 context to update (must be initialized).
 * embedded_samplers: when true, each sampler's embedded key is hashed
 *                    as well, since the sampler handle is baked into
 *                    the compiled shader.
 * layout:            binding layout to hash.
 */
static void
blake3_update_descriptor_set_binding_layout(struct mesa_blake3 *ctx,
bool embedded_samplers,
const struct anv_descriptor_set_binding_layout *layout)
{
BLAKE3_UPDATE_VALUE(ctx, layout->flags);
BLAKE3_UPDATE_VALUE(ctx, layout->data);
BLAKE3_UPDATE_VALUE(ctx, layout->max_plane_count);
BLAKE3_UPDATE_VALUE(ctx, layout->array_size);
BLAKE3_UPDATE_VALUE(ctx, layout->descriptor_index);
BLAKE3_UPDATE_VALUE(ctx, layout->dynamic_offset_index);
BLAKE3_UPDATE_VALUE(ctx, layout->buffer_view_index);
BLAKE3_UPDATE_VALUE(ctx, layout->descriptor_surface_offset);
BLAKE3_UPDATE_VALUE(ctx, layout->descriptor_sampler_offset);
if (layout->samplers) {
for (uint16_t i = 0; i < layout->array_size; i++) {
/* For embedded samplers, we need to hash the sampler parameters as
 * the sampler handle is baked into the shader and this ultimately is
 * part of the shader hash key. We can only consider 2 shaders
 * identical if all their embedded samplers parameters are identical.
 */
if (embedded_samplers)
BLAKE3_UPDATE_VALUE(ctx, layout->samplers[i].embedded_key);
/* Hash the conversion if any as this affects shader compilation due
 * to NIR lowering.
 */
if (layout->samplers[i].has_ycbcr_conversion)
BLAKE3_UPDATE_VALUE(ctx, layout->samplers[i].ycbcr_conversion_state);
}
}
}
/* Compute the BLAKE3 digest of a descriptor set layout and store it in
 * layout->vk.blake3.
 *
 * The hash covers the set-level fields below plus every binding (via
 * blake3_update_descriptor_set_binding_layout). Embedded-sampler
 * parameters are folded in only when the layout was created with
 * VK_DESCRIPTOR_SET_LAYOUT_CREATE_EMBEDDED_IMMUTABLE_SAMPLERS_BIT_EXT.
 */
static void
blake3_hash_descriptor_set_layout(struct anv_descriptor_set_layout *layout)
{
struct mesa_blake3 ctx;
_mesa_blake3_init(&ctx);
BLAKE3_UPDATE_VALUE(&ctx, layout->flags);
BLAKE3_UPDATE_VALUE(&ctx, layout->binding_count);
BLAKE3_UPDATE_VALUE(&ctx, layout->descriptor_count);
BLAKE3_UPDATE_VALUE(&ctx, layout->shader_stages);
BLAKE3_UPDATE_VALUE(&ctx, layout->buffer_view_count);
BLAKE3_UPDATE_VALUE(&ctx, layout->vk.dynamic_descriptor_count);
BLAKE3_UPDATE_VALUE(&ctx, layout->descriptor_buffer_surface_size);
BLAKE3_UPDATE_VALUE(&ctx, layout->descriptor_buffer_sampler_size);
bool embedded_samplers =
layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_EMBEDDED_IMMUTABLE_SAMPLERS_BIT_EXT;
for (uint16_t i = 0; i < layout->binding_count; i++) {
blake3_update_descriptor_set_binding_layout(&ctx, embedded_samplers,
&layout->binding[i]);
}
_mesa_blake3_final(&ctx, layout->vk.blake3);
}
VkResult anv_CreateDescriptorSetLayout(
VkDevice _device,
const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
@ -697,7 +759,7 @@ VkResult anv_CreateDescriptorSetLayout(
}
uint32_t buffer_view_count = 0;
uint32_t dynamic_offset_count = 0;
uint32_t dynamic_descriptor_count = 0;
uint32_t descriptor_buffer_surface_size = 0;
uint32_t descriptor_buffer_sampler_size = 0;
uint32_t sampler_count = 0;
@ -826,10 +888,10 @@ VkResult anv_CreateDescriptorSetLayout(
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
set_layout->dynamic_offset_stages[dynamic_offset_count] = binding->stageFlags;
dynamic_offset_count += binding->descriptorCount;
assert(dynamic_offset_count < MAX_DYNAMIC_BUFFERS);
set_layout->binding[b].dynamic_offset_index = dynamic_descriptor_count;
set_layout->dynamic_offset_stages[dynamic_descriptor_count] = binding->stageFlags;
dynamic_descriptor_count += binding->descriptorCount;
assert(dynamic_descriptor_count < MAX_DYNAMIC_BUFFERS);
break;
default:
@ -897,7 +959,7 @@ VkResult anv_CreateDescriptorSetLayout(
set_layout->type == ANV_PIPELINE_DESCRIPTOR_SET_LAYOUT_TYPE_DIRECT);
set_layout->buffer_view_count = buffer_view_count;
set_layout->dynamic_offset_count = dynamic_offset_count;
set_layout->vk.dynamic_descriptor_count = dynamic_descriptor_count;
set_layout->descriptor_buffer_surface_size = descriptor_buffer_surface_size;
set_layout->descriptor_buffer_sampler_size = descriptor_buffer_sampler_size;
@ -907,6 +969,8 @@ VkResult anv_CreateDescriptorSetLayout(
set_layout->embedded_sampler_count = sampler_count;
}
blake3_hash_descriptor_set_layout(set_layout);
*pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);
return VK_SUCCESS;
@ -1035,64 +1099,6 @@ anv_descriptor_set_layout_print(const struct anv_descriptor_set_layout *layout)
}
}
#define SHA1_UPDATE_VALUE(ctx, x) _mesa_sha1_update(ctx, &(x), sizeof(x));
/* Fold one descriptor set binding layout into a SHA-1 hashing context.
 *
 * SHA-1 counterpart of the BLAKE3 binding hasher: hashes the binding's
 * fields and, when samplers are present, per-sampler state that can
 * influence the compiled shader.
 *
 * ctx:               SHA-1 context to update (must be initialized).
 * embedded_samplers: when true, each sampler's embedded key is hashed
 *                    as well, since the sampler handle is baked into
 *                    the compiled shader.
 * layout:            binding layout to hash.
 */
static void
sha1_update_descriptor_set_binding_layout(struct mesa_sha1 *ctx,
bool embedded_samplers,
const struct anv_descriptor_set_binding_layout *layout)
{
SHA1_UPDATE_VALUE(ctx, layout->flags);
SHA1_UPDATE_VALUE(ctx, layout->data);
SHA1_UPDATE_VALUE(ctx, layout->max_plane_count);
SHA1_UPDATE_VALUE(ctx, layout->array_size);
SHA1_UPDATE_VALUE(ctx, layout->descriptor_index);
SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_index);
SHA1_UPDATE_VALUE(ctx, layout->buffer_view_index);
SHA1_UPDATE_VALUE(ctx, layout->descriptor_surface_offset);
SHA1_UPDATE_VALUE(ctx, layout->descriptor_sampler_offset);
if (layout->samplers) {
for (uint16_t i = 0; i < layout->array_size; i++) {
/* For embedded samplers, we need to hash the sampler parameters as
 * the sampler handle is baked into the shader and this ultimately is
 * part of the shader hash key. We can only consider 2 shaders
 * identical if all their embedded samplers parameters are identical.
 */
if (embedded_samplers)
SHA1_UPDATE_VALUE(ctx, layout->samplers[i].embedded_key);
/* Hash the conversion if any as this affects shader compilation due
 * to NIR lowering.
 */
if (layout->samplers[i].has_ycbcr_conversion)
SHA1_UPDATE_VALUE(ctx, layout->samplers[i].ycbcr_conversion_state);
}
}
}
/* Fold an entire descriptor set layout into a SHA-1 hashing context.
 *
 * Unlike the BLAKE3 variant, this does not finalize the digest — the
 * caller owns the context and may hash additional data (e.g. several
 * set layouts of a pipeline layout) before finalizing.
 */
static void
sha1_update_descriptor_set_layout(struct mesa_sha1 *ctx,
const struct anv_descriptor_set_layout *layout)
{
SHA1_UPDATE_VALUE(ctx, layout->flags);
SHA1_UPDATE_VALUE(ctx, layout->binding_count);
SHA1_UPDATE_VALUE(ctx, layout->descriptor_count);
SHA1_UPDATE_VALUE(ctx, layout->shader_stages);
SHA1_UPDATE_VALUE(ctx, layout->buffer_view_count);
SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_count);
SHA1_UPDATE_VALUE(ctx, layout->descriptor_buffer_surface_size);
SHA1_UPDATE_VALUE(ctx, layout->descriptor_buffer_sampler_size);
const bool embedded_samplers =
layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_EMBEDDED_IMMUTABLE_SAMPLERS_BIT_EXT;
for (uint16_t i = 0; i < layout->binding_count; i++) {
sha1_update_descriptor_set_binding_layout(ctx, embedded_samplers,
&layout->binding[i]);
}
}
/*
* Pipeline layouts. These have nothing to do with the pipeline. They are
* just multiple descriptor set layouts pasted together
@ -1133,7 +1139,7 @@ anv_pipeline_sets_layout_add(struct anv_pipeline_sets_layout *layout,
vk_descriptor_set_layout_ref(&set_layout->vk);
layout->set[set_idx].dynamic_offset_start = layout->num_dynamic_buffers;
layout->num_dynamic_buffers += set_layout->dynamic_offset_count;
layout->num_dynamic_buffers += set_layout->vk.dynamic_descriptor_count;
assert(layout->num_dynamic_buffers < MAX_DYNAMIC_BUFFERS);
@ -1159,17 +1165,18 @@ anv_pipeline_sets_layout_embedded_sampler_count(const struct anv_pipeline_sets_l
void
anv_pipeline_sets_layout_hash(struct anv_pipeline_sets_layout *layout)
{
struct mesa_sha1 ctx;
_mesa_sha1_init(&ctx);
struct mesa_blake3 ctx;
_mesa_blake3_init(&ctx);
for (unsigned s = 0; s < layout->num_sets; s++) {
if (!layout->set[s].layout)
continue;
sha1_update_descriptor_set_layout(&ctx, layout->set[s].layout);
_mesa_sha1_update(&ctx, &layout->set[s].dynamic_offset_start,
sizeof(layout->set[s].dynamic_offset_start));
_mesa_blake3_update(&ctx, &layout->set[s].layout->vk.blake3,
sizeof(layout->set[s].layout->vk.blake3));
_mesa_blake3_update(&ctx, &layout->set[s].dynamic_offset_start,
sizeof(layout->set[s].dynamic_offset_start));
}
_mesa_sha1_update(&ctx, &layout->num_sets, sizeof(layout->num_sets));
_mesa_sha1_final(&ctx, layout->sha1);
_mesa_blake3_update(&ctx, &layout->num_sets, sizeof(layout->num_sets));
_mesa_blake3_final(&ctx, layout->blake3);
}
void
@ -1200,66 +1207,6 @@ anv_pipeline_sets_layout_print(const struct anv_pipeline_sets_layout *layout)
}
}
/* vkCreatePipelineLayout entry point.
 *
 * Allocates an anv_pipeline_layout, populates its sets_layout from the
 * create-info's set layouts (NULL entries are skipped — see the VUID
 * note below), hashes the result and returns the handle.
 */
VkResult anv_CreatePipelineLayout(
VkDevice _device,
const VkPipelineLayoutCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkPipelineLayout* pPipelineLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   struct anv_pipeline_layout *pipeline_layout =
      vk_object_zalloc(&device->vk, pAllocator, sizeof(*pipeline_layout),
                       VK_OBJECT_TYPE_PIPELINE_LAYOUT);
   if (pipeline_layout == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   const bool independent_sets =
      (pCreateInfo->flags &
       VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT) != 0;
   anv_pipeline_sets_layout_init(&pipeline_layout->sets_layout, device,
                                 independent_sets);

   for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
                      pCreateInfo->pSetLayouts[set]);

      /* VUID-VkPipelineLayoutCreateInfo-graphicsPipelineLibrary-06753
       *
       * "If graphicsPipelineLibrary is not enabled, elements of
       * pSetLayouts must be valid VkDescriptorSetLayout objects"
       *
       * As a result of supporting graphicsPipelineLibrary, we need to
       * allow null descriptor set layouts.
       */
      if (set_layout == NULL)
         continue;

      anv_pipeline_sets_layout_add(&pipeline_layout->sets_layout, set,
                                   set_layout);
   }

   anv_pipeline_sets_layout_hash(&pipeline_layout->sets_layout);

   *pPipelineLayout = anv_pipeline_layout_to_handle(pipeline_layout);

   return VK_SUCCESS;
}
/* vkDestroyPipelineLayout entry point.
 *
 * Tears down the embedded sets_layout and frees the object. Destroying
 * VK_NULL_HANDLE is a no-op, per the Vulkan spec.
 */
void anv_DestroyPipelineLayout(
VkDevice _device,
VkPipelineLayout _pipelineLayout,
const VkAllocationCallbacks* pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);

   if (pipeline_layout == NULL)
      return;

   anv_pipeline_sets_layout_fini(&pipeline_layout->sets_layout);

   vk_object_free(&device->vk, pAllocator, pipeline_layout);
}
/*
* Descriptor pools.
*

View file

@ -147,24 +147,20 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
static void
anv_pipeline_init_layout(struct anv_pipeline *pipeline,
struct anv_pipeline_layout *pipeline_layout)
struct vk_pipeline_layout *pipeline_layout)
{
if (pipeline_layout) {
struct anv_pipeline_sets_layout *layout = &pipeline_layout->sets_layout;
for (uint32_t s = 0; s < layout->num_sets; s++) {
if (layout->set[s].layout == NULL)
for (uint32_t s = 0; s < pipeline_layout->set_count; s++) {
if (pipeline_layout->set_layouts[s] == NULL)
continue;
anv_pipeline_sets_layout_add(&pipeline->layout, s,
layout->set[s].layout);
struct anv_descriptor_set_layout *set_layout =
(struct anv_descriptor_set_layout *) pipeline_layout->set_layouts[s];
anv_pipeline_sets_layout_add(&pipeline->layout, s, set_layout);
}
}
anv_pipeline_sets_layout_hash(&pipeline->layout);
assert(!pipeline_layout ||
!memcmp(pipeline->layout.sha1,
pipeline_layout->sets_layout.sha1,
sizeof(pipeline_layout->sets_layout.sha1)));
}
static void
@ -641,7 +637,7 @@ anv_pipeline_hash_common(struct mesa_sha1 *ctx,
{
struct anv_device *device = pipeline->device;
_mesa_sha1_update(ctx, pipeline->layout.sha1, sizeof(pipeline->layout.sha1));
_mesa_sha1_update(ctx, pipeline->layout.blake3, sizeof(pipeline->layout.blake3));
const bool indirect_descriptors = device->physical->indirect_descriptors;
_mesa_sha1_update(ctx, &indirect_descriptors, sizeof(indirect_descriptors));
@ -746,8 +742,8 @@ anv_pipeline_hash_ray_tracing_combined_shader(struct anv_ray_tracing_pipeline *p
struct mesa_sha1 ctx;
_mesa_sha1_init(&ctx);
_mesa_sha1_update(&ctx, pipeline->base.layout.sha1,
sizeof(pipeline->base.layout.sha1));
_mesa_sha1_update(&ctx, pipeline->base.layout.blake3,
sizeof(pipeline->base.layout.blake3));
const bool rba = pipeline->base.device->robust_buffer_access;
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
@ -2793,8 +2789,7 @@ anv_compute_pipeline_create(struct anv_device *device,
return result;
}
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
ANV_FROM_HANDLE(vk_pipeline_layout, pipeline_layout, pCreateInfo->layout);
anv_pipeline_init_layout(&pipeline->base, pipeline_layout);
pipeline->base.active_stages = VK_SHADER_STAGE_COMPUTE_BIT;
@ -2985,16 +2980,35 @@ anv_graphics_pipeline_emit(struct anv_graphics_pipeline *pipeline,
static void
anv_graphics_pipeline_import_layout(struct anv_graphics_base_pipeline *pipeline,
struct anv_pipeline_sets_layout *layout)
struct vk_pipeline_layout *pipeline_layout)
{
pipeline->base.layout.independent_sets |= layout->independent_sets;
const bool independent_layouts =
(pipeline_layout->create_flags &
VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT) != 0;
pipeline->base.layout.independent_sets |= independent_layouts;
for (uint32_t s = 0; s < layout->num_sets; s++) {
if (layout->set[s].layout == NULL)
for (uint32_t s = 0; s < pipeline_layout->set_count; s++) {
if (pipeline_layout->set_layouts[s] == NULL)
continue;
struct anv_descriptor_set_layout *set_layout =
(struct anv_descriptor_set_layout *) pipeline_layout->set_layouts[s];
anv_pipeline_sets_layout_add(&pipeline->base.layout, s, set_layout);
}
}
static void
anv_graphics_pipeline_import_sets_layout(struct anv_graphics_base_pipeline *pipeline,
const struct anv_pipeline_sets_layout *sets_layout)
{
pipeline->base.layout.independent_sets |= sets_layout->independent_sets;
for (uint32_t s = 0; s < sets_layout->num_sets; s++) {
if (sets_layout->set[s].layout == NULL)
continue;
anv_pipeline_sets_layout_add(&pipeline->base.layout, s,
layout->set[s].layout);
sets_layout->set[s].layout);
}
}
@ -3005,9 +3019,7 @@ anv_graphics_pipeline_import_lib(struct anv_graphics_base_pipeline *pipeline,
struct anv_pipeline_stage *stages,
struct anv_graphics_lib_pipeline *lib)
{
struct anv_pipeline_sets_layout *lib_layout =
&lib->base.base.layout;
anv_graphics_pipeline_import_layout(pipeline, lib_layout);
anv_graphics_pipeline_import_sets_layout(pipeline, &lib->base.base.layout);
/* We can't have shaders specified twice through libraries. */
assert((pipeline->base.active_stages & lib->base.base.active_stages) == 0);
@ -3145,11 +3157,9 @@ anv_graphics_lib_pipeline_create(struct anv_device *device,
/* After we've imported all the libraries' layouts, import the pipeline
* layout and hash the whole lot.
*/
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
if (pipeline_layout != NULL) {
anv_graphics_pipeline_import_layout(&pipeline->base,
&pipeline_layout->sets_layout);
}
ANV_FROM_HANDLE(vk_pipeline_layout, pipeline_layout, pCreateInfo->layout);
if (pipeline_layout != NULL)
anv_graphics_pipeline_import_layout(&pipeline->base, pipeline_layout);
anv_pipeline_sets_layout_hash(&pipeline->base.base.layout);
@ -3274,11 +3284,9 @@ anv_graphics_pipeline_create(struct anv_device *device,
/* After we've imported all the libraries' layouts, import the pipeline
* layout and hash the whole lot.
*/
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
if (pipeline_layout != NULL) {
anv_graphics_pipeline_import_layout(&pipeline->base,
&pipeline_layout->sets_layout);
}
ANV_FROM_HANDLE(vk_pipeline_layout, pipeline_layout, pCreateInfo->layout);
if (pipeline_layout != NULL)
anv_graphics_pipeline_import_layout(&pipeline->base, pipeline_layout);
anv_pipeline_sets_layout_hash(&pipeline->base.base.layout);
@ -4069,7 +4077,7 @@ anv_ray_tracing_pipeline_init(struct anv_ray_tracing_pipeline *pipeline,
{
util_dynarray_init(&pipeline->shaders, pipeline->base.mem_ctx);
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
ANV_FROM_HANDLE(vk_pipeline_layout, pipeline_layout, pCreateInfo->layout);
anv_pipeline_init_layout(&pipeline->base, pipeline_layout);
anv_pipeline_setup_l3_config(&pipeline->base, /* needs_slm */ false);

View file

@ -93,6 +93,7 @@
#include "vk_image.h"
#include "vk_instance.h"
#include "vk_pipeline_cache.h"
#include "vk_pipeline_layout.h"
#include "vk_physical_device.h"
#include "vk_sampler.h"
#include "vk_shader_module.h"
@ -3037,9 +3038,6 @@ struct anv_descriptor_set_layout {
/* Number of buffer views in this descriptor set */
uint32_t buffer_view_count;
/* Number of dynamic offsets used by this descriptor set */
uint16_t dynamic_offset_count;
/* For each dynamic buffer, which VkShaderStageFlagBits stages are using
* this buffer
*/
@ -3417,7 +3415,7 @@ struct anv_pipeline_sets_layout {
bool independent_sets;
unsigned char sha1[20];
blake3_hash blake3;
};
void anv_pipeline_sets_layout_init(struct anv_pipeline_sets_layout *layout,
@ -3437,12 +3435,6 @@ void anv_pipeline_sets_layout_hash(struct anv_pipeline_sets_layout *layout);
void anv_pipeline_sets_layout_print(const struct anv_pipeline_sets_layout *layout);
struct anv_pipeline_layout {
struct vk_object_base base;
struct anv_pipeline_sets_layout sets_layout;
};
const struct anv_descriptor_set_layout *
anv_pipeline_layout_get_push_set(const struct anv_pipeline_sets_layout *layout,
uint8_t *desc_idx);
@ -6714,8 +6706,6 @@ VK_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, vk.base, VkImageView,
VK_OBJECT_TYPE_IMAGE_VIEW);
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, base, VkPipeline,
VK_OBJECT_TYPE_PIPELINE)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, base, VkPipelineLayout,
VK_OBJECT_TYPE_PIPELINE_LAYOUT)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, vk.base, VkQueryPool,
VK_OBJECT_TYPE_QUERY_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, vk.base, VkSampler,