mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-06 20:18:12 +02:00
lavapipe: rework immutable samplers
Samplers can be destroyed at any time, which makes it problematic to store pointers to them in descriptor layouts for embedded samplers. Instead, store the descriptor info directly in the layout, since this is all constant data that is unaffected by object lifetimes. cc: mesa-stable Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/41312>
This commit is contained in:
parent
965beb520c
commit
1da8528bbc
4 changed files with 38 additions and 83 deletions
|
|
@ -86,7 +86,8 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateDescriptorSetLayout(
|
|||
|
||||
size_t size = sizeof(struct lvp_descriptor_set_layout) +
|
||||
num_bindings * sizeof(set_layout->binding[0]) +
|
||||
immutable_sampler_count * sizeof(struct lvp_sampler *);
|
||||
immutable_sampler_count * sizeof(struct lp_descriptor) +
|
||||
immutable_sampler_count * sizeof(struct vk_ycbcr_conversion_state);
|
||||
|
||||
set_layout = vk_descriptor_set_layout_zalloc(&device->vk, size, pCreateInfo);
|
||||
if (!set_layout)
|
||||
|
|
@ -94,8 +95,9 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateDescriptorSetLayout(
|
|||
|
||||
set_layout->immutable_sampler_count = immutable_sampler_count;
|
||||
/* We just allocate all the samplers at the end of the struct */
|
||||
struct lvp_sampler **samplers =
|
||||
(struct lvp_sampler **)&set_layout->binding[num_bindings];
|
||||
struct lp_descriptor *samplers =
|
||||
(struct lp_descriptor *)&set_layout->binding[num_bindings];
|
||||
struct vk_ycbcr_conversion_state *ycbcr = (void*)(samplers + immutable_sampler_count);
|
||||
|
||||
set_layout->binding_count = num_bindings;
|
||||
set_layout->shader_stages = 0;
|
||||
|
|
@ -136,11 +138,17 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateDescriptorSetLayout(
|
|||
uint8_t max_plane_count = 1;
|
||||
if (binding_has_immutable_samplers(binding)) {
|
||||
set_layout->binding[b].immutable_samplers = samplers;
|
||||
set_layout->binding[b].immutable_ycbcr = ycbcr;
|
||||
samplers += binding->descriptorCount;
|
||||
ycbcr += binding->descriptorCount;
|
||||
|
||||
for (uint32_t i = 0; i < binding->descriptorCount; i++) {
|
||||
VK_FROM_HANDLE(lvp_sampler, sampler, binding->pImmutableSamplers[i]);
|
||||
set_layout->binding[b].immutable_samplers[i] = sampler;
|
||||
set_layout->binding[b].immutable_samplers[i] = sampler->desc;
|
||||
if (sampler->vk.ycbcr_conversion)
|
||||
set_layout->binding[b].immutable_ycbcr[i] = sampler->vk.ycbcr_conversion->state;
|
||||
else
|
||||
memset(&set_layout->binding[b].immutable_ycbcr[i], 0, sizeof(struct vk_ycbcr_conversion_state));
|
||||
const uint8_t sampler_plane_count = sampler->vk.ycbcr_conversion ?
|
||||
vk_format_get_plane_count(sampler->vk.ycbcr_conversion->state.format) : 1;
|
||||
if (max_plane_count < sampler_plane_count)
|
||||
|
|
@ -366,11 +374,9 @@ lvp_descriptor_set_create(struct lvp_device *device,
|
|||
desc += bind_layout->descriptor_index;
|
||||
|
||||
for (uint32_t sampler_index = 0; sampler_index < bind_layout->array_size; sampler_index++) {
|
||||
if (bind_layout->immutable_samplers[sampler_index]) {
|
||||
for (uint32_t s = 0; s < bind_layout->stride; s++) {
|
||||
int idx = sampler_index * bind_layout->stride + s;
|
||||
desc[idx] = bind_layout->immutable_samplers[sampler_index]->desc;
|
||||
}
|
||||
for (uint32_t s = 0; s < bind_layout->stride; s++) {
|
||||
int idx = sampler_index * bind_layout->stride + s;
|
||||
desc[idx] = bind_layout->immutable_samplers[sampler_index];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4733,46 +4733,6 @@ handle_descriptor_buffers(struct vk_cmd_queue_entry *cmd, struct rendering_state
|
|||
}
|
||||
}
|
||||
|
||||
static bool
|
||||
descriptor_layouts_equal(const struct lvp_descriptor_set_layout *a, const struct lvp_descriptor_set_layout *b)
|
||||
{
|
||||
const uint8_t *pa = (const uint8_t*)a, *pb = (const uint8_t*)b;
|
||||
uint32_t hash_start_offset = sizeof(struct vk_descriptor_set_layout);
|
||||
uint32_t binding_offset = offsetof(struct lvp_descriptor_set_layout, binding);
|
||||
/* base equal */
|
||||
if (memcmp(pa + hash_start_offset, pb + hash_start_offset, binding_offset - hash_start_offset))
|
||||
return false;
|
||||
|
||||
/* bindings equal */
|
||||
if (a->binding_count != b->binding_count)
|
||||
return false;
|
||||
size_t binding_size = a->binding_count * sizeof(struct lvp_descriptor_set_binding_layout);
|
||||
const struct lvp_descriptor_set_binding_layout *la = a->binding;
|
||||
const struct lvp_descriptor_set_binding_layout *lb = b->binding;
|
||||
if (memcmp(la, lb, binding_size)) {
|
||||
for (unsigned i = 0; i < a->binding_count; i++) {
|
||||
if (memcmp(&la[i], &lb[i], offsetof(struct lvp_descriptor_set_binding_layout, immutable_samplers)))
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/* immutable sampler equal */
|
||||
if (a->immutable_sampler_count != b->immutable_sampler_count)
|
||||
return false;
|
||||
if (a->immutable_sampler_count) {
|
||||
size_t sampler_size = a->immutable_sampler_count * sizeof(struct lvp_sampler *);
|
||||
if (memcmp(pa + binding_offset + binding_size, pb + binding_offset + binding_size, sampler_size)) {
|
||||
struct lvp_sampler **sa = (struct lvp_sampler **)(pa + binding_offset);
|
||||
struct lvp_sampler **sb = (struct lvp_sampler **)(pb + binding_offset);
|
||||
for (unsigned i = 0; i < a->immutable_sampler_count; i++) {
|
||||
if (memcmp(sa[i], sb[i], sizeof(struct lvp_sampler)))
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static void
|
||||
bind_db_samplers(struct rendering_state *state, enum lvp_pipeline_type pipeline_type, unsigned set)
|
||||
{
|
||||
|
|
@ -4803,16 +4763,14 @@ bind_db_samplers(struct rendering_state *state, enum lvp_pipeline_type pipeline_
|
|||
desc += bind_layout->descriptor_index;
|
||||
|
||||
for (uint32_t sampler_index = 0; sampler_index < bind_layout->array_size; sampler_index++) {
|
||||
if (bind_layout->immutable_samplers[sampler_index]) {
|
||||
struct lp_descriptor *immutable_desc = &bind_layout->immutable_samplers[sampler_index]->desc;
|
||||
desc[sampler_index].sampler = immutable_desc->sampler;
|
||||
desc[sampler_index].texture.sampler_index = immutable_desc->texture.sampler_index;
|
||||
if (pipeline_type == LVP_PIPELINE_RAY_TRACING) {
|
||||
did_update |= BITFIELD_BIT(MESA_SHADER_RAYGEN);
|
||||
} else {
|
||||
u_foreach_bit(stage, set_layout->shader_stages)
|
||||
did_update |= BITFIELD_BIT(vk_to_mesa_shader_stage(1<<stage));
|
||||
}
|
||||
struct lp_descriptor *immutable_desc = &bind_layout->immutable_samplers[sampler_index];
|
||||
desc[sampler_index].sampler = immutable_desc->sampler;
|
||||
desc[sampler_index].texture.sampler_index = immutable_desc->texture.sampler_index;
|
||||
if (pipeline_type == LVP_PIPELINE_RAY_TRACING) {
|
||||
did_update |= BITFIELD_BIT(MESA_SHADER_RAYGEN);
|
||||
} else {
|
||||
u_foreach_bit(stage, set_layout->shader_stages)
|
||||
did_update |= BITFIELD_BIT(vk_to_mesa_shader_stage(1<<stage));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -385,8 +385,7 @@ lvp_ycbcr_conversion_lookup(const void *data, uint32_t set, uint32_t binding, ui
|
|||
if (!binding_layout->immutable_samplers)
|
||||
return NULL;
|
||||
|
||||
struct vk_ycbcr_conversion *ycbcr_conversion = binding_layout->immutable_samplers[array_index]->vk.ycbcr_conversion;
|
||||
return ycbcr_conversion ? &ycbcr_conversion->state : NULL;
|
||||
return binding_layout->immutable_ycbcr[array_index].format ? &binding_layout->immutable_ycbcr[array_index] : NULL;
|
||||
}
|
||||
|
||||
/* pipeline is NULL for shader objects. */
|
||||
|
|
@ -729,7 +728,7 @@ layouts_equal(const struct lvp_descriptor_set_layout *a, const struct lvp_descri
|
|||
{
|
||||
const uint8_t *pa = (const uint8_t*)a, *pb = (const uint8_t*)b;
|
||||
uint32_t hash_start_offset = sizeof(struct vk_descriptor_set_layout);
|
||||
uint32_t binding_offset = offsetof(struct lvp_descriptor_set_layout, binding);
|
||||
uint32_t binding_offset = offsetof(struct lvp_descriptor_set_layout, immutable_set);
|
||||
/* base equal */
|
||||
if (memcmp(pa + hash_start_offset, pb + hash_start_offset, binding_offset - hash_start_offset))
|
||||
return false;
|
||||
|
|
@ -737,31 +736,22 @@ layouts_equal(const struct lvp_descriptor_set_layout *a, const struct lvp_descri
|
|||
/* bindings equal */
|
||||
if (a->binding_count != b->binding_count)
|
||||
return false;
|
||||
if (a->immutable_sampler_count != b->immutable_sampler_count)
|
||||
return false;
|
||||
size_t binding_size = a->binding_count * sizeof(struct lvp_descriptor_set_binding_layout);
|
||||
const struct lvp_descriptor_set_binding_layout *la = a->binding;
|
||||
const struct lvp_descriptor_set_binding_layout *lb = b->binding;
|
||||
if (memcmp(la, lb, binding_size)) {
|
||||
for (unsigned i = 0; i < a->binding_count; i++) {
|
||||
if (memcmp(&la[i], &lb[i], offsetof(struct lvp_descriptor_set_binding_layout, immutable_samplers)))
|
||||
return false;
|
||||
}
|
||||
if (!memcmp(la, lb, binding_size))
|
||||
return true;
|
||||
for (unsigned i = 0; i < a->binding_count; i++) {
|
||||
if (memcmp(&la[i], &lb[i], offsetof(struct lvp_descriptor_set_binding_layout, immutable_samplers)))
|
||||
return false;
|
||||
}
|
||||
|
||||
/* immutable sampler equal */
|
||||
if (a->immutable_sampler_count != b->immutable_sampler_count)
|
||||
if (!a->immutable_sampler_count)
|
||||
return true;
|
||||
if (memcmp(la->immutable_samplers, lb->immutable_samplers, a->immutable_sampler_count * sizeof(struct lp_descriptor)))
|
||||
return false;
|
||||
if (a->immutable_sampler_count) {
|
||||
size_t sampler_size = a->immutable_sampler_count * sizeof(struct lvp_sampler *);
|
||||
if (memcmp(pa + binding_offset + binding_size, pb + binding_offset + binding_size, sampler_size)) {
|
||||
struct lvp_sampler **sa = (struct lvp_sampler **)(pa + binding_offset);
|
||||
struct lvp_sampler **sb = (struct lvp_sampler **)(pb + binding_offset);
|
||||
for (unsigned i = 0; i < a->immutable_sampler_count; i++) {
|
||||
if (memcmp(sa[i], sb[i], sizeof(struct lvp_sampler)))
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
return !memcmp(la->immutable_ycbcr, lb->immutable_ycbcr, a->immutable_sampler_count * sizeof(struct vk_ycbcr_conversion_state));
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
|||
|
|
@ -328,7 +328,8 @@ struct lvp_descriptor_set_binding_layout {
|
|||
uint32_t uniform_block_size;
|
||||
|
||||
/* Immutable samplers (or NULL if no immutable samplers) */
|
||||
struct lvp_sampler **immutable_samplers;
|
||||
struct lp_descriptor *immutable_samplers;
|
||||
struct vk_ycbcr_conversion_state *immutable_ycbcr;
|
||||
};
|
||||
|
||||
struct lvp_descriptor_set_layout {
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue