lavapipe: EXT_descriptor_buffer

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22828>
Mike Blumenkrantz 2023-04-26 13:02:29 -04:00 committed by Marge Bot
parent 9d876505bc
commit b9d774fe22
4 changed files with 494 additions and 0 deletions


@@ -221,6 +221,86 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreatePipelineLayout(
return VK_SUCCESS;
}
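/* Wrap a raw buffer device address range in an unbacked pipe_resource and bind
 * the address directly as its backing storage, so it can back texel-buffer
 * sampler/image views.
 */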
static struct pipe_resource *
get_buffer_resource(struct pipe_context *ctx, const VkDescriptorAddressInfoEXT *bda)
{
struct pipe_screen *pscreen = ctx->screen;
struct pipe_resource templ = {0};
templ.screen = pscreen;
templ.target = PIPE_BUFFER;
templ.format = PIPE_FORMAT_R8_UNORM;
templ.width0 = bda->range;
templ.height0 = 1;
templ.depth0 = 1;
templ.array_size = 1;
templ.bind |= PIPE_BIND_SAMPLER_VIEW;
templ.bind |= PIPE_BIND_SHADER_IMAGE;
templ.flags = PIPE_RESOURCE_FLAG_DONT_OVER_ALLOCATE;
uint64_t size;
struct pipe_resource *pres = pscreen->resource_create_unbacked(pscreen, &templ, &size);
assert(size == bda->range);
pscreen->resource_bind_backing(pscreen, pres, (void *)(uintptr_t)bda->address, 0);
return pres;
}
static struct lp_texture_handle
get_texture_handle_bda(struct lvp_device *device, const VkDescriptorAddressInfoEXT *bda, enum pipe_format format)
{
struct pipe_context *ctx = device->queue.ctx;
struct pipe_resource *pres = get_buffer_resource(ctx, bda);
struct pipe_sampler_view templ;
memset(&templ, 0, sizeof(templ));
templ.target = PIPE_BUFFER;
templ.swizzle_r = PIPE_SWIZZLE_X;
templ.swizzle_g = PIPE_SWIZZLE_Y;
templ.swizzle_b = PIPE_SWIZZLE_Z;
templ.swizzle_a = PIPE_SWIZZLE_W;
templ.format = format;
templ.u.buf.size = bda->range;
templ.texture = pres;
templ.context = ctx;
struct pipe_sampler_view *view = ctx->create_sampler_view(ctx, pres, &templ);
simple_mtx_lock(&device->queue.lock);
struct lp_texture_handle *handle = (void *)(uintptr_t)ctx->create_texture_handle(ctx, view, NULL);
util_dynarray_append(&device->bda_texture_handles, struct lp_texture_handle *, handle);
simple_mtx_unlock(&device->queue.lock);
ctx->sampler_view_destroy(ctx, view);
pipe_resource_reference(&pres, NULL);
return *handle;
}
static struct lp_texture_handle
get_image_handle_bda(struct lvp_device *device, const VkDescriptorAddressInfoEXT *bda, enum pipe_format format)
{
struct pipe_context *ctx = device->queue.ctx;
struct pipe_resource *pres = get_buffer_resource(ctx, bda);
struct pipe_image_view view = {0};
view.resource = pres;
view.format = format;
view.u.buf.size = bda->range;
simple_mtx_lock(&device->queue.lock);
struct lp_texture_handle *handle = (void *)(uintptr_t)ctx->create_image_handle(ctx, &view);
util_dynarray_append(&device->bda_image_handles, struct lp_texture_handle *, handle);
simple_mtx_unlock(&device->queue.lock);
pipe_resource_reference(&pres, NULL);
return *handle;
}
VkResult
lvp_descriptor_set_create(struct lvp_device *device,
struct lvp_descriptor_set_layout *layout,
@@ -857,3 +937,163 @@ lvp_UpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorS
{
lvp_descriptor_set_update_with_template(device, descriptorSet, descriptorUpdateTemplate, pData, false);
}
VKAPI_ATTR void VKAPI_CALL lvp_GetDescriptorSetLayoutSizeEXT(
VkDevice _device,
VkDescriptorSetLayout _layout,
VkDeviceSize* pSize)
{
LVP_FROM_HANDLE(lvp_descriptor_set_layout, layout, _layout);
*pSize = layout->size * sizeof(union lp_descriptor);
for (unsigned i = 0; i < layout->binding_count; i++)
*pSize += layout->binding[i].uniform_block_size;
}
VKAPI_ATTR void VKAPI_CALL lvp_GetDescriptorSetLayoutBindingOffsetEXT(
VkDevice _device,
VkDescriptorSetLayout _layout,
uint32_t binding,
VkDeviceSize* pOffset)
{
LVP_FROM_HANDLE(lvp_descriptor_set_layout, layout, _layout);
assert(binding < layout->binding_count);
const struct lvp_descriptor_set_binding_layout *bind_layout = &layout->binding[binding];
if (bind_layout->type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK)
*pOffset = bind_layout->uniform_block_offset;
else
*pOffset = bind_layout->descriptor_index * sizeof(union lp_descriptor);
}
VKAPI_ATTR void VKAPI_CALL lvp_GetDescriptorEXT(
VkDevice _device,
const VkDescriptorGetInfoEXT* pCreateInfo,
size_t size,
void* pDescriptor)
{
LVP_FROM_HANDLE(lvp_device, device, _device);
union lp_descriptor *desc = pDescriptor;
struct pipe_sampler_state sampler = {
.seamless_cube_map = 1,
.max_lod = 0.25,
};
switch (pCreateInfo->type) {
case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK: {
unreachable("this is a spec violation");
break;
}
case VK_DESCRIPTOR_TYPE_SAMPLER: {
if (pCreateInfo->data.pSampler) {
LVP_FROM_HANDLE(lvp_sampler, sampler, pCreateInfo->data.pSampler[0]);
desc->sampler = sampler->desc.sampler;
desc->sampler_index = sampler->desc.sampler_index;
} else {
lp_jit_sampler_from_pipe(&desc->sampler, &sampler);
desc->sampler_index = 0;
}
break;
}
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
const VkDescriptorImageInfo *info = pCreateInfo->data.pCombinedImageSampler;
if (info && info->imageView) {
LVP_FROM_HANDLE(lvp_image_view, iview, info->imageView);
lp_jit_texture_from_pipe(&desc->texture, iview->sv);
desc->sample_functions = iview->texture_handle->functions;
if (info->sampler) {
LVP_FROM_HANDLE(lvp_sampler, sampler, info->sampler);
desc->sampler = sampler->desc.sampler;
desc->sampler_index = sampler->desc.sampler_index;
} else {
lp_jit_sampler_from_pipe(&desc->sampler, &sampler);
desc->sampler_index = 0;
}
} else {
desc->sample_functions = device->null_texture_handle->functions;
desc->sampler_index = 0;
}
break;
}
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
if (pCreateInfo->data.pSampledImage && pCreateInfo->data.pSampledImage->imageView) {
LVP_FROM_HANDLE(lvp_image_view, iview, pCreateInfo->data.pSampledImage->imageView);
lp_jit_texture_from_pipe(&desc->texture, iview->sv);
desc->sample_functions = iview->texture_handle->functions;
} else {
desc->sample_functions = device->null_texture_handle->functions;
desc->sampler_index = 0;
}
break;
}
/* technically these use different pointers, but it's a union, so they're all the same */
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
if (pCreateInfo->data.pStorageImage && pCreateInfo->data.pStorageImage->imageView) {
LVP_FROM_HANDLE(lvp_image_view, iview, pCreateInfo->data.pStorageImage->imageView);
lp_jit_image_from_pipe(&desc->image, &iview->iv);
desc->image_functions = iview->image_handle->functions;
} else {
desc->image_functions = device->null_image_handle->functions;
}
break;
}
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
const VkDescriptorAddressInfoEXT *bda = pCreateInfo->data.pUniformTexelBuffer;
if (bda && bda->address) {
enum pipe_format pformat = vk_format_to_pipe_format(bda->format);
lp_jit_texture_buffer_from_bda(&desc->texture, (void*)(uintptr_t)bda->address, bda->range, pformat);
desc->sample_functions = get_texture_handle_bda(device, bda, pformat).functions;
} else {
desc->sample_functions = device->null_texture_handle->functions;
desc->sampler_index = 0;
}
break;
}
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
const VkDescriptorAddressInfoEXT *bda = pCreateInfo->data.pStorageTexelBuffer;
if (bda && bda->address) {
enum pipe_format pformat = vk_format_to_pipe_format(bda->format);
lp_jit_image_buffer_from_bda(&desc->image, (void *)(uintptr_t)bda->address, bda->range, pformat);
desc->image_functions = get_image_handle_bda(device, bda, pformat).functions;
} else {
desc->image_functions = device->null_image_handle->functions;
}
break;
}
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
const VkDescriptorAddressInfoEXT *bda = pCreateInfo->data.pUniformBuffer;
if (bda && bda->address) {
struct pipe_constant_buffer ubo = {
.user_buffer = (void *)(uintptr_t)bda->address,
.buffer_size = bda->range,
};
lp_jit_buffer_from_pipe_const(&desc->buffer, &ubo, device->pscreen);
} else {
lp_jit_buffer_from_pipe_const(&desc->buffer, &((struct pipe_constant_buffer){0}), device->pscreen);
}
break;
}
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
const VkDescriptorAddressInfoEXT *bda = pCreateInfo->data.pStorageBuffer;
if (bda && bda->address) {
lp_jit_buffer_from_bda(&desc->buffer, (void *)(uintptr_t)bda->address, bda->range);
} else {
lp_jit_buffer_from_pipe(&desc->buffer, &((struct pipe_shader_buffer){0}));
}
break;
}
default:
break;
}
}
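
For context, a minimal application-side sketch of how the entrypoints above are driven (illustrative only, not part of this change). It assumes an existing VkDevice `device`, a VkDescriptorSetLayout `set_layout` created with VK_DESCRIPTOR_SET_LAYOUT_CREATE_DESCRIPTOR_BUFFER_BIT_EXT, a uniform buffer device address `ubo_address`, a host mapping `desc_map` of the descriptor buffer, and `ubo_descriptor_size` taken from VkPhysicalDeviceDescriptorBufferPropertiesEXT:

VkDeviceSize set_size = 0, binding0_offset = 0;
vkGetDescriptorSetLayoutSizeEXT(device, set_layout, &set_size);
vkGetDescriptorSetLayoutBindingOffsetEXT(device, set_layout, 0, &binding0_offset);

const VkDescriptorAddressInfoEXT ubo_info = {
   .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_ADDRESS_INFO_EXT,
   .address = ubo_address,            /* from vkGetBufferDeviceAddress() */
   .range = 256,
   .format = VK_FORMAT_UNDEFINED,
};
const VkDescriptorGetInfoEXT get_info = {
   .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_GET_INFO_EXT,
   .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
   .data.pUniformBuffer = &ubo_info,
};
/* on lavapipe, ubo_descriptor_size == sizeof(union lp_descriptor) */
vkGetDescriptorEXT(device, &get_info, ubo_descriptor_size,
                   (char *)desc_map + binding0_offset);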


@@ -160,6 +160,7 @@ static const struct vk_device_extension_table lvp_device_extensions_supported =
.EXT_depth_clip_control = true,
.EXT_depth_range_unrestricted = true,
.EXT_dynamic_rendering_unused_attachments = true,
.EXT_descriptor_buffer = true,
.EXT_extended_dynamic_state = true,
.EXT_extended_dynamic_state2 = true,
.EXT_extended_dynamic_state3 = true,
@@ -373,6 +374,12 @@ lvp_get_features(const struct lvp_physical_device *pdevice,
.shaderIntegerDotProduct = true,
.maintenance4 = true,
/* VK_EXT_descriptor_buffer */
.descriptorBuffer = true,
.descriptorBufferCaptureReplay = false,
.descriptorBufferPushDescriptors = true,
.descriptorBufferImageLayoutIgnored = true,
/* VK_EXT_primitives_generated_query */
.primitivesGeneratedQuery = true,
.primitivesGeneratedQueryWithRasterizerDiscard = true,
@@ -1200,6 +1207,42 @@ VKAPI_ATTR void VKAPI_CALL lvp_GetPhysicalDeviceProperties2(
props->maxMultiDrawCount = 2048;
break;
}
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_PROPERTIES_EXT: {
VkPhysicalDeviceDescriptorBufferPropertiesEXT *props = (VkPhysicalDeviceDescriptorBufferPropertiesEXT *)ext;
props->combinedImageSamplerDescriptorSingleArray = VK_TRUE;
props->bufferlessPushDescriptors = VK_TRUE;
props->descriptorBufferOffsetAlignment = 4;
props->maxDescriptorBufferBindings = MAX_SETS;
props->maxResourceDescriptorBufferBindings = MAX_SETS;
props->maxSamplerDescriptorBufferBindings = MAX_SETS;
props->maxEmbeddedImmutableSamplerBindings = MAX_SETS;
props->maxEmbeddedImmutableSamplers = 2032;
props->bufferCaptureReplayDescriptorDataSize = 0;
props->imageCaptureReplayDescriptorDataSize = 0;
props->imageViewCaptureReplayDescriptorDataSize = 0;
props->samplerCaptureReplayDescriptorDataSize = 0;
props->accelerationStructureCaptureReplayDescriptorDataSize = 0;
props->samplerDescriptorSize = sizeof(union lp_descriptor);
props->combinedImageSamplerDescriptorSize = sizeof(union lp_descriptor);
props->sampledImageDescriptorSize = sizeof(union lp_descriptor);
props->storageImageDescriptorSize = sizeof(union lp_descriptor);
props->uniformTexelBufferDescriptorSize = sizeof(union lp_descriptor);
props->robustUniformTexelBufferDescriptorSize = sizeof(union lp_descriptor);
props->storageTexelBufferDescriptorSize = sizeof(union lp_descriptor);
props->robustStorageTexelBufferDescriptorSize = sizeof(union lp_descriptor);
props->uniformBufferDescriptorSize = sizeof(union lp_descriptor);
props->robustUniformBufferDescriptorSize = sizeof(union lp_descriptor);
props->storageBufferDescriptorSize = sizeof(union lp_descriptor);
props->robustStorageBufferDescriptorSize = sizeof(union lp_descriptor);
props->inputAttachmentDescriptorSize = sizeof(union lp_descriptor);
props->accelerationStructureDescriptorSize = 0;
props->maxSamplerDescriptorBufferRange = 1<<27; //spec minimum
props->maxResourceDescriptorBufferRange = 1<<27; //spec minimum
props->resourceDescriptorBufferAddressSpaceSize = 1<<27; //spec minimum
props->samplerDescriptorBufferAddressSpaceSize = 1<<27; //spec minimum
props->descriptorBufferAddressSpaceSize = 1<<27; //spec minimum
break;
}
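
For reference, these values are reported through the standard vkGetPhysicalDeviceProperties2 chain; a short sketch (assuming a VkPhysicalDevice `physical_device`) of how an application reads the descriptor sizes and offset alignment advertised here:

VkPhysicalDeviceDescriptorBufferPropertiesEXT db_props = {
   .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_PROPERTIES_EXT,
};
VkPhysicalDeviceProperties2 props2 = {
   .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
   .pNext = &db_props,
};
vkGetPhysicalDeviceProperties2(physical_device, &props2);
/* e.g. db_props.uniformBufferDescriptorSize and db_props.descriptorBufferOffsetAlignment
 * drive the layout of the application's descriptor buffer */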
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES: {
VkPhysicalDeviceTexelBufferAlignmentProperties *properties =
(VkPhysicalDeviceTexelBufferAlignmentProperties *)ext;
@@ -1565,6 +1608,9 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateDevice(
device->null_image_handle = (void *)(uintptr_t)device->queue.ctx->create_image_handle(device->queue.ctx,
&(struct pipe_image_view){ 0 });
util_dynarray_init(&device->bda_texture_handles, NULL);
util_dynarray_init(&device->bda_image_handles, NULL);
*pDevice = lvp_device_to_handle(device);
return VK_SUCCESS;
@@ -1577,6 +1623,16 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyDevice(
{
LVP_FROM_HANDLE(lvp_device, device, _device);
util_dynarray_foreach(&device->bda_texture_handles, struct lp_texture_handle *, handle)
device->queue.ctx->delete_texture_handle(device->queue.ctx, (uint64_t)(uintptr_t)*handle);
util_dynarray_fini(&device->bda_texture_handles);
util_dynarray_foreach(&device->bda_image_handles, struct lp_texture_handle *, handle)
device->queue.ctx->delete_image_handle(device->queue.ctx, (uint64_t)(uintptr_t)*handle);
util_dynarray_fini(&device->bda_image_handles);
device->queue.ctx->delete_texture_handle(device->queue.ctx, (uint64_t)(uintptr_t)device->null_texture_handle);
device->queue.ctx->delete_image_handle(device->queue.ctx, (uint64_t)(uintptr_t)device->null_image_handle);


@@ -59,6 +59,14 @@ enum gs_output {
GS_OUTPUT_LINES,
};
struct descriptor_buffer_offset {
struct lvp_pipeline_layout *layout;
uint32_t buffer_index;
VkDeviceSize offset;
const struct lvp_descriptor_set_layout *sampler_layout;
};
struct lvp_render_attachment {
struct lvp_image_view *imgv;
VkResolveModeFlags resolve_mode;
@@ -129,6 +137,9 @@ struct rendering_state {
struct pipe_resource *index_buffer;
struct pipe_constant_buffer const_buffer[LVP_SHADER_STAGES][16];
struct lvp_descriptor_set *desc_sets[2][MAX_SETS];
struct pipe_resource *desc_buffers[MAX_SETS];
uint8_t *desc_buffer_addrs[MAX_SETS];
struct descriptor_buffer_offset desc_buffer_offsets[2][MAX_SETS];
int num_const_bufs[LVP_SHADER_STAGES];
int num_vb;
unsigned start_vb;
@@ -1142,6 +1153,18 @@ handle_descriptor_sets(struct vk_cmd_queue_entry *cmd, struct rendering_state *s
uint32_t dynamic_offset_index = 0;
for (uint32_t i = 0; i < bds->descriptor_set_count; i++) {
if (state->desc_buffers[bds->first_set + i]) {
/* always unset descriptor buffers when binding sets */
if (bds->pipeline_bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
bool changed = state->const_buffer[MESA_SHADER_COMPUTE][bds->first_set + i].buffer == state->desc_buffers[bds->first_set + i];
state->constbuf_dirty[MESA_SHADER_COMPUTE] |= changed;
} else {
lvp_forall_gfx_stage(j) {
bool changed = state->const_buffer[j][bds->first_set + i].buffer == state->desc_buffers[bds->first_set + i];
state->constbuf_dirty[j] |= changed;
}
}
}
if (!layout->vk.set_layouts[bds->first_set + i])
continue;
@@ -3891,6 +3914,164 @@ handle_execute_generated_commands(struct vk_cmd_queue_entry *cmd, struct renderi
state->pctx->buffer_unmap(state->pctx, pmap);
}
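/* vkCmdBindDescriptorBuffersEXT: record each bound descriptor buffer's host
 * address and wrap it in a pipe_resource for later per-stage binding.
 */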
static void
handle_descriptor_buffers(struct vk_cmd_queue_entry *cmd, struct rendering_state *state)
{
const struct vk_cmd_bind_descriptor_buffers_ext *bind = &cmd->u.bind_descriptor_buffers_ext;
for (unsigned i = 0; i < bind->buffer_count; i++) {
struct pipe_resource *pres = get_buffer_resource(state->pctx, (void *)(uintptr_t)bind->binding_infos[i].address);
state->desc_buffer_addrs[i] = (void *)(uintptr_t)bind->binding_infos[i].address;
pipe_resource_reference(&state->desc_buffers[i], pres);
/* leave only one ref on rendering_state */
pipe_resource_reference(&pres, NULL);
}
}
static bool
descriptor_layouts_equal(const struct lvp_descriptor_set_layout *a, const struct lvp_descriptor_set_layout *b)
{
const uint8_t *pa = (const uint8_t*)a, *pb = (const uint8_t*)b;
uint32_t hash_start_offset = sizeof(struct vk_descriptor_set_layout);
uint32_t binding_offset = offsetof(struct lvp_descriptor_set_layout, binding);
/* base equal */
if (memcmp(pa + hash_start_offset, pb + hash_start_offset, binding_offset - hash_start_offset))
return false;
/* bindings equal */
if (a->binding_count != b->binding_count)
return false;
size_t binding_size = a->binding_count * sizeof(struct lvp_descriptor_set_binding_layout);
const struct lvp_descriptor_set_binding_layout *la = a->binding;
const struct lvp_descriptor_set_binding_layout *lb = b->binding;
if (memcmp(la, lb, binding_size)) {
for (unsigned i = 0; i < a->binding_count; i++) {
if (memcmp(&la[i], &lb[i], offsetof(struct lvp_descriptor_set_binding_layout, immutable_samplers)))
return false;
}
}
/* immutable sampler equal */
if (a->immutable_sampler_count != b->immutable_sampler_count)
return false;
if (a->immutable_sampler_count) {
size_t sampler_size = a->immutable_sampler_count * sizeof(struct lvp_sampler *);
if (memcmp(pa + binding_offset + binding_size, pb + binding_offset + binding_size, sampler_size)) {
struct lvp_sampler **sa = (struct lvp_sampler **)(pa + binding_offset);
struct lvp_sampler **sb = (struct lvp_sampler **)(pb + binding_offset);
for (unsigned i = 0; i < a->immutable_sampler_count; i++) {
if (memcmp(sa[i], sb[i], sizeof(struct lvp_sampler)))
return false;
}
}
}
return true;
}
static void
check_db_compat(struct rendering_state *state, struct lvp_pipeline_layout *layout, bool is_compute, int first_set, int set_count)
{
bool independent = (layout->vk.create_flags & VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT) > 0;
/* handle compatibility rules for unbinding */
for (unsigned j = 0; j < ARRAY_SIZE(state->desc_buffers); j++) {
struct lvp_pipeline_layout *l2 = state->desc_buffer_offsets[is_compute][j].layout;
if ((j >= first_set && j < first_set + set_count) || !l2 || l2 == layout)
continue;
bool independent_l2 = (l2->vk.create_flags & VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT) > 0;
if (independent != independent_l2) {
memset(&state->desc_buffer_offsets[is_compute][j], 0, sizeof(state->desc_buffer_offsets[is_compute][j]));
} else {
if (layout->vk.set_count != l2->vk.set_count) {
memset(&state->desc_buffer_offsets[is_compute][j], 0, sizeof(state->desc_buffer_offsets[is_compute][j]));
} else {
const struct lvp_descriptor_set_layout *a = get_set_layout(layout, j);
const struct lvp_descriptor_set_layout *b = get_set_layout(l2, j);
if (!!a != !!b || !descriptor_layouts_equal(a, b)) {
memset(&state->desc_buffer_offsets[is_compute][j], 0, sizeof(state->desc_buffer_offsets[is_compute][j]));
}
}
}
}
}
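/* Write any immutable samplers from the set layout straight into the bound
 * descriptor buffer at the set's current offset; if no descriptor buffer is
 * bound yet, fall back to the layout's immutable set.
 */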
static void
bind_db_samplers(struct rendering_state *state, bool is_compute, unsigned set)
{
const struct lvp_descriptor_set_layout *set_layout = state->desc_buffer_offsets[is_compute][set].sampler_layout;
if (!set_layout)
return;
unsigned buffer_index = state->desc_buffer_offsets[is_compute][set].buffer_index;
if (!state->desc_buffer_addrs[buffer_index]) {
if (set_layout->immutable_set) {
state->desc_sets[is_compute][set] = set_layout->immutable_set;
u_foreach_bit(stage, set_layout->shader_stages)
handle_set_stage_buffer(state, set_layout->immutable_set->bo, 0, vk_to_mesa_shader_stage(1<<stage), set);
}
return;
}
uint8_t *db = state->desc_buffer_addrs[buffer_index] + state->desc_buffer_offsets[is_compute][set].offset;
uint8_t did_update = 0;
for (uint32_t binding_index = 0; binding_index < set_layout->binding_count; binding_index++) {
const struct lvp_descriptor_set_binding_layout *bind_layout = &set_layout->binding[binding_index];
if (!bind_layout->immutable_samplers)
continue;
union lp_descriptor *desc = (void*)db;
desc += bind_layout->descriptor_index;
for (uint32_t sampler_index = 0; sampler_index < bind_layout->array_size; sampler_index++) {
if (bind_layout->immutable_samplers[sampler_index]) {
desc[sampler_index].sampler = bind_layout->immutable_samplers[sampler_index]->sampler;
u_foreach_bit(stage, set_layout->shader_stages)
did_update |= BITFIELD_BIT(vk_to_mesa_shader_stage(1<<stage));
}
}
}
u_foreach_bit(stage, did_update)
state->constbuf_dirty[stage] = true;
}
static void
handle_descriptor_buffer_embedded_samplers(struct vk_cmd_queue_entry *cmd, struct rendering_state *state)
{
const struct vk_cmd_bind_descriptor_buffer_embedded_samplers_ext *bind = &cmd->u.bind_descriptor_buffer_embedded_samplers_ext;
LVP_FROM_HANDLE(lvp_pipeline_layout, layout, bind->layout);
if (!layout->vk.set_layouts[bind->set])
return;
const struct lvp_descriptor_set_layout *set_layout = get_set_layout(layout, bind->set);
if (!set_layout->immutable_sampler_count)
return;
bool is_compute = bind->pipeline_bind_point == VK_PIPELINE_BIND_POINT_COMPUTE;
check_db_compat(state, layout, is_compute, bind->set, 1);
state->desc_buffer_offsets[is_compute][bind->set].sampler_layout = set_layout;
bind_db_samplers(state, is_compute, bind->set);
}
static void
handle_descriptor_buffer_offsets(struct vk_cmd_queue_entry *cmd, struct rendering_state *state)
{
struct vk_cmd_set_descriptor_buffer_offsets_ext *dbo = &cmd->u.set_descriptor_buffer_offsets_ext;
bool is_compute = dbo->pipeline_bind_point == VK_PIPELINE_BIND_POINT_COMPUTE;
for (unsigned i = 0; i < dbo->set_count; i++) {
LVP_FROM_HANDLE(lvp_pipeline_layout, layout, dbo->layout);
check_db_compat(state, layout, is_compute, dbo->first_set, dbo->set_count);
unsigned idx = dbo->first_set + i;
state->desc_buffer_offsets[is_compute][idx].layout = layout;
state->desc_buffer_offsets[is_compute][idx].buffer_index = dbo->buffer_indices[i];
state->desc_buffer_offsets[is_compute][idx].offset = dbo->offsets[i];
const struct lvp_descriptor_set_layout *set_layout = get_set_layout(layout, idx);
/* set for all stages */
u_foreach_bit(stage, set_layout->shader_stages) {
gl_shader_stage pstage = vk_to_mesa_shader_stage(1<<stage);
handle_set_stage_buffer(state, state->desc_buffers[dbo->buffer_indices[i]], dbo->offsets[i], pstage, idx);
}
bind_db_samplers(state, is_compute, idx);
}
}
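
For context, the command-stream side these handlers implement looks roughly like the following in an application (illustrative only; assumes a recording VkCommandBuffer `cmd_buf`, a VkPipelineLayout `pipeline_layout`, and the descriptor buffer's device address `desc_buf_address`):

const VkDescriptorBufferBindingInfoEXT binding_info = {
   .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_INFO_EXT,
   .address = desc_buf_address,   /* vkGetBufferDeviceAddress() of the descriptor buffer */
   .usage = VK_BUFFER_USAGE_RESOURCE_DESCRIPTOR_BUFFER_BIT_EXT,
};
vkCmdBindDescriptorBuffersEXT(cmd_buf, 1, &binding_info);

const uint32_t buffer_index = 0;
const VkDeviceSize offset = 0;
vkCmdSetDescriptorBufferOffsetsEXT(cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                   pipeline_layout, 0 /* firstSet */, 1,
                                   &buffer_index, &offset);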
void lvp_add_enqueue_cmd_entrypoints(struct vk_device_dispatch_table *disp)
{
struct vk_device_dispatch_table cmd_enqueue_dispatch;
@@ -3980,6 +4161,9 @@ void lvp_add_enqueue_cmd_entrypoints(struct vk_device_dispatch_table *disp)
ENQUEUE_CMD(CmdSetEvent2)
ENQUEUE_CMD(CmdWaitEvents2)
ENQUEUE_CMD(CmdWriteTimestamp2)
ENQUEUE_CMD(CmdBindDescriptorBuffersEXT)
ENQUEUE_CMD(CmdSetDescriptorBufferOffsetsEXT)
ENQUEUE_CMD(CmdBindDescriptorBufferEmbeddedSamplersEXT)
ENQUEUE_CMD(CmdSetPolygonModeEXT)
ENQUEUE_CMD(CmdSetTessellationDomainOriginEXT)
@@ -4359,6 +4543,15 @@ static void lvp_execute_cmd_buffer(struct list_head *cmds,
case VK_CMD_EXECUTE_GENERATED_COMMANDS_NV:
handle_execute_generated_commands(cmd, state, print_cmds);
break;
case VK_CMD_BIND_DESCRIPTOR_BUFFERS_EXT:
handle_descriptor_buffers(cmd, state);
break;
case VK_CMD_SET_DESCRIPTOR_BUFFER_OFFSETS_EXT:
handle_descriptor_buffer_offsets(cmd, state);
break;
case VK_CMD_BIND_DESCRIPTOR_BUFFER_EMBEDDED_SAMPLERS_EXT:
handle_descriptor_buffer_embedded_samplers(cmd, state);
break;
default:
fprintf(stderr, "Unsupported command %s\n", vk_cmd_queue_type_names[cmd->type]);
unreachable("Unsupported command");
@@ -4424,6 +4617,9 @@ VkResult lvp_execute_cmds(struct lvp_device *device,
util_dynarray_fini(&state->push_desc_sets);
for (unsigned i = 0; i < ARRAY_SIZE(state->desc_buffers); i++)
pipe_resource_reference(&state->desc_buffers[i], NULL);
free(state->color_att);
return VK_SUCCESS;
}


@@ -202,6 +202,8 @@ struct lvp_device {
struct lp_texture_handle *null_texture_handle;
struct lp_texture_handle *null_image_handle;
struct util_dynarray bda_texture_handles;
struct util_dynarray bda_image_handles;
};
void lvp_device_get_cache_uuid(void *uuid);