zink: add extra synchronization for buffer descriptor binds

"most" of the time it isn't necessary to insert any pipeline barriers when binding
descriptors, as GL requires explicit barrier usage which comes through a different
codepath

the exception here is when the following scenario occurs:
* have buffer A
* buffer_subdata is called on A
* discard path is taken || A is not host-visible
* stream uploader is used for host write
* CmdCopyBuffer is used to copy the data back to A
buffer A now has a pending TRANSFER write that must complete before the buffer is
used in a shader, so synchronization is required any time TRANSFER usage is detected
in a bind

there are also going to be more exceptions going forward as more internal usage is added,
so just remove the whole fake-barrier mechanism since it'll only become more
problematic over time

Cc: 21.3 mesa-stable

Reviewed-by: Hoe Hao Cheng <haochengho12907@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14496>
This commit is contained in:
Mike Blumenkrantz 2022-01-06 13:01:41 -05:00 committed by Marge Bot
parent 5028630bd6
commit 3e5f4cebe8
2 changed files with 6 additions and 15 deletions

View file

@ -1120,7 +1120,7 @@ zink_set_constant_buffer(struct pipe_context *pctx,
update_res_bind_count(ctx, new_res, shader == PIPE_SHADER_COMPUTE, false);
}
zink_batch_resource_usage_set(&ctx->batch, new_res, false);
zink_fake_buffer_barrier(new_res, VK_ACCESS_UNIFORM_READ_BIT,
zink_resource_buffer_barrier(ctx, new_res, VK_ACCESS_UNIFORM_READ_BIT,
zink_pipeline_flags_from_pipe_stage(shader));
}
update |= ((index || screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY) && ctx->ubos[shader][index].buffer_offset != offset) ||
@ -1215,7 +1215,7 @@ zink_set_shader_buffers(struct pipe_context *pctx,
ssbo->buffer_size = MIN2(buffers[i].buffer_size, new_res->base.b.width0 - ssbo->buffer_offset);
util_range_add(&new_res->base.b, &new_res->valid_buffer_range, ssbo->buffer_offset,
ssbo->buffer_offset + ssbo->buffer_size);
zink_fake_buffer_barrier(new_res, access,
zink_resource_buffer_barrier(ctx, new_res, access,
zink_pipeline_flags_from_pipe_stage(p_stage));
update = true;
max_slot = MAX2(max_slot, start_slot + i);
@ -1401,7 +1401,7 @@ zink_set_shader_images(struct pipe_context *pctx,
image_view->buffer_view = create_image_bufferview(ctx, &images[i]);
assert(image_view->buffer_view);
zink_batch_usage_set(&image_view->buffer_view->batch_uses, ctx->batch.state);
zink_fake_buffer_barrier(res, access,
zink_resource_buffer_barrier(ctx, res, access,
zink_pipeline_flags_from_pipe_stage(p_stage));
} else {
image_view->surface = create_image_surface(ctx, &images[i], p_stage == PIPE_SHADER_COMPUTE);
@ -1494,7 +1494,7 @@ zink_set_sampler_views(struct pipe_context *pctx,
update = true;
}
zink_batch_usage_set(&b->buffer_view->batch_uses, ctx->batch.state);
zink_fake_buffer_barrier(res, VK_ACCESS_SHADER_READ_BIT,
zink_resource_buffer_barrier(ctx, res, VK_ACCESS_SHADER_READ_BIT,
zink_pipeline_flags_from_pipe_stage(shader_type));
if (!a || a->buffer_view->buffer_view != b->buffer_view->buffer_view)
update = true;
@ -1661,7 +1661,7 @@ zink_make_texture_handle_resident(struct pipe_context *pctx, uint64_t handle, bo
rebind_bindless_bufferview(ctx, res, ds);
VkBufferView *bv = &ctx->di.bindless[0].buffer_infos[handle];
*bv = ds->bufferview->buffer_view;
zink_fake_buffer_barrier(res, VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
zink_resource_buffer_barrier(ctx, res, VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
} else {
VkDescriptorImageInfo *ii = &ctx->di.bindless[0].img_infos[handle];
ii->sampler = bd->sampler->sampler;
@ -1780,7 +1780,7 @@ zink_make_image_handle_resident(struct pipe_context *pctx, uint64_t handle, unsi
rebind_bindless_bufferview(ctx, res, ds);
VkBufferView *bv = &ctx->di.bindless[1].buffer_infos[handle];
*bv = ds->bufferview->buffer_view;
zink_fake_buffer_barrier(res, access, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
zink_resource_buffer_barrier(ctx, res, access, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
} else {
VkDescriptorImageInfo *ii = &ctx->di.bindless[1].img_infos[handle];
ii->sampler = VK_NULL_HANDLE;
@ -2967,13 +2967,6 @@ zink_resource_buffer_needs_barrier(struct zink_resource *res, VkAccessFlags flag
(res->obj->access & flags) != flags;
}
void
zink_fake_buffer_barrier(struct zink_resource *res, VkAccessFlags flags, VkPipelineStageFlags pipeline)
{
res->obj->access = flags;
res->obj->access_stage = pipeline;
}
void
zink_resource_buffer_barrier(struct zink_context *ctx, struct zink_resource *res, VkAccessFlags flags, VkPipelineStageFlags pipeline)
{

View file

@ -397,8 +397,6 @@ zink_resource_access_is_write(VkAccessFlags flags);
void
zink_resource_buffer_barrier(struct zink_context *ctx, struct zink_resource *res, VkAccessFlags flags, VkPipelineStageFlags pipeline);
void
zink_fake_buffer_barrier(struct zink_resource *res, VkAccessFlags flags, VkPipelineStageFlags pipeline);
bool
zink_resource_image_needs_barrier(struct zink_resource *res, VkImageLayout new_layout, VkAccessFlags flags, VkPipelineStageFlags pipeline);
bool