zink: delete zink_batch

this makes the code a bit more ergonomic

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/29108>
This commit is contained in:
Mike Blumenkrantz 2024-05-08 12:59:35 -04:00 committed by Marge Bot
parent 40f595b30c
commit ac07fefdda
15 changed files with 187 additions and 192 deletions

View file

@ -473,7 +473,7 @@ get_batch_state(struct zink_context *ctx)
if (bs) {
zink_reset_batch_state(ctx, bs);
} else {
if (!ctx->batch.bs) {
if (!ctx->bs) {
/* this is batch init, so create a few more states for later use */
for (int i = 0; i < 3; i++) {
struct zink_batch_state *state = create_batch_state(ctx);
@ -494,17 +494,17 @@ get_batch_state(struct zink_context *ctx)
/* Acquire (or recycle) a batch state for the context and mark it as having
 * no recorded work yet. The diff rendering had interleaved the pre-change
 * (ctx->batch.bs) and post-change (ctx->bs) lines; this is the resolved
 * post-change version, matching the commit's batch.bs -> bs rename.
 */
void
zink_reset_batch(struct zink_context *ctx)
{
   ctx->bs = get_batch_state(ctx);
   assert(ctx->bs);

   ctx->bs->has_work = false;
}
void
zink_batch_bind_db(struct zink_context *ctx)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
struct zink_batch_state *bs = ctx->batch.bs;
struct zink_batch_state *bs = ctx->bs;
unsigned count = 1;
VkDescriptorBufferBindingInfoEXT infos[2] = {0};
infos[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_INFO_EXT;
@ -530,7 +530,7 @@ zink_start_batch(struct zink_context *ctx)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
zink_reset_batch(ctx);
struct zink_batch_state *bs = ctx->batch.bs;
struct zink_batch_state *bs = ctx->bs;
bs->usage.unflushed = true;
@ -583,9 +583,9 @@ zink_start_batch(struct zink_context *ctx)
zink_batch_bind_db(ctx);
/* zero init for unordered blits */
if (screen->info.have_EXT_attachment_feedback_loop_dynamic_state) {
VKCTX(CmdSetAttachmentFeedbackLoopEnableEXT)(ctx->batch.bs->cmdbuf, 0);
VKCTX(CmdSetAttachmentFeedbackLoopEnableEXT)(ctx->batch.bs->reordered_cmdbuf, 0);
VKCTX(CmdSetAttachmentFeedbackLoopEnableEXT)(ctx->batch.bs->unsynchronized_cmdbuf, 0);
VKCTX(CmdSetAttachmentFeedbackLoopEnableEXT)(ctx->bs->cmdbuf, 0);
VKCTX(CmdSetAttachmentFeedbackLoopEnableEXT)(ctx->bs->reordered_cmdbuf, 0);
VKCTX(CmdSetAttachmentFeedbackLoopEnableEXT)(ctx->bs->unsynchronized_cmdbuf, 0);
}
}
@ -812,7 +812,7 @@ zink_end_batch(struct zink_context *ctx)
ctx->oom_flush = true;
}
bs = ctx->batch.bs;
bs = ctx->bs;
if (ctx->last_batch_state)
ctx->last_batch_state->next = bs;
else {
@ -879,7 +879,7 @@ zink_end_batch(struct zink_context *ctx)
for (; res; res = zink_resource(res->base.b.next)) {
VkSemaphore sem = zink_create_exportable_semaphore(screen);
if (sem)
util_dynarray_append(&ctx->batch.bs->signal_semaphores, VkSemaphore, sem);
util_dynarray_append(&ctx->bs->signal_semaphores, VkSemaphore, sem);
}
}
@ -939,18 +939,18 @@ void
zink_batch_reference_resource_rw(struct zink_context *ctx, struct zink_resource *res, bool write)
{
/* if the resource already has usage of any sort set for this batch, */
if (!zink_resource_usage_matches(res, ctx->batch.bs) ||
if (!zink_resource_usage_matches(res, ctx->bs) ||
/* or if it's bound somewhere */
!zink_resource_has_binds(res))
/* then it already has a batch ref and doesn't need one here */
zink_batch_reference_resource(ctx, res);
zink_batch_resource_usage_set(ctx->batch.bs, res, write, res->obj->is_buffer);
zink_batch_resource_usage_set(ctx->bs, res, write, res->obj->is_buffer);
}
/* Queue a semaphore the current batch must wait on before execution
 * (e.g. a swapchain acquire semaphore). The diff rendering had interleaved
 * the pre-change (ctx->batch.bs) and post-change (ctx->bs) lines; this is
 * the resolved post-change version per the commit's batch.bs -> bs rename.
 */
void
zink_batch_add_wait_semaphore(struct zink_context *ctx, VkSemaphore sem)
{
   util_dynarray_append(&ctx->bs->acquires, VkSemaphore, sem);
}
static bool
@ -965,7 +965,7 @@ batch_ptr_add_usage(struct zink_context *ctx, struct set *s, void *ptr)
ALWAYS_INLINE static void
check_oom_flush(struct zink_context *ctx)
{
const VkDeviceSize resource_size = ctx->batch.bs->resource_size;
const VkDeviceSize resource_size = ctx->bs->resource_size;
if (resource_size >= zink_screen(ctx->base.screen)->clamp_video_mem) {
ctx->oom_flush = true;
ctx->oom_stall = true;
@ -984,7 +984,7 @@ zink_batch_reference_resource(struct zink_context *ctx, struct zink_resource *re
bool
zink_batch_reference_resource_move(struct zink_context *ctx, struct zink_resource *res)
{
struct zink_batch_state *bs = ctx->batch.bs;
struct zink_batch_state *bs = ctx->bs;
simple_mtx_lock(&bs->ref_lock);
/* swapchains are special */
@ -1068,7 +1068,7 @@ void
zink_batch_reference_program(struct zink_context *ctx,
struct zink_program *pg)
{
struct zink_batch_state *bs = ctx->batch.bs;
struct zink_batch_state *bs = ctx->bs;
if (zink_batch_usage_matches(pg->batch_uses, bs) ||
!batch_ptr_add_usage(ctx, &bs->programs, pg))
return;
@ -1117,7 +1117,7 @@ batch_usage_wait(struct zink_context *ctx, struct zink_batch_usage *u, bool tryw
if (!zink_batch_usage_exists(u))
return;
if (zink_batch_usage_is_unflushed(u)) {
if (likely(u == &ctx->batch.bs->usage))
if (likely(u == &ctx->bs->usage))
ctx->base.flush(&ctx->base, NULL, PIPE_FLUSH_HINT_FINISH);
else { //multi-context
mtx_lock(&u->mtx);

View file

@ -72,9 +72,9 @@ blit_resolve(struct zink_context *ctx, const struct pipe_blit_info *info, bool *
zink_resource_setup_transfer_layouts(ctx, use_src, dst);
VkCommandBuffer cmdbuf = *needs_present_readback ?
ctx->batch.bs->cmdbuf :
ctx->bs->cmdbuf :
zink_get_cmdbuf(ctx, src, dst);
if (cmdbuf == ctx->batch.bs->cmdbuf)
if (cmdbuf == ctx->bs->cmdbuf)
zink_flush_dgc_if_enabled(ctx);
zink_batch_reference_resource_rw(ctx, use_src, false);
zink_batch_reference_resource_rw(ctx, dst, true);
@ -278,9 +278,9 @@ blit_native(struct zink_context *ctx, const struct pipe_blit_info *info, bool *n
zink_resource_setup_transfer_layouts(ctx, use_src, dst);
VkCommandBuffer cmdbuf = *needs_present_readback ?
ctx->batch.bs->cmdbuf :
ctx->bs->cmdbuf :
zink_get_cmdbuf(ctx, src, dst);
if (cmdbuf == ctx->batch.bs->cmdbuf)
if (cmdbuf == ctx->bs->cmdbuf)
zink_flush_dgc_if_enabled(ctx);
zink_batch_reference_resource_rw(ctx, use_src, false);
zink_batch_reference_resource_rw(ctx, dst, true);
@ -421,8 +421,8 @@ zink_blit(struct pipe_context *pctx,
ctx->unordered_blitting = !(info->render_condition_enable && ctx->render_condition_active) &&
zink_screen(ctx->base.screen)->info.have_KHR_dynamic_rendering &&
!needs_present_readback &&
zink_get_cmdbuf(ctx, src, dst) == ctx->batch.bs->reordered_cmdbuf;
VkCommandBuffer cmdbuf = ctx->batch.bs->cmdbuf;
zink_get_cmdbuf(ctx, src, dst) == ctx->bs->reordered_cmdbuf;
VkCommandBuffer cmdbuf = ctx->bs->cmdbuf;
VkPipeline pipeline = ctx->gfx_pipeline_state.pipeline;
bool in_rp = ctx->in_rp;
uint64_t tc_data = ctx->dynamic_fb.tc_info.data;
@ -432,11 +432,11 @@ zink_blit(struct pipe_context *pctx,
bool rp_tc_info_updated = ctx->rp_tc_info_updated;
if (ctx->unordered_blitting) {
/* for unordered blit, swap the unordered cmdbuf for the main one for the whole op to avoid conditional hell */
ctx->batch.bs->cmdbuf = ctx->batch.bs->reordered_cmdbuf;
ctx->bs->cmdbuf = ctx->bs->reordered_cmdbuf;
ctx->in_rp = false;
ctx->rp_changed = true;
ctx->queries_disabled = true;
ctx->batch.bs->has_barriers = true;
ctx->bs->has_barriers = true;
ctx->pipeline_changed[0] = true;
zink_reset_ds3_states(ctx);
zink_select_draw_vbo(ctx);
@ -484,7 +484,7 @@ zink_blit(struct pipe_context *pctx,
ctx->rp_tc_info_updated |= rp_tc_info_updated;
ctx->queries_disabled = queries_disabled;
ctx->dynamic_fb.tc_info.data = tc_data;
ctx->batch.bs->cmdbuf = cmdbuf;
ctx->bs->cmdbuf = cmdbuf;
ctx->gfx_pipeline_state.pipeline = pipeline;
ctx->pipeline_changed[0] = true;
ctx->ds3_states = ds3_states;

View file

@ -751,7 +751,7 @@ static void
track_freed_sparse_bo(struct zink_context *ctx, struct zink_sparse_backing *backing)
{
pipe_reference(NULL, &backing->bo->base.base.reference);
util_dynarray_append(&ctx->batch.bs->freed_sparse_backing_bos, struct zink_bo*, backing->bo);
util_dynarray_append(&ctx->bs->freed_sparse_backing_bos, struct zink_bo*, backing->bo);
}
static VkSemaphore

View file

@ -107,7 +107,7 @@ clear_in_rp(struct pipe_context *pctx,
cr.baseArrayLayer = 0;
cr.layerCount = util_framebuffer_get_num_layers(fb);
assert(ctx->in_rp);
VKCTX(CmdClearAttachments)(ctx->batch.bs->cmdbuf, num_attachments, attachments, 1, &cr);
VKCTX(CmdClearAttachments)(ctx->bs->cmdbuf, num_attachments, attachments, 1, &cr);
/*
Rendering within a subpass containing a feedback loop creates a data race, except in the following
cases:
@ -473,7 +473,7 @@ zink_clear_texture_dynamic(struct pipe_context *pctx,
zink_blit_barriers(ctx, NULL, res, full_clear);
VkCommandBuffer cmdbuf = zink_get_cmdbuf(ctx, NULL, res);
if (cmdbuf == ctx->batch.bs->cmdbuf && ctx->in_rp)
if (cmdbuf == ctx->bs->cmdbuf && ctx->in_rp)
zink_batch_no_rp(ctx);
if (res->aspect & VK_IMAGE_ASPECT_COLOR_BIT) {
@ -700,24 +700,24 @@ fb_clears_apply_internal(struct zink_context *ctx, struct pipe_resource *pres, i
else {
struct zink_resource *res = zink_resource(pres);
bool queries_disabled = ctx->queries_disabled;
VkCommandBuffer cmdbuf = ctx->batch.bs->cmdbuf;
VkCommandBuffer cmdbuf = ctx->bs->cmdbuf;
/* slightly different than the u_blitter handling:
* this can be called recursively while unordered_blitting=true
*/
bool can_reorder = zink_screen(ctx->base.screen)->info.have_KHR_dynamic_rendering &&
!ctx->render_condition_active &&
!ctx->unordered_blitting &&
zink_get_cmdbuf(ctx, NULL, res) == ctx->batch.bs->reordered_cmdbuf;
zink_get_cmdbuf(ctx, NULL, res) == ctx->bs->reordered_cmdbuf;
if (can_reorder) {
/* set unordered_blitting but NOT blitting:
* let begin_rendering handle layouts
*/
ctx->unordered_blitting = true;
/* for unordered clears, swap the unordered cmdbuf for the main one for the whole op to avoid conditional hell */
ctx->batch.bs->cmdbuf = ctx->batch.bs->reordered_cmdbuf;
ctx->bs->cmdbuf = ctx->bs->reordered_cmdbuf;
ctx->rp_changed = true;
ctx->queries_disabled = true;
ctx->batch.bs->has_barriers = true;
ctx->bs->has_barriers = true;
}
/* this will automatically trigger all the clears */
zink_batch_rp(ctx);
@ -726,7 +726,7 @@ fb_clears_apply_internal(struct zink_context *ctx, struct pipe_resource *pres, i
ctx->unordered_blitting = false;
ctx->rp_changed = true;
ctx->queries_disabled = queries_disabled;
ctx->batch.bs->cmdbuf = cmdbuf;
ctx->bs->cmdbuf = cmdbuf;
}
}
zink_fb_clear_reset(ctx, i);

View file

@ -125,7 +125,7 @@ zink_context_destroy(struct pipe_context *pctx)
if (util_queue_is_initialized(&screen->flush_queue))
util_queue_finish(&screen->flush_queue);
if (ctx->batch.bs && !screen->device_lost) {
if (ctx->bs && !screen->device_lost) {
simple_mtx_lock(&screen->queue_lock);
VkResult result = VKSCR(QueueWaitIdle)(screen->queue);
simple_mtx_unlock(&screen->queue_lock);
@ -201,12 +201,12 @@ zink_context_destroy(struct pipe_context *pctx)
}
while (screen->last_free_batch_state && screen->last_free_batch_state->next)
screen->last_free_batch_state = screen->last_free_batch_state->next;
if (ctx->batch.bs) {
zink_clear_batch_state(ctx, ctx->batch.bs);
if (ctx->bs) {
zink_clear_batch_state(ctx, ctx->bs);
if (screen->free_batch_states)
screen->last_free_batch_state->next = ctx->batch.bs;
screen->last_free_batch_state->next = ctx->bs;
else {
screen->free_batch_states = ctx->batch.bs;
screen->free_batch_states = ctx->bs;
screen->last_free_batch_state = screen->free_batch_states;
}
}
@ -929,7 +929,7 @@ zink_delete_sampler_state(struct pipe_context *pctx,
void *sampler_state)
{
struct zink_sampler_state *sampler = sampler_state;
struct zink_batch_state *bs = zink_context(pctx)->batch.bs;
struct zink_batch_state *bs = zink_context(pctx)->bs;
/* may be called if context_create fails */
if (bs) {
util_dynarray_append(&bs->zombie_samplers, VkSampler,
@ -1374,7 +1374,7 @@ zink_set_vertex_buffers(struct pipe_context *pctx,
/* always barrier before possible rebind */
zink_screen(ctx->base.screen)->buffer_barrier(ctx, res, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);
zink_batch_resource_usage_set(ctx->batch.bs, res, false, true);
zink_batch_resource_usage_set(ctx->bs, res, false, true);
res->obj->unordered_read = false;
} else {
enabled_buffers &= ~BITFIELD_BIT(i);
@ -1531,7 +1531,7 @@ zink_set_constant_buffer(struct pipe_context *pctx,
}
zink_screen(ctx->base.screen)->buffer_barrier(ctx, new_res, VK_ACCESS_UNIFORM_READ_BIT,
new_res->gfx_barrier);
zink_batch_resource_usage_set(ctx->batch.bs, new_res, false, true);
zink_batch_resource_usage_set(ctx->bs, new_res, false, true);
if (!ctx->unordered_blitting)
new_res->obj->unordered_read = false;
}
@ -1652,7 +1652,7 @@ zink_set_shader_buffers(struct pipe_context *pctx,
ssbo->buffer_offset + ssbo->buffer_size);
zink_screen(ctx->base.screen)->buffer_barrier(ctx, new_res, access,
new_res->gfx_barrier);
zink_batch_resource_usage_set(ctx->batch.bs, new_res, access & VK_ACCESS_SHADER_WRITE_BIT, true);
zink_batch_resource_usage_set(ctx->bs, new_res, access & VK_ACCESS_SHADER_WRITE_BIT, true);
update = true;
max_slot = MAX2(max_slot, slot);
update_descriptor_state_ssbo(ctx, p_stage, slot, new_res);
@ -1947,14 +1947,14 @@ zink_set_shader_images(struct pipe_context *pctx,
if (b->resource->target == PIPE_BUFFER) {
screen->buffer_barrier(ctx, res, access,
res->gfx_barrier);
zink_batch_resource_usage_set(ctx->batch.bs, res,
zink_batch_resource_usage_set(ctx->bs, res,
zink_resource_access_is_write(access), true);
if (zink_resource_access_is_write(access))
res->obj->unordered_write = false;
res->obj->unordered_read = false;
} else {
finalize_image_bind(ctx, res, is_compute);
zink_batch_resource_usage_set(ctx->batch.bs, res,
zink_batch_resource_usage_set(ctx->bs, res,
zink_resource_access_is_write(access), false);
}
memcpy(&a->base, images + i, sizeof(struct pipe_image_view));
@ -1991,7 +1991,7 @@ update_feedback_loop_dynamic_state(struct zink_context *ctx)
aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
if (ctx->feedback_loops & BITFIELD_BIT(PIPE_MAX_COLOR_BUFS))
aspects |= VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
VKCTX(CmdSetAttachmentFeedbackLoopEnableEXT)(ctx->batch.bs->cmdbuf, aspects);
VKCTX(CmdSetAttachmentFeedbackLoopEnableEXT)(ctx->bs->cmdbuf, aspects);
}
static void
@ -2106,7 +2106,7 @@ zink_set_sampler_views(struct pipe_context *pctx,
update = true;
zink_screen(ctx->base.screen)->buffer_barrier(ctx, res, VK_ACCESS_SHADER_READ_BIT,
res->gfx_barrier);
zink_batch_resource_usage_set(ctx->batch.bs, res, false, true);
zink_batch_resource_usage_set(ctx->bs, res, false, true);
if (!ctx->unordered_blitting)
res->obj->unordered_read = false;
} else {
@ -2134,7 +2134,7 @@ zink_set_sampler_views(struct pipe_context *pctx,
}
if (!a)
update = true;
zink_batch_resource_usage_set(ctx->batch.bs, res, false, false);
zink_batch_resource_usage_set(ctx->bs, res, false, false);
if (b->zs_view) {
assert(start_slot + i < 32); //bitfield size
ctx->di.zs_swizzle[shader_type].mask |= BITFIELD_BIT(start_slot + i);
@ -2233,7 +2233,7 @@ zink_delete_texture_handle(struct pipe_context *pctx, uint64_t handle)
struct zink_descriptor_surface *ds = &bd->ds;
_mesa_hash_table_remove(&ctx->di.bindless[is_buffer].tex_handles, he);
uint32_t h = handle;
util_dynarray_append(&ctx->batch.bs->bindless_releases[0], uint32_t, h);
util_dynarray_append(&ctx->bs->bindless_releases[0], uint32_t, h);
if (ds->is_buffer) {
if (zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_DB) {
@ -2357,7 +2357,7 @@ zink_make_texture_handle_resident(struct pipe_context *pctx, uint64_t handle, bo
*bv = ds->bufferview->buffer_view;
}
zink_screen(ctx->base.screen)->buffer_barrier(ctx, res, VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
zink_batch_resource_usage_set(ctx->batch.bs, res, false, true);
zink_batch_resource_usage_set(ctx->bs, res, false, true);
res->obj->unordered_read = false;
} else {
VkDescriptorImageInfo *ii = &ctx->di.bindless[0].img_infos[handle];
@ -2375,7 +2375,7 @@ zink_make_texture_handle_resident(struct pipe_context *pctx, uint64_t handle, bo
// TODO: figure out a way to link up layouts between unordered and main cmdbuf
res->obj->unordered_write = false;
}
zink_batch_resource_usage_set(ctx->batch.bs, res, false, false);
zink_batch_resource_usage_set(ctx->bs, res, false, false);
res->obj->unordered_write = false;
}
res->gfx_barrier |= VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
@ -2440,7 +2440,7 @@ zink_delete_image_handle(struct pipe_context *pctx, uint64_t handle)
struct zink_descriptor_surface *ds = he->data;
_mesa_hash_table_remove(&ctx->di.bindless[is_buffer].img_handles, he);
uint32_t h = handle;
util_dynarray_append(&ctx->batch.bs->bindless_releases[1], uint32_t, h);
util_dynarray_append(&ctx->bs->bindless_releases[1], uint32_t, h);
if (ds->is_buffer) {
if (zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_DB) {
@ -2499,7 +2499,7 @@ zink_make_image_handle_resident(struct pipe_context *pctx, uint64_t handle, unsi
*bv = ds->bufferview->buffer_view;
}
zink_screen(ctx->base.screen)->buffer_barrier(ctx, res, access, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
zink_batch_resource_usage_set(ctx->batch.bs, res, zink_resource_access_is_write(access), true);
zink_batch_resource_usage_set(ctx->bs, res, zink_resource_access_is_write(access), true);
if (zink_resource_access_is_write(access))
res->obj->unordered_write = false;
res->obj->unordered_read = false;
@ -2510,7 +2510,7 @@ zink_make_image_handle_resident(struct pipe_context *pctx, uint64_t handle, unsi
ii->imageLayout = VK_IMAGE_LAYOUT_GENERAL;
finalize_image_bind(ctx, res, false);
finalize_image_bind(ctx, res, true);
zink_batch_resource_usage_set(ctx->batch.bs, res, zink_resource_access_is_write(access), false);
zink_batch_resource_usage_set(ctx->bs, res, zink_resource_access_is_write(access), false);
res->obj->unordered_write = false;
}
res->gfx_barrier |= VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
@ -2558,7 +2558,7 @@ zink_set_global_binding(struct pipe_context *pctx,
memcpy(&addr, handles[i], sizeof(addr));
addr += zink_resource_get_address(zink_screen(pctx->screen), res);
memcpy(handles[i], &addr, sizeof(addr));
zink_resource_usage_set(res, ctx->batch.bs, true);
zink_resource_usage_set(res, ctx->bs, true);
res->obj->unordered_read = res->obj->unordered_write = false;
zink_screen(ctx->base.screen)->buffer_barrier(ctx, res, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
} else if (globals[i]) {
@ -2600,7 +2600,7 @@ zink_set_patch_vertices(struct pipe_context *pctx, uint8_t patch_vertices)
if (zink_set_tcs_key_patches(ctx, patch_vertices)) {
ctx->gfx_pipeline_state.dyn_state2.vertices_per_patch = patch_vertices;
if (zink_screen(ctx->base.screen)->info.dynamic_state2_feats.extendedDynamicState2PatchControlPoints)
VKCTX(CmdSetPatchControlPointsEXT)(ctx->batch.bs->cmdbuf, patch_vertices);
VKCTX(CmdSetPatchControlPointsEXT)(ctx->bs->cmdbuf, patch_vertices);
else
ctx->gfx_pipeline_state.dirty = true;
zink_flush_dgc_if_enabled(ctx);
@ -2954,7 +2954,7 @@ begin_rendering(struct zink_context *ctx)
if (zink_screen(ctx->base.screen)->info.have_EXT_multisampled_render_to_single_sampled)
ctx->dynamic_fb.info.pNext = ctx->transient_attachments ? &msrtss : NULL;
assert(!ctx->transient_attachments || msrtss.rasterizationSamples != VK_SAMPLE_COUNT_1_BIT);
VKCTX(CmdBeginRendering)(ctx->batch.bs->cmdbuf, &ctx->dynamic_fb.info);
VKCTX(CmdBeginRendering)(ctx->bs->cmdbuf, &ctx->dynamic_fb.info);
ctx->in_rp = true;
return clear_buffers;
}
@ -2966,7 +2966,7 @@ update_layered_rendering_state(struct zink_context *ctx)
return;
unsigned framebffer_is_layered = zink_framebuffer_get_num_layers(&ctx->fb_state) > 1;
VKCTX(CmdPushConstants)(
ctx->batch.bs->cmdbuf,
ctx->bs->cmdbuf,
zink_screen(ctx->base.screen)->gfx_push_constant_layout,
VK_SHADER_STAGE_ALL_GRAPHICS,
offsetof(struct zink_gfx_push_constant, framebuffer_is_layered), sizeof(unsigned),
@ -3064,7 +3064,7 @@ zink_batch_no_rp_safe(struct zink_context *ctx)
if (ctx->gfx_pipeline_state.render_pass)
zink_end_render_pass(ctx);
else {
VKCTX(CmdEndRendering)(ctx->batch.bs->cmdbuf);
VKCTX(CmdEndRendering)(ctx->bs->cmdbuf);
ctx->in_rp = false;
}
assert(!ctx->in_rp);
@ -3104,7 +3104,7 @@ zink_prep_fb_attachment(struct zink_context *ctx, struct zink_surface *surf, uns
res = zink_resource(surf->base.texture);
} else {
res = zink_resource(surf->base.texture);
zink_batch_resource_usage_set(ctx->batch.bs, res, true, false);
zink_batch_resource_usage_set(ctx->bs, res, true, false);
}
VkAccessFlags access;
@ -3298,7 +3298,7 @@ update_resource_refs_for_stage(struct zink_context *ctx, gl_shader_stage stage)
/* technically this is a failure condition, but there's no safe way out */
continue;
}
zink_batch_resource_usage_set(ctx->batch.bs, res, is_write, is_buffer);
zink_batch_resource_usage_set(ctx->bs, res, is_write, is_buffer);
if (!ctx->unordered_blitting) {
if (is_write || !res->obj->is_buffer)
res->obj->unordered_read = res->obj->unordered_write = false;
@ -3325,7 +3325,7 @@ zink_update_descriptor_refs(struct zink_context *ctx, bool compute)
for (unsigned i = 0; i < last_vbo + 1; i++) {
struct zink_resource *res = zink_resource(ctx->vertex_buffers[i].buffer.resource);
if (res) {
zink_batch_resource_usage_set(ctx->batch.bs, res, false, true);
zink_batch_resource_usage_set(ctx->bs, res, false, true);
if (!ctx->unordered_blitting)
res->obj->unordered_read = false;
}
@ -3338,7 +3338,7 @@ zink_update_descriptor_refs(struct zink_context *ctx, bool compute)
for (unsigned i = 0; i < 2; i++) {
util_dynarray_foreach(&ctx->di.bindless[i].resident, struct zink_bindless_descriptor*, bd) {
struct zink_resource *res = zink_descriptor_surface_resource(&(*bd)->ds);
zink_batch_resource_usage_set(ctx->batch.bs, res, (*bd)->access & PIPE_IMAGE_ACCESS_WRITE, res->obj->is_buffer);
zink_batch_resource_usage_set(ctx->bs, res, (*bd)->access & PIPE_IMAGE_ACCESS_WRITE, res->obj->is_buffer);
if (!ctx->unordered_blitting) {
if ((*bd)->access & PIPE_IMAGE_ACCESS_WRITE || !res->obj->is_buffer)
res->obj->unordered_read = res->obj->unordered_write = false;
@ -3355,7 +3355,7 @@ zink_update_descriptor_refs(struct zink_context *ctx, bool compute)
struct zink_resource *res = globals[i];
if (!res)
continue;
zink_batch_resource_usage_set(ctx->batch.bs, res, true, true);
zink_batch_resource_usage_set(ctx->bs, res, true, true);
res->obj->unordered_read = res->obj->unordered_write = false;
}
}
@ -3368,11 +3368,11 @@ reapply_color_write(struct zink_context *ctx)
const VkBool32 enables[PIPE_MAX_COLOR_BUFS] = {1, 1, 1, 1, 1, 1, 1, 1};
const VkBool32 disables[PIPE_MAX_COLOR_BUFS] = {0};
const unsigned max_att = MIN2(PIPE_MAX_COLOR_BUFS, screen->info.props.limits.maxColorAttachments);
VKCTX(CmdSetColorWriteEnableEXT)(ctx->batch.bs->cmdbuf, max_att, ctx->disable_color_writes ? disables : enables);
VKCTX(CmdSetColorWriteEnableEXT)(ctx->batch.bs->reordered_cmdbuf, max_att, enables);
VKCTX(CmdSetColorWriteEnableEXT)(ctx->bs->cmdbuf, max_att, ctx->disable_color_writes ? disables : enables);
VKCTX(CmdSetColorWriteEnableEXT)(ctx->bs->reordered_cmdbuf, max_att, enables);
assert(screen->info.have_EXT_extended_dynamic_state);
if (ctx->dsa_state)
VKCTX(CmdSetDepthWriteEnable)(ctx->batch.bs->cmdbuf, ctx->disable_color_writes ? VK_FALSE : ctx->dsa_state->hw_state.depth_write);
VKCTX(CmdSetDepthWriteEnable)(ctx->bs->cmdbuf, ctx->disable_color_writes ? VK_FALSE : ctx->dsa_state->hw_state.depth_write);
}
static void
@ -3417,9 +3417,9 @@ flush_batch(struct zink_context *ctx, bool sync)
ctx->deferred_fence = NULL;
if (sync)
sync_flush(ctx, ctx->batch.bs);
sync_flush(ctx, ctx->bs);
if (ctx->batch.bs->is_device_lost) {
if (ctx->bs->is_device_lost) {
check_device_lost(ctx);
} else {
struct zink_screen *screen = zink_screen(ctx->base.screen);
@ -3440,8 +3440,8 @@ flush_batch(struct zink_context *ctx, bool sync)
ctx->di.bindless_refs_dirty = true;
ctx->sample_locations_changed = ctx->gfx_pipeline_state.sample_locations_enabled;
if (zink_screen(ctx->base.screen)->info.dynamic_state2_feats.extendedDynamicState2PatchControlPoints) {
VKCTX(CmdSetPatchControlPointsEXT)(ctx->batch.bs->cmdbuf, ctx->gfx_pipeline_state.dyn_state2.vertices_per_patch);
VKCTX(CmdSetPatchControlPointsEXT)(ctx->batch.bs->reordered_cmdbuf, 1);
VKCTX(CmdSetPatchControlPointsEXT)(ctx->bs->cmdbuf, ctx->gfx_pipeline_state.dyn_state2.vertices_per_patch);
VKCTX(CmdSetPatchControlPointsEXT)(ctx->bs->reordered_cmdbuf, 1);
}
update_feedback_loop_dynamic_state(ctx);
if (screen->info.have_EXT_color_write_enable)
@ -3891,9 +3891,9 @@ zink_flush(struct pipe_context *pctx,
};
VkResult result = VKSCR(CreateSemaphore)(screen->dev, &sci, NULL, &export_sem);
if (zink_screen_handle_vkresult(screen, result)) {
assert(!ctx->batch.bs->signal_semaphore);
ctx->batch.bs->signal_semaphore = export_sem;
ctx->batch.bs->has_work = true;
assert(!ctx->bs->signal_semaphore);
ctx->bs->signal_semaphore = export_sem;
ctx->bs->has_work = true;
} else {
mesa_loge("ZINK: vkCreateSemaphore failed (%s)", vk_Result_to_str(result));
@ -3902,7 +3902,7 @@ zink_flush(struct pipe_context *pctx,
}
}
if (!ctx->batch.bs->has_work) {
if (!ctx->bs->has_work) {
if (pfence) {
/* reuse last fence */
bs = ctx->last_batch_state;
@ -3918,7 +3918,7 @@ zink_flush(struct pipe_context *pctx,
if (ctx->tc && !ctx->track_renderpasses)
tc_driver_internal_flush_notify(ctx->tc);
} else {
bs = ctx->batch.bs;
bs = ctx->bs;
if (deferred && !(flags & PIPE_FLUSH_FENCE_FD) && pfence)
deferred_fence = true;
else
@ -3947,7 +3947,7 @@ zink_flush(struct pipe_context *pctx,
}
if (export_sem) {
pipe_reference(NULL, &mfence->reference);
util_dynarray_append(&ctx->batch.bs->fences, struct zink_tc_fence*, mfence);
util_dynarray_append(&ctx->bs->fences, struct zink_tc_fence*, mfence);
}
if (deferred_fence) {
@ -3973,7 +3973,7 @@ zink_fence_wait(struct pipe_context *pctx)
{
struct zink_context *ctx = zink_context(pctx);
if (ctx->batch.bs->has_work)
if (ctx->bs->has_work)
pctx->flush(pctx, NULL, PIPE_FLUSH_HINT_FINISH);
if (ctx->last_batch_state)
stall(ctx);
@ -3998,7 +3998,7 @@ zink_wait_on_batch(struct zink_context *ctx, uint64_t batch_id)
bool
zink_check_batch_completion(struct zink_context *ctx, uint64_t batch_id)
{
assert(ctx->batch.bs);
assert(ctx->bs);
if (!batch_id)
/* not submitted yet */
return false;
@ -4054,14 +4054,14 @@ zink_texture_barrier(struct pipe_context *pctx, unsigned flags)
dmb.dstStageMask |= depth_flags;
}
*/
VKCTX(CmdPipelineBarrier2)(ctx->batch.bs->cmdbuf, &dep);
VKCTX(CmdPipelineBarrier2)(ctx->bs->cmdbuf, &dep);
} else {
VkMemoryBarrier bmb = {0};
bmb.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
bmb.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
bmb.dstAccessMask = dst;
VKCTX(CmdPipelineBarrier)(
ctx->batch.bs->cmdbuf,
ctx->bs->cmdbuf,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
0,
@ -4081,7 +4081,7 @@ mem_barrier(struct zink_context *ctx, VkPipelineStageFlags src_stage, VkPipeline
mb.srcAccessMask = src;
mb.dstAccessMask = dst;
zink_batch_no_rp(ctx);
VKCTX(CmdPipelineBarrier)(ctx->batch.bs->cmdbuf, src_stage, dst_stage, 0, 1, &mb, 0, NULL, 0, NULL);
VKCTX(CmdPipelineBarrier)(ctx->bs->cmdbuf, src_stage, dst_stage, 0, 1, &mb, 0, NULL, 0, NULL);
}
void
@ -4495,7 +4495,7 @@ rebind_buffer(struct zink_context *ctx, struct zink_resource *res, uint32_t rebi
}
end:
if (num_rebinds)
zink_batch_resource_usage_set(ctx->batch.bs, res, has_write, true);
zink_batch_resource_usage_set(ctx->bs, res, has_write, true);
return num_rebinds;
}
@ -4516,8 +4516,8 @@ zink_copy_buffer(struct zink_context *ctx, struct zink_resource *dst, struct zin
zink_screen(ctx->base.screen)->buffer_barrier(ctx, src, VK_ACCESS_TRANSFER_READ_BIT, 0);
bool unordered_dst = zink_resource_buffer_transfer_dst_barrier(ctx, dst, dst_offset, size);
bool can_unorder = unordered_dst && unordered_src && !ctx->no_reorder;
VkCommandBuffer cmdbuf = can_unorder ? ctx->batch.bs->reordered_cmdbuf : zink_get_cmdbuf(ctx, src, dst);
ctx->batch.bs->has_barriers |= can_unorder;
VkCommandBuffer cmdbuf = can_unorder ? ctx->bs->reordered_cmdbuf : zink_get_cmdbuf(ctx, src, dst);
ctx->bs->has_barriers |= can_unorder;
zink_batch_reference_resource_rw(ctx, src, false);
zink_batch_reference_resource_rw(ctx, dst, true);
if (unlikely(zink_debug & ZINK_DEBUG_SYNC)) {
@ -4613,15 +4613,15 @@ zink_copy_image_buffer(struct zink_context *ctx, struct zink_resource *dst, stru
region.imageExtent.height = src_box->height;
VkCommandBuffer cmdbuf = unsync ?
ctx->batch.bs->unsynchronized_cmdbuf :
ctx->bs->unsynchronized_cmdbuf :
/* never promote to unordered if swapchain was acquired */
needs_present_readback ?
ctx->batch.bs->cmdbuf :
ctx->bs->cmdbuf :
buf2img ? zink_get_cmdbuf(ctx, buf, use_img) : zink_get_cmdbuf(ctx, use_img, buf);
zink_batch_reference_resource_rw(ctx, use_img, buf2img);
zink_batch_reference_resource_rw(ctx, buf, !buf2img);
if (unsync) {
ctx->batch.bs->has_unsync = true;
ctx->bs->has_unsync = true;
use_img->obj->unsync_access = true;
}
@ -4854,7 +4854,7 @@ zink_resource_commit(struct pipe_context *pctx, struct pipe_resource *pres, unsi
if (sem) {
zink_batch_add_wait_semaphore(ctx, sem);
zink_batch_reference_resource_rw(ctx, res, true);
ctx->batch.bs->has_work = true;
ctx->bs->has_work = true;
}
} else {
check_device_lost(ctx);
@ -4920,22 +4920,22 @@ zink_rebind_all_buffers(struct zink_context *ctx)
for (unsigned slot = 0; slot < ctx->di.num_ubos[shader]; slot++) {
struct zink_resource *res = rebind_ubo(ctx, shader, slot);
if (res)
zink_batch_resource_usage_set(ctx->batch.bs, res, false, true);
zink_batch_resource_usage_set(ctx->bs, res, false, true);
}
for (unsigned slot = 0; slot < ctx->di.num_sampler_views[shader]; slot++) {
struct zink_resource *res = rebind_tbo(ctx, shader, slot);
if (res)
zink_batch_resource_usage_set(ctx->batch.bs, res, false, true);
zink_batch_resource_usage_set(ctx->bs, res, false, true);
}
for (unsigned slot = 0; slot < ctx->di.num_ssbos[shader]; slot++) {
struct zink_resource *res = rebind_ssbo(ctx, shader, slot);
if (res)
zink_batch_resource_usage_set(ctx->batch.bs, res, (ctx->writable_ssbos[shader] & BITFIELD64_BIT(slot)) != 0, true);
zink_batch_resource_usage_set(ctx->bs, res, (ctx->writable_ssbos[shader] & BITFIELD64_BIT(slot)) != 0, true);
}
for (unsigned slot = 0; slot < ctx->di.num_images[shader]; slot++) {
struct zink_resource *res = rebind_ibo(ctx, shader, slot);
if (res)
zink_batch_resource_usage_set(ctx->batch.bs, res, (ctx->image_views[shader][slot].base.access & PIPE_IMAGE_ACCESS_WRITE) != 0, true);
zink_batch_resource_usage_set(ctx->bs, res, (ctx->image_views[shader][slot].base.access & PIPE_IMAGE_ACCESS_WRITE) != 0, true);
}
}
}
@ -5041,7 +5041,7 @@ zink_emit_string_marker(struct pipe_context *pctx,
string,
{ 0 }
};
screen->vk.CmdInsertDebugUtilsLabelEXT(ctx->batch.bs->cmdbuf, &label);
screen->vk.CmdInsertDebugUtilsLabelEXT(ctx->bs->cmdbuf, &label);
free(temp);
}
@ -5131,7 +5131,7 @@ void
zink_flush_dgc(struct zink_context *ctx)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
struct zink_batch_state *bs = ctx->batch.bs;
struct zink_batch_state *bs = ctx->bs;
if (!ctx->dgc.valid)
return;
@ -5226,7 +5226,7 @@ zink_flush_dgc(struct zink_context *ctx)
VK_NULL_HANDLE,
0
};
VKCTX(CmdExecuteGeneratedCommandsNV)(ctx->batch.bs->cmdbuf, VK_FALSE, &gen);
VKCTX(CmdExecuteGeneratedCommandsNV)(ctx->bs->cmdbuf, VK_FALSE, &gen);
pipe_resource_reference(&pres, NULL);
}
@ -5543,7 +5543,7 @@ zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
}
zink_start_batch(ctx);
if (!ctx->batch.bs)
if (!ctx->bs)
goto fail;
if (screen->compact_descriptors)
@ -5560,8 +5560,8 @@ zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
* a tess shader later
*/
if (screen->info.dynamic_state2_feats.extendedDynamicState2PatchControlPoints) {
VKCTX(CmdSetPatchControlPointsEXT)(ctx->batch.bs->cmdbuf, 1);
VKCTX(CmdSetPatchControlPointsEXT)(ctx->batch.bs->reordered_cmdbuf, 1);
VKCTX(CmdSetPatchControlPointsEXT)(ctx->bs->cmdbuf, 1);
VKCTX(CmdSetPatchControlPointsEXT)(ctx->bs->reordered_cmdbuf, 1);
}
}
if (!is_copy_only) {
@ -5786,7 +5786,7 @@ zink_cmd_debug_marker_begin(struct zink_context *ctx, VkCommandBuffer cmdbuf, co
info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
info.pLabelName = name;
VKCTX(CmdBeginDebugUtilsLabelEXT)(cmdbuf ? cmdbuf : ctx->batch.bs->cmdbuf, &info);
VKCTX(CmdBeginDebugUtilsLabelEXT)(cmdbuf ? cmdbuf : ctx->bs->cmdbuf, &info);
free(name);
return true;

View file

@ -1070,7 +1070,7 @@ static void
enlarge_db(struct zink_context *ctx)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
struct zink_batch_state *bs = ctx->batch.bs;
struct zink_batch_state *bs = ctx->bs;
/* ensure current db surives */
zink_batch_reference_resource(ctx, bs->dd.db);
/* rebinding a db mid-batch is extremely costly: if we start with a factor
@ -1085,7 +1085,7 @@ static void
update_separable(struct zink_context *ctx, struct zink_program *pg)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
struct zink_batch_state *bs = ctx->batch.bs;
struct zink_batch_state *bs = ctx->bs;
unsigned use_buffer = 0;
VkDescriptorGetInfoEXT info;
@ -1157,7 +1157,7 @@ static void
zink_descriptors_update_masked_buffer(struct zink_context *ctx, bool is_compute, uint8_t changed_sets, uint8_t bind_sets)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
struct zink_batch_state *bs = ctx->batch.bs;
struct zink_batch_state *bs = ctx->bs;
struct zink_program *pg = is_compute ? &ctx->curr_compute->base : &ctx->curr_program->base;
/* skip if no descriptors are updated */
@ -1233,7 +1233,7 @@ void
zink_descriptors_update_masked(struct zink_context *ctx, bool is_compute, uint8_t changed_sets, uint8_t bind_sets)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
struct zink_batch_state *bs = ctx->batch.bs;
struct zink_batch_state *bs = ctx->bs;
struct zink_program *pg = is_compute ? &ctx->curr_compute->base : &ctx->curr_program->base;
VkDescriptorSet desc_sets[ZINK_DESCRIPTOR_BASE_TYPES];
@ -1247,7 +1247,7 @@ zink_descriptors_update_masked(struct zink_context *ctx, bool is_compute, uint8_
return;
}
/* no flushing allowed: sets are allocated to the batch, so this breaks everything */
assert(ctx->batch.bs == bs);
assert(ctx->bs == bs);
u_foreach_bit(type, changed_sets) {
assert(type + 1 < pg->num_dsl);
@ -1285,7 +1285,7 @@ zink_descriptors_update_masked(struct zink_context *ctx, bool is_compute, uint8_
static void
bind_bindless_db(struct zink_context *ctx, struct zink_program *pg)
{
struct zink_batch_state *bs = ctx->batch.bs;
struct zink_batch_state *bs = ctx->bs;
struct zink_screen *screen = zink_screen(ctx->base.screen);
unsigned index = 1;
VkDeviceSize offset = 0;
@ -1306,7 +1306,7 @@ bind_bindless_db(struct zink_context *ctx, struct zink_program *pg)
void
zink_descriptors_update(struct zink_context *ctx, bool is_compute)
{
struct zink_batch_state *bs = ctx->batch.bs;
struct zink_batch_state *bs = ctx->bs;
struct zink_program *pg = is_compute ? &ctx->curr_compute->base : &ctx->curr_program->base;
struct zink_screen *screen = zink_screen(ctx->base.screen);
bool have_KHR_push_descriptor = screen->info.have_KHR_push_descriptor;
@ -1454,7 +1454,7 @@ zink_descriptors_update(struct zink_context *ctx, bool is_compute)
if (zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_DB) {
bind_bindless_db(ctx, pg);
} else {
VKCTX(CmdBindDescriptorSets)(ctx->batch.bs->cmdbuf, is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
VKCTX(CmdBindDescriptorSets)(ctx->bs->cmdbuf, is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
pg->layout, screen->desc_set_id[ZINK_DESCRIPTOR_BINDLESS], 1, &ctx->dd.t.bindless_set,
0, NULL);
}

View file

@ -82,7 +82,7 @@ zink_emit_stream_output_targets(struct pipe_context *pctx)
t->base.buffer_offset + t->base.buffer_size);
}
VKCTX(CmdBindTransformFeedbackBuffersEXT)(ctx->batch.bs->cmdbuf, 0, ctx->num_so_targets,
VKCTX(CmdBindTransformFeedbackBuffersEXT)(ctx->bs->cmdbuf, 0, ctx->num_so_targets,
buffers, buffer_offsets,
buffer_sizes);
ctx->dirty_so_targets = false;
@ -167,16 +167,16 @@ zink_bind_vertex_buffers(struct zink_context *ctx)
DYNAMIC_STATE != ZINK_DYNAMIC_VERTEX_INPUT2 &&
DYNAMIC_STATE != ZINK_DYNAMIC_VERTEX_INPUT) {
if (elems->hw_state.num_bindings)
VKCTX(CmdBindVertexBuffers2)(ctx->batch.bs->cmdbuf, 0,
VKCTX(CmdBindVertexBuffers2)(ctx->bs->cmdbuf, 0,
elems->hw_state.num_bindings,
buffers, buffer_offsets, NULL, elems->hw_state.b.strides);
} else if (elems->hw_state.num_bindings)
VKSCR(CmdBindVertexBuffers)(ctx->batch.bs->cmdbuf, 0,
VKSCR(CmdBindVertexBuffers)(ctx->bs->cmdbuf, 0,
elems->hw_state.num_bindings,
buffers, buffer_offsets);
if (DYNAMIC_STATE == ZINK_DYNAMIC_VERTEX_INPUT2 || DYNAMIC_STATE == ZINK_DYNAMIC_VERTEX_INPUT)
VKCTX(CmdSetVertexInputEXT)(ctx->batch.bs->cmdbuf,
VKCTX(CmdSetVertexInputEXT)(ctx->bs->cmdbuf,
elems->hw_state.num_bindings, elems->hw_state.dynbindings,
elems->hw_state.num_attribs, elems->hw_state.dynattribs);
@ -186,7 +186,7 @@ zink_bind_vertex_buffers(struct zink_context *ctx)
ALWAYS_INLINE static void
update_drawid(struct zink_context *ctx, unsigned draw_id)
{
VKCTX(CmdPushConstants)(ctx->batch.bs->cmdbuf, ctx->curr_program->base.layout, VK_SHADER_STAGE_ALL_GRAPHICS,
VKCTX(CmdPushConstants)(ctx->bs->cmdbuf, ctx->curr_program->base.layout, VK_SHADER_STAGE_ALL_GRAPHICS,
offsetof(struct zink_gfx_push_constant, draw_id), sizeof(unsigned),
&draw_id);
}
@ -240,7 +240,7 @@ draw_indexed_need_index_buffer_unref(struct zink_context *ctx,
unsigned draw_id,
bool needs_drawid)
{
VkCommandBuffer cmdbuf = ctx->batch.bs->cmdbuf;
VkCommandBuffer cmdbuf = ctx->bs->cmdbuf;
if (dinfo->increment_draw_id && needs_drawid) {
for (unsigned i = 0; i < num_draws; i++) {
update_drawid(ctx, draw_id);
@ -300,7 +300,7 @@ draw_indexed(struct zink_context *ctx,
unsigned draw_id,
bool needs_drawid)
{
VkCommandBuffer cmdbuf = ctx->batch.bs->cmdbuf;
VkCommandBuffer cmdbuf = ctx->bs->cmdbuf;
if (dinfo->increment_draw_id && needs_drawid) {
for (unsigned i = 0; i < num_draws; i++) {
update_drawid(ctx, draw_id);
@ -366,7 +366,7 @@ draw(struct zink_context *ctx,
unsigned draw_id,
bool needs_drawid)
{
VkCommandBuffer cmdbuf = ctx->batch.bs->cmdbuf;
VkCommandBuffer cmdbuf = ctx->bs->cmdbuf;
if (dinfo->increment_draw_id && needs_drawid) {
for (unsigned i = 0; i < num_draws; i++) {
update_drawid(ctx, draw_id);
@ -493,7 +493,7 @@ zink_draw(struct pipe_context *pctx,
struct zink_screen *screen = zink_screen(pctx->screen);
struct zink_rasterizer_state *rast_state = ctx->rast_state;
struct zink_depth_stencil_alpha_state *dsa_state = ctx->dsa_state;
struct zink_batch_state *bs = ctx->batch.bs;
struct zink_batch_state *bs = ctx->bs;
struct zink_so_target *so_target =
dindirect && dindirect->count_from_stream_output ?
zink_so_target(dindirect->count_from_stream_output) : NULL;
@ -597,7 +597,7 @@ zink_draw(struct pipe_context *pctx,
mb.pNext = NULL;
mb.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
mb.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
VKSCR(CmdPipelineBarrier)(ctx->batch.bs->cmdbuf,
VKSCR(CmdPipelineBarrier)(ctx->bs->cmdbuf,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
0, 1, &mb, 0, NULL, 0, NULL);
@ -1143,7 +1143,7 @@ static void
zink_vertex_state_mask(struct zink_context *ctx, struct pipe_vertex_state *vstate, uint32_t partial_velem_mask)
{
struct zink_vertex_state *zstate = (struct zink_vertex_state *)vstate;
VkCommandBuffer cmdbuf = ctx->batch.bs->cmdbuf;
VkCommandBuffer cmdbuf = ctx->bs->cmdbuf;
if (partial_velem_mask == vstate->input.full_velem_mask) {
VKCTX(CmdSetVertexInputEXT)(cmdbuf,
@ -1171,14 +1171,14 @@ static void
zink_bind_vertex_state(struct zink_context *ctx, struct pipe_vertex_state *vstate, uint32_t partial_velem_mask)
{
struct zink_vertex_state *zstate = (struct zink_vertex_state *)vstate;
VkCommandBuffer cmdbuf = ctx->batch.bs->cmdbuf;
VkCommandBuffer cmdbuf = ctx->bs->cmdbuf;
if (!vstate->input.vbuffer.buffer.resource)
return;
zink_vertex_state_mask<HAS_POPCNT>(ctx, vstate, partial_velem_mask);
struct zink_resource *res = zink_resource(vstate->input.vbuffer.buffer.resource);
zink_batch_resource_usage_set(ctx->batch.bs, res, false, true);
zink_batch_resource_usage_set(ctx->bs, res, false, true);
VkDeviceSize offset = vstate->input.vbuffer.buffer_offset;
if (unlikely(zink_debug & ZINK_DEBUG_DGC)) {
VkBindVertexBufferIndirectCommandNV *ptr;
@ -1231,7 +1231,7 @@ static void
zink_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
{
struct zink_context *ctx = zink_context(pctx);
struct zink_batch_state *bs = ctx->batch.bs;
struct zink_batch_state *bs = ctx->bs;
struct zink_screen *screen = zink_screen(pctx->screen);
if (ctx->render_condition_active)
@ -1259,7 +1259,7 @@ zink_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
mb.pNext = NULL;
mb.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
mb.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
VKSCR(CmdPipelineBarrier)(ctx->batch.bs->cmdbuf,
VKSCR(CmdPipelineBarrier)(ctx->bs->cmdbuf,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
0, 1, &mb, 0, NULL, 0, NULL);

View file

@ -159,7 +159,7 @@ zink_fence_finish(struct zink_screen *screen, struct pipe_context *pctx, struct
if (pctx && mfence->deferred_ctx == pctx) {
if (mfence->fence == ctx->deferred_fence) {
zink_context(pctx)->batch.bs->has_work = true;
zink_context(pctx)->bs->has_work = true;
/* this must be the current batch */
pctx->flush(pctx, NULL, !timeout_ns ? PIPE_FLUSH_ASYNC : 0);
if (!timeout_ns)
@ -236,10 +236,10 @@ zink_fence_server_signal(struct pipe_context *pctx, struct pipe_fence_handle *pf
struct zink_context *ctx = zink_context(pctx);
struct zink_tc_fence *mfence = (struct zink_tc_fence *)pfence;
assert(!ctx->batch.bs->signal_semaphore);
ctx->batch.bs->signal_semaphore = mfence->sem;
ctx->batch.bs->has_work = true;
struct zink_batch_state *bs = ctx->batch.bs;
assert(!ctx->bs->signal_semaphore);
ctx->bs->signal_semaphore = mfence->sem;
ctx->bs->has_work = true;
struct zink_batch_state *bs = ctx->bs;
/* this must produce a synchronous flush that completes before the function returns */
pctx->flush(pctx, NULL, 0);
if (zink_screen(ctx->base.screen)->threaded_submit)
@ -258,10 +258,10 @@ zink_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *pfen
mfence->deferred_ctx = pctx;
/* this will be applied on the next submit */
VkPipelineStageFlags flag = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
util_dynarray_append(&ctx->batch.bs->wait_semaphores, VkSemaphore, mfence->sem);
util_dynarray_append(&ctx->batch.bs->wait_semaphore_stages, VkPipelineStageFlags, flag);
util_dynarray_append(&ctx->bs->wait_semaphores, VkSemaphore, mfence->sem);
util_dynarray_append(&ctx->bs->wait_semaphore_stages, VkPipelineStageFlags, flag);
pipe_reference(NULL, &mfence->reference);
util_dynarray_append(&ctx->batch.bs->fences, struct zink_tc_fence*, mfence);
util_dynarray_append(&ctx->bs->fences, struct zink_tc_fence*, mfence);
/* transfer the external wait sempahore ownership to the next submit */
mfence->sem = VK_NULL_HANDLE;

View file

@ -665,7 +665,7 @@ zink_kopper_acquire(struct zink_context *ctx, struct zink_resource *res, uint64_
kill_swapchain(ctx, res);
}
bool is_kill = is_swapchain_kill(ret);
zink_batch_usage_set(&cdt->swapchain->batch_uses, ctx->batch.bs);
zink_batch_usage_set(&cdt->swapchain->batch_uses, ctx->bs);
return !is_kill;
}
@ -985,7 +985,7 @@ zink_kopper_acquire_readback(struct zink_context *ctx, struct zink_resource *res
res->base.b.width0 = ctx->swapchain_size.width;
res->base.b.height0 = ctx->swapchain_size.height;
}
zink_batch_usage_set(&cdt->swapchain->batch_uses, ctx->batch.bs);
zink_batch_usage_set(&cdt->swapchain->batch_uses, ctx->bs);
*readback = res;
return true;
}

View file

@ -127,7 +127,7 @@ begin_vk_query_indexed(struct zink_context *ctx, struct zink_vk_query *vkq, int
VkQueryControlFlags flags)
{
if (!vkq->started) {
VKCTX(CmdBeginQueryIndexedEXT)(ctx->batch.bs->cmdbuf,
VKCTX(CmdBeginQueryIndexedEXT)(ctx->bs->cmdbuf,
vkq->pool->query_pool,
vkq->query_id,
flags,
@ -140,7 +140,7 @@ static void
end_vk_query_indexed(struct zink_context *ctx, struct zink_vk_query *vkq, int index)
{
if (vkq->started) {
VKCTX(CmdEndQueryIndexedEXT)(ctx->batch.bs->cmdbuf,
VKCTX(CmdEndQueryIndexedEXT)(ctx->bs->cmdbuf,
vkq->pool->query_pool,
vkq->query_id, index);
vkq->started = false;
@ -151,8 +151,8 @@ static void
reset_vk_query_pool(struct zink_context *ctx, struct zink_vk_query *vkq)
{
if (vkq->needs_reset) {
VKCTX(CmdResetQueryPool)(ctx->batch.bs->reordered_cmdbuf, vkq->pool->query_pool, vkq->query_id, 1);
ctx->batch.bs->has_barriers = true;
VKCTX(CmdResetQueryPool)(ctx->bs->reordered_cmdbuf, vkq->pool->query_pool, vkq->query_id, 1);
ctx->bs->has_barriers = true;
}
vkq->needs_reset = false;
}
@ -387,7 +387,7 @@ unref_vk_pool(struct zink_context *ctx, struct zink_query_pool *pool)
{
if (!pool || --pool->refcount)
return;
util_dynarray_append(&ctx->batch.bs->dead_querypools, VkQueryPool, pool->query_pool);
util_dynarray_append(&ctx->bs->dead_querypools, VkQueryPool, pool->query_pool);
if (list_is_linked(&pool->list))
list_del(&pool->list);
FREE(pool);
@ -531,7 +531,7 @@ zink_create_query(struct pipe_context *pctx,
if (!qbo_append(pctx->screen, query))
goto fail;
ctx->batch.bs->has_work = true;
ctx->bs->has_work = true;
query->needs_reset = true;
query->predicate_dirty = true;
if (query->type == PIPE_QUERY_TIMESTAMP) {
@ -775,9 +775,9 @@ copy_pool_results_to_buffer(struct zink_context *ctx, struct zink_query *query,
util_range_add(&res->base.b, &res->valid_buffer_range, offset, offset + result_size);
assert(query_id < NUM_QUERIES);
res->obj->unordered_read = res->obj->unordered_write = false;
VKCTX(CmdCopyQueryPoolResults)(ctx->batch.bs->cmdbuf, pool, query_id, num_results, res->obj->buffer,
VKCTX(CmdCopyQueryPoolResults)(ctx->bs->cmdbuf, pool, query_id, num_results, res->obj->buffer,
offset, base_result_size, flags);
zink_cmd_debug_marker_end(ctx, ctx->batch.bs->cmdbuf, marker);
zink_cmd_debug_marker_end(ctx, ctx->bs->cmdbuf, marker);
}
static void
@ -898,15 +898,15 @@ begin_query(struct zink_context *ctx, struct zink_query *q)
reset_qbos(ctx, q);
reset_query_range(ctx, q);
q->active = true;
ctx->batch.bs->has_work = true;
ctx->bs->has_work = true;
struct zink_query_start *start = util_dynarray_top_ptr(&q->starts, struct zink_query_start);
if (q->type == PIPE_QUERY_TIME_ELAPSED) {
VKCTX(CmdWriteTimestamp)(ctx->batch.bs->cmdbuf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, start->vkq[0]->pool->query_pool, start->vkq[0]->query_id);
VKCTX(CmdWriteTimestamp)(ctx->bs->cmdbuf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, start->vkq[0]->pool->query_pool, start->vkq[0]->query_id);
if (!ctx->in_rp)
update_qbo(ctx, q);
zink_batch_usage_set(&q->batch_uses, ctx->batch.bs);
_mesa_set_add(&ctx->batch.bs->active_queries, q);
zink_batch_usage_set(&q->batch_uses, ctx->bs);
_mesa_set_add(&ctx->bs->active_queries, q);
}
/* ignore the rest of begin_query for timestamps */
if (is_time_query(q))
@ -941,15 +941,15 @@ begin_query(struct zink_context *ctx, struct zink_query *q)
begin_vk_query_indexed(ctx, start->vkq[0], q->index, flags);
}
if (q->vkqtype != VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT && q->vkqtype != VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT)
VKCTX(CmdBeginQuery)(ctx->batch.bs->cmdbuf, start->vkq[0]->pool->query_pool, start->vkq[0]->query_id, flags);
VKCTX(CmdBeginQuery)(ctx->bs->cmdbuf, start->vkq[0]->pool->query_pool, start->vkq[0]->query_id, flags);
if (q->type == PIPE_QUERY_PIPELINE_STATISTICS_SINGLE && q->index == PIPE_STAT_QUERY_IA_VERTICES) {
assert(!ctx->vertices_query);
ctx->vertices_query = q;
}
if (needs_stats_list(q))
list_addtail(&q->stats_list, &ctx->primitives_generated_queries);
zink_batch_usage_set(&q->batch_uses, ctx->batch.bs);
_mesa_set_add(&ctx->batch.bs->active_queries, q);
zink_batch_usage_set(&q->batch_uses, ctx->bs);
_mesa_set_add(&ctx->bs->active_queries, q);
if (q->needs_rast_discard_workaround) {
ctx->primitives_generated_active = true;
if (zink_set_rasterizer_discard(ctx, true))
@ -994,7 +994,7 @@ static void
update_query_id(struct zink_context *ctx, struct zink_query *q)
{
query_pool_get_range(ctx, q);
ctx->batch.bs->has_work = true;
ctx->bs->has_work = true;
q->has_draws = false;
}
@ -1031,7 +1031,7 @@ end_query(struct zink_context *ctx, struct zink_query *q)
}
if (q->vkqtype != VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT &&
q->vkqtype != VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT && !is_time_query(q))
VKCTX(CmdEndQuery)(ctx->batch.bs->cmdbuf, start->vkq[0]->pool->query_pool, start->vkq[0]->query_id);
VKCTX(CmdEndQuery)(ctx->bs->cmdbuf, start->vkq[0]->pool->query_pool, start->vkq[0]->query_id);
if (q->type == PIPE_QUERY_PIPELINE_STATISTICS_SINGLE &&
q->index == PIPE_STAT_QUERY_IA_VERTICES)
@ -1087,10 +1087,10 @@ zink_end_query(struct pipe_context *pctx,
reset_qbos(ctx, query);
reset_query_range(ctx, query);
struct zink_query_start *start = util_dynarray_top_ptr(&query->starts, struct zink_query_start);
VKCTX(CmdWriteTimestamp)(ctx->batch.bs->cmdbuf, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
VKCTX(CmdWriteTimestamp)(ctx->bs->cmdbuf, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
start->vkq[0]->pool->query_pool, start->vkq[0]->query_id);
zink_batch_usage_set(&query->batch_uses, ctx->batch.bs);
_mesa_set_add(&ctx->batch.bs->active_queries, query);
zink_batch_usage_set(&query->batch_uses, ctx->bs);
_mesa_set_add(&ctx->bs->active_queries, query);
query->needs_update = true;
} else if (query->active) {
/* this should be a tc-optimized query end that doesn't split a renderpass */
@ -1162,7 +1162,7 @@ suspend_query(struct zink_context *ctx, struct zink_query *query)
static void
suspend_queries(struct zink_context *ctx, bool rp_only)
{
set_foreach(&ctx->batch.bs->active_queries, entry) {
set_foreach(&ctx->bs->active_queries, entry) {
struct zink_query *query = (void*)entry->key;
if (query->suspended || (rp_only && !query->started_in_rp))
continue;
@ -1300,7 +1300,7 @@ zink_start_conditional_render(struct zink_context *ctx)
begin_info.buffer = ctx->render_condition.query->predicate->obj->buffer;
begin_info.flags = begin_flags;
ctx->render_condition.query->predicate->obj->unordered_read = false;
VKCTX(CmdBeginConditionalRenderingEXT)(ctx->batch.bs->cmdbuf, &begin_info);
VKCTX(CmdBeginConditionalRenderingEXT)(ctx->bs->cmdbuf, &begin_info);
zink_batch_reference_resource_rw(ctx, ctx->render_condition.query->predicate, false);
ctx->render_condition.active = true;
}
@ -1312,7 +1312,7 @@ zink_stop_conditional_render(struct zink_context *ctx)
zink_clear_apply_conditionals(ctx);
if (unlikely(!zink_screen(ctx->base.screen)->info.have_EXT_conditional_rendering) || !ctx->render_condition.active)
return;
VKCTX(CmdEndConditionalRenderingEXT)(ctx->batch.bs->cmdbuf);
VKCTX(CmdEndConditionalRenderingEXT)(ctx->bs->cmdbuf);
ctx->render_condition.active = false;
}

View file

@ -756,7 +756,7 @@ begin_render_pass(struct zink_context *ctx)
#endif
rpbi.pNext = &infos;
VKCTX(CmdBeginRenderPass)(ctx->batch.bs->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
VKCTX(CmdBeginRenderPass)(ctx->bs->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
ctx->in_rp = true;
return clear_buffers;
}
@ -837,7 +837,7 @@ void
zink_end_render_pass(struct zink_context *ctx)
{
if (ctx->in_rp) {
VKCTX(CmdEndRenderPass)(ctx->batch.bs->cmdbuf);
VKCTX(CmdEndRenderPass)(ctx->bs->cmdbuf);
for (unsigned i = 0; i < ctx->fb_state.nr_cbufs; i++) {
struct zink_ctx_surface *csurf = (struct zink_ctx_surface*)ctx->fb_state.cbufs[i];

View file

@ -1763,9 +1763,9 @@ add_resource_bind(struct zink_context *ctx, struct zink_resource *res, unsigned
ctx->base.resource_copy_region(&ctx->base, &res->base.b, i, 0, 0, 0, &staging.base.b, i, &box);
}
if (old_obj->exportable) {
simple_mtx_lock(&ctx->batch.bs->exportable_lock);
_mesa_set_remove_key(&ctx->batch.bs->dmabuf_exports, &staging);
simple_mtx_unlock(&ctx->batch.bs->exportable_lock);
simple_mtx_lock(&ctx->bs->exportable_lock);
_mesa_set_remove_key(&ctx->bs->dmabuf_exports, &staging);
simple_mtx_unlock(&ctx->bs->exportable_lock);
}
zink_resource_object_reference(screen, &old_obj, NULL);
return true;

View file

@ -1837,14 +1837,14 @@ zink_flush_frontbuffer(struct pipe_screen *pscreen,
zink_kopper_acquire(ctx, res, UINT64_MAX);
ctx->needs_present = res;
/* set batch usage to submit acquire semaphore */
zink_batch_resource_usage_set(ctx->batch.bs, res, true, false);
zink_batch_resource_usage_set(ctx->bs, res, true, false);
/* ensure the resource is set up to present garbage */
ctx->base.flush_resource(&ctx->base, pres);
}
/* handle any outstanding acquire submits (not just from above) */
if (ctx->swapchain || ctx->needs_present) {
ctx->batch.bs->has_work = true;
ctx->bs->has_work = true;
pctx->flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
if (ctx->last_batch_state && screen->threaded_submit) {
struct zink_batch_state *bs = ctx->last_batch_state;

View file

@ -252,10 +252,10 @@ unordered_res_exec(const struct zink_context *ctx, const struct zink_resource *r
if (res->obj->unordered_read && res->obj->unordered_write)
return true;
/* if testing write access but have any ordered read access, cannot promote */
if (is_write && zink_batch_usage_matches(res->obj->bo->reads.u, ctx->batch.bs) && !res->obj->unordered_read)
if (is_write && zink_batch_usage_matches(res->obj->bo->reads.u, ctx->bs) && !res->obj->unordered_read)
return false;
/* if write access is unordered or nonexistent, always promote */
return res->obj->unordered_write || !zink_batch_usage_matches(res->obj->bo->writes.u, ctx->batch.bs);
return res->obj->unordered_write || !zink_batch_usage_matches(res->obj->bo->writes.u, ctx->bs);
}
static ALWAYS_INLINE bool
@ -289,11 +289,11 @@ zink_get_cmdbuf(struct zink_context *ctx, struct zink_resource *src, struct zink
zink_batch_no_rp(ctx);
if (unordered_exec) {
ctx->batch.bs->has_barriers = true;
ctx->batch.bs->has_work = true;
return ctx->batch.bs->reordered_cmdbuf;
ctx->bs->has_barriers = true;
ctx->bs->has_work = true;
return ctx->bs->reordered_cmdbuf;
}
return ctx->batch.bs->cmdbuf;
return ctx->bs->cmdbuf;
}
static void
@ -468,8 +468,8 @@ struct update_unordered_access_and_get_cmdbuf<true> {
assert(!usage_matches);
res->obj->unordered_write = true;
res->obj->unordered_read = true;
ctx->batch.bs->has_unsync = true;
return ctx->batch.bs->unsynchronized_cmdbuf;
ctx->bs->has_unsync = true;
return ctx->bs->unsynchronized_cmdbuf;
}
};
@ -483,12 +483,12 @@ struct update_unordered_access_and_get_cmdbuf<false> {
if (is_write || zink_resource_usage_check_completion_fast(zink_screen(ctx->base.screen), res, ZINK_RESOURCE_ACCESS_RW))
res->obj->unordered_read = true;
}
if (zink_resource_usage_matches(res, ctx->batch.bs) && !ctx->unordered_blitting &&
if (zink_resource_usage_matches(res, ctx->bs) && !ctx->unordered_blitting &&
/* if current batch usage exists with ordered non-transfer access, never promote
* this avoids layout dsync
*/
(!res->obj->unordered_read || !res->obj->unordered_write)) {
cmdbuf = ctx->batch.bs->cmdbuf;
cmdbuf = ctx->bs->cmdbuf;
res->obj->unordered_write = false;
res->obj->unordered_read = false;
/* it's impossible to detect this from the caller
@ -498,7 +498,7 @@ struct update_unordered_access_and_get_cmdbuf<false> {
} else {
cmdbuf = is_write ? zink_get_cmdbuf(ctx, NULL, res) : zink_get_cmdbuf(ctx, res, NULL);
/* force subsequent barriers to be ordered to avoid layout desync */
if (cmdbuf != ctx->batch.bs->reordered_cmdbuf) {
if (cmdbuf != ctx->bs->reordered_cmdbuf) {
res->obj->unordered_write = false;
res->obj->unordered_read = false;
}
@ -524,7 +524,7 @@ zink_resource_image_barrier(struct zink_context *ctx, struct zink_resource *res,
return;
enum zink_resource_access rw = is_write ? ZINK_RESOURCE_ACCESS_RW : ZINK_RESOURCE_ACCESS_WRITE;
bool completed = zink_resource_usage_check_completion_fast(zink_screen(ctx->base.screen), res, rw);
bool usage_matches = !completed && zink_resource_usage_matches(res, ctx->batch.bs);
bool usage_matches = !completed && zink_resource_usage_matches(res, ctx->bs);
VkCommandBuffer cmdbuf = update_unordered_access_and_get_cmdbuf<UNSYNCHRONIZED>::apply(ctx, res, usage_matches, is_write);
assert(new_layout);
@ -547,7 +547,7 @@ zink_resource_image_barrier(struct zink_context *ctx, struct zink_resource *res,
zink_resource_copies_reset(res);
if (res->obj->exportable)
simple_mtx_lock(&ctx->batch.bs->exportable_lock);
simple_mtx_lock(&ctx->bs->exportable_lock);
if (res->obj->dt) {
struct kopper_displaytarget *cdt = res->obj->dt;
if (cdt->swapchain->num_acquires && res->obj->dt_idx != UINT32_MAX) {
@ -556,7 +556,7 @@ zink_resource_image_barrier(struct zink_context *ctx, struct zink_resource *res,
} else if (res->obj->exportable) {
struct pipe_resource *pres = NULL;
bool found = false;
_mesa_set_search_or_add(&ctx->batch.bs->dmabuf_exports, res, &found);
_mesa_set_search_or_add(&ctx->bs->dmabuf_exports, res, &found);
if (!found) {
pipe_resource_reference(&pres, &res->base.b);
}
@ -565,11 +565,11 @@ zink_resource_image_barrier(struct zink_context *ctx, struct zink_resource *res,
for (struct zink_resource *r = res; r; r = zink_resource(r->base.b.next)) {
VkSemaphore sem = zink_screen_export_dmabuf_semaphore(zink_screen(ctx->base.screen), r);
if (sem)
util_dynarray_append(&ctx->batch.bs->fd_wait_semaphores, VkSemaphore, sem);
util_dynarray_append(&ctx->bs->fd_wait_semaphores, VkSemaphore, sem);
}
}
if (res->obj->exportable)
simple_mtx_unlock(&ctx->batch.bs->exportable_lock);
simple_mtx_unlock(&ctx->bs->exportable_lock);
}
bool
@ -629,9 +629,9 @@ zink_resource_buffer_transfer_dst_barrier(struct zink_context *ctx, struct zink_
res->obj->last_write = VK_ACCESS_TRANSFER_WRITE_BIT;
res->obj->unordered_access_stage = VK_PIPELINE_STAGE_TRANSFER_BIT;
ctx->batch.bs->unordered_write_access |= VK_ACCESS_TRANSFER_WRITE_BIT;
ctx->batch.bs->unordered_write_stages |= VK_PIPELINE_STAGE_TRANSFER_BIT;
if (!zink_resource_usage_matches(res, ctx->batch.bs)) {
ctx->bs->unordered_write_access |= VK_ACCESS_TRANSFER_WRITE_BIT;
ctx->bs->unordered_write_stages |= VK_PIPELINE_STAGE_TRANSFER_BIT;
if (!zink_resource_usage_matches(res, ctx->bs)) {
res->obj->access = VK_ACCESS_TRANSFER_WRITE_BIT;
res->obj->access_stage = VK_PIPELINE_STAGE_TRANSFER_BIT;
res->obj->ordered_access_is_copied = true;
@ -699,7 +699,7 @@ zink_resource_buffer_barrier(struct zink_context *ctx, struct zink_resource *res
bool is_write = zink_resource_access_is_write(flags);
enum zink_resource_access rw = is_write ? ZINK_RESOURCE_ACCESS_RW : ZINK_RESOURCE_ACCESS_WRITE;
bool completed = zink_resource_usage_check_completion_fast(zink_screen(ctx->base.screen), res, rw);
bool usage_matches = !completed && zink_resource_usage_matches(res, ctx->batch.bs);
bool usage_matches = !completed && zink_resource_usage_matches(res, ctx->bs);
if (!usage_matches) {
res->obj->unordered_write = true;
if (is_write || zink_resource_usage_check_completion_fast(zink_screen(ctx->base.screen), res, ZINK_RESOURCE_ACCESS_RW))
@ -766,8 +766,8 @@ zink_resource_buffer_barrier(struct zink_context *ctx, struct zink_resource *res
res->obj->unordered_access = flags;
res->obj->unordered_access_stage = pipeline;
if (is_write) {
ctx->batch.bs->unordered_write_access |= flags;
ctx->batch.bs->unordered_write_stages |= pipeline;
ctx->bs->unordered_write_access |= flags;
ctx->bs->unordered_write_stages |= pipeline;
}
}
if (!unordered || !usage_matches || res->obj->ordered_access_is_copied) {

View file

@ -675,11 +675,6 @@ zink_batch_state(struct zink_fence *fence)
return (struct zink_batch_state *)fence;
}
struct zink_batch {
struct zink_batch_state *bs;
};
/** bo types */
struct bo_export {
/** File descriptor associated with a handle export. */
@ -1813,7 +1808,7 @@ struct zink_context {
bool oom_stall;
bool track_renderpasses;
bool no_reorder;
struct zink_batch batch;
struct zink_batch_state *bs;
unsigned shader_has_inlinable_uniforms_mask;
unsigned inlinable_uniforms_valid_mask;