panvk: Remove unnecessary functions

panvk_get_image_layout_transition_handler returns the same zero struct
in both paths, so it can simply be removed. This also means that
transition_image_layout_sync_scope and cmd_transition_image_layout can
be removed, as they are always no-ops.
This commit is contained in:
Jakob Sinclair 2026-04-30 11:00:16 +02:00
parent e3beb262bd
commit ba35486229
7 changed files with 41 additions and 232 deletions

View file

@@ -164,7 +164,6 @@ struct panvk_cs_deps {
enum mali_cs_condition cond;
struct cs_index cond_value;
} dst[PANVK_SUBQUEUE_COUNT];
bool needs_layout_transitions;
};
enum panvk_sb_ids {
@@ -723,14 +722,8 @@ cs_next_iter_sb(struct panvk_cmd_buffer *cmdbuf,
}
}
enum panvk_barrier_stage {
PANVK_BARRIER_STAGE_FIRST,
PANVK_BARRIER_STAGE_AFTER_LAYOUT_TRANSITION,
};
void panvk_per_arch(add_cs_deps)(
struct panvk_cmd_buffer *cmdbuf,
enum panvk_barrier_stage barrier_stage,
const VkDependencyInfo *in,
struct panvk_cs_deps *out,
bool is_set_event);

View file

@@ -440,66 +440,40 @@ collect_cs_deps(struct panvk_cmd_buffer *cmdbuf, const VkDependencyInfo *info,
static void
normalize_dependency(struct panvk_sync_scope *src,
struct panvk_sync_scope *dst,
struct panvk_sync_scope transition,
uint32_t src_qfi, uint32_t dst_qfi,
enum panvk_barrier_stage barrier_stage)
uint32_t src_qfi, uint32_t dst_qfi)
{
switch (barrier_stage) {
case PANVK_BARRIER_STAGE_FIRST:
if (transition.stages) {
/* We need to do layout transition, so we want to sync src with layout
* transition, and then later layout transition with dst.
*/
*dst = transition;
}
break;
case PANVK_BARRIER_STAGE_AFTER_LAYOUT_TRANSITION:
/* If transition.stages is empty, there was no layout transition and so we
* won't be waiting for anything.
*/
*src = transition;
break;
}
/* Perform queue family ownership transfer if src and dst are unequal. */
if (src_qfi != dst_qfi) {
/* Only normalize if we're actually syncing acquire, and not layout
* transition, with dst.
*/
if (barrier_stage == PANVK_BARRIER_STAGE_FIRST) {
/* queue family acquire operation */
switch (src_qfi) {
case VK_QUEUE_FAMILY_EXTERNAL:
/* no execution dependency and no availability operation */
*src = (struct panvk_sync_scope){VK_PIPELINE_STAGE_2_NONE, VK_ACCESS_2_NONE};
break;
case VK_QUEUE_FAMILY_FOREIGN_EXT:
/* treat the foreign queue as the host */
*src = (struct panvk_sync_scope){VK_PIPELINE_STAGE_2_HOST_BIT, VK_ACCESS_2_HOST_WRITE_BIT};
break;
default:
break;
}
/* queue family acquire operation */
switch (src_qfi) {
case VK_QUEUE_FAMILY_EXTERNAL:
/* no execution dependency and no availability operation */
*src = (struct panvk_sync_scope){VK_PIPELINE_STAGE_2_NONE,
VK_ACCESS_2_NONE};
break;
case VK_QUEUE_FAMILY_FOREIGN_EXT:
/* treat the foreign queue as the host */
*src = (struct panvk_sync_scope){VK_PIPELINE_STAGE_2_HOST_BIT,
VK_ACCESS_2_HOST_WRITE_BIT};
break;
default:
break;
}
/* Only normalize if we're actually syncing the latest of either src or
* layout transition, with release.
*/
if ((barrier_stage == PANVK_BARRIER_STAGE_FIRST && !transition.stages) ||
(barrier_stage == PANVK_BARRIER_STAGE_AFTER_LAYOUT_TRANSITION && transition.stages)) {
/* queue family release operation */
switch (dst_qfi) {
case VK_QUEUE_FAMILY_EXTERNAL:
/* no execution dependency and no visibility operation */
*dst = (struct panvk_sync_scope){VK_PIPELINE_STAGE_2_NONE, VK_ACCESS_2_NONE};
break;
case VK_QUEUE_FAMILY_FOREIGN_EXT:
/* treat the foreign queue as the host */
*dst = (struct panvk_sync_scope){VK_PIPELINE_STAGE_2_HOST_BIT, VK_ACCESS_2_HOST_WRITE_BIT};
break;
default:
break;
}
/* queue family release operation */
switch (dst_qfi) {
case VK_QUEUE_FAMILY_EXTERNAL:
/* no execution dependency and no visibility operation */
*dst = (struct panvk_sync_scope){VK_PIPELINE_STAGE_2_NONE,
VK_ACCESS_2_NONE};
break;
case VK_QUEUE_FAMILY_FOREIGN_EXT:
/* treat the foreign queue as the host */
*dst = (struct panvk_sync_scope){VK_PIPELINE_STAGE_2_HOST_BIT,
VK_ACCESS_2_HOST_WRITE_BIT};
break;
default:
break;
}
}
@@ -511,7 +485,6 @@ normalize_dependency(struct panvk_sync_scope *src,
void
panvk_per_arch(add_cs_deps)(struct panvk_cmd_buffer *cmdbuf,
enum panvk_barrier_stage barrier_stage,
const VkDependencyInfo *in,
struct panvk_cs_deps *out,
bool is_set_event)
@@ -535,10 +508,8 @@ panvk_per_arch(add_cs_deps)(struct panvk_cmd_buffer *cmdbuf,
if (is_asymmetric_event)
dst.stages = src.stages;
normalize_dependency(&src, &dst, (struct panvk_sync_scope){0},
VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED,
barrier_stage);
normalize_dependency(&src, &dst, VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED);
collect_cs_deps(cmdbuf, in, src, dst, out);
}
@@ -547,10 +518,8 @@ panvk_per_arch(add_cs_deps)(struct panvk_cmd_buffer *cmdbuf,
const VkBufferMemoryBarrier2 *barrier = &in->pBufferMemoryBarriers[i];
struct panvk_sync_scope src = {barrier->srcStageMask, barrier->srcAccessMask};
struct panvk_sync_scope dst = {barrier->dstStageMask, barrier->dstAccessMask};
normalize_dependency(&src, &dst, (struct panvk_sync_scope){0},
barrier->srcQueueFamilyIndex,
barrier->dstQueueFamilyIndex,
barrier_stage);
normalize_dependency(&src, &dst, barrier->srcQueueFamilyIndex,
barrier->dstQueueFamilyIndex);
collect_cs_deps(cmdbuf, in, src, dst, out);
}
@@ -559,18 +528,10 @@ panvk_per_arch(add_cs_deps)(struct panvk_cmd_buffer *cmdbuf,
const VkImageMemoryBarrier2 *barrier = &in->pImageMemoryBarriers[i];
struct panvk_sync_scope src = {barrier->srcStageMask, barrier->srcAccessMask};
struct panvk_sync_scope dst = {barrier->dstStageMask, barrier->dstAccessMask};
struct panvk_sync_scope transition;
panvk_per_arch(transition_image_layout_sync_scope)(barrier,
&transition.stages, &transition.access);
normalize_dependency(&src, &dst, transition,
barrier->srcQueueFamilyIndex,
barrier->dstQueueFamilyIndex,
barrier_stage);
normalize_dependency(&src, &dst, barrier->srcQueueFamilyIndex,
barrier->dstQueueFamilyIndex);
collect_cs_deps(cmdbuf, in, src, dst, out);
if (barrier_stage == PANVK_BARRIER_STAGE_FIRST && transition.stages)
out->needs_layout_transitions = true;
}
}
@@ -697,30 +658,12 @@ panvk_per_arch(CmdPipelineBarrier2)(VkCommandBuffer commandBuffer,
VK_FROM_HANDLE(panvk_cmd_buffer, cmdbuf, commandBuffer);
struct panvk_cs_deps deps = {0};
panvk_per_arch(add_cs_deps)(cmdbuf, PANVK_BARRIER_STAGE_FIRST, pDependencyInfo, &deps, false);
panvk_per_arch(add_cs_deps)(cmdbuf, pDependencyInfo, &deps, false);
if (deps.needs_fb_barrier)
panvk_per_arch(cmd_fb_barrier)(cmdbuf);
panvk_per_arch(emit_barrier)(cmdbuf, deps);
if (deps.needs_layout_transitions) {
for (uint32_t i = 0; i < pDependencyInfo->imageMemoryBarrierCount; i++) {
const VkImageMemoryBarrier2 *barrier = &pDependencyInfo->pImageMemoryBarriers[i];
panvk_per_arch(cmd_transition_image_layout)(commandBuffer, barrier);
}
struct panvk_cs_deps trans_deps = {0};
panvk_per_arch(add_cs_deps)(
cmdbuf, PANVK_BARRIER_STAGE_AFTER_LAYOUT_TRANSITION,
pDependencyInfo, &trans_deps, false);
assert(!trans_deps.needs_fb_barrier);
panvk_per_arch(emit_barrier)(cmdbuf, trans_deps);
}
}
static struct cs_buffer

View file

@@ -7,7 +7,6 @@
#include "panvk_entrypoints.h"
#include "panvk_event.h"
#include "panvk_instr.h"
#include "panvk_meta.h"
#include "util/bitscan.h"
@@ -29,7 +28,7 @@ panvk_per_arch(CmdResetEvent2)(VkCommandBuffer commandBuffer, VkEvent _event,
};
struct panvk_cs_deps deps = {0};
panvk_per_arch(add_cs_deps)(cmdbuf, PANVK_BARRIER_STAGE_FIRST, &info, &deps, false);
panvk_per_arch(add_cs_deps)(cmdbuf, &info, &deps, false);
for (uint32_t i = 0; i < PANVK_SUBQUEUE_COUNT; i++) {
struct cs_builder *b = panvk_get_cs_builder(cmdbuf, i);
@@ -67,7 +66,7 @@ panvk_per_arch(CmdSetEvent2)(VkCommandBuffer commandBuffer, VkEvent _event,
VK_FROM_HANDLE(panvk_event, event, _event);
struct panvk_cs_deps deps = {0};
panvk_per_arch(add_cs_deps)(cmdbuf, PANVK_BARRIER_STAGE_FIRST, pDependencyInfo, &deps, true);
panvk_per_arch(add_cs_deps)(cmdbuf, pDependencyInfo, &deps, true);
/* vkCmdSetEvents() is not allowed to be called mid-render-pass */
assert(!deps.needs_fb_barrier);
@@ -108,12 +107,11 @@ panvk_per_arch(CmdSetEvent2)(VkCommandBuffer commandBuffer, VkEvent _event,
static void
cmd_wait_event(struct panvk_cmd_buffer *cmdbuf, struct panvk_event *event,
const VkDependencyInfo *info, struct panvk_cs_deps *trans_deps,
bool *needs_trans_barrier)
const VkDependencyInfo *info)
{
struct panvk_cs_deps deps = {0};
panvk_per_arch(add_cs_deps)(cmdbuf, PANVK_BARRIER_STAGE_FIRST, info, &deps, false);
panvk_per_arch(add_cs_deps)(cmdbuf, info, &deps, false);
for (uint32_t i = 0; i < PANVK_SUBQUEUE_COUNT; i++) {
struct cs_builder *b = panvk_get_cs_builder(cmdbuf, i);
@@ -131,20 +129,6 @@ cmd_wait_event(struct panvk_cmd_buffer *cmdbuf, struct panvk_event *event,
seqno, sync_addr);
}
}
if (deps.needs_layout_transitions) {
for (uint32_t i = 0; i < info->imageMemoryBarrierCount; i++) {
const VkImageMemoryBarrier2 *barrier = &info->pImageMemoryBarriers[i];
panvk_per_arch(cmd_transition_image_layout)(
panvk_cmd_buffer_to_handle(cmdbuf), barrier);
}
panvk_per_arch(add_cs_deps)(
cmdbuf, PANVK_BARRIER_STAGE_AFTER_LAYOUT_TRANSITION,
info, trans_deps, false);
*needs_trans_barrier = true;
}
}
VKAPI_ATTR void VKAPI_CALL
@@ -154,19 +138,10 @@ panvk_per_arch(CmdWaitEvents2)(VkCommandBuffer commandBuffer,
{
VK_FROM_HANDLE(panvk_cmd_buffer, cmdbuf, commandBuffer);
struct panvk_cs_deps trans_deps = {0};
bool needs_trans_barrier = false;
for (uint32_t i = 0; i < eventCount; i++) {
VK_FROM_HANDLE(panvk_event, event, pEvents[i]);
const VkDependencyInfo *info = &pDependencyInfos[i];
cmd_wait_event(cmdbuf, event, info, &trans_deps, &needs_trans_barrier);
}
if (needs_trans_barrier) {
assert(!trans_deps.needs_fb_barrier);
panvk_per_arch(emit_barrier)(cmdbuf, trans_deps);
cmd_wait_event(cmdbuf, event, info);
}
}

View file

@@ -349,17 +349,6 @@ panvk_per_arch(CmdPipelineBarrier2)(VkCommandBuffer commandBuffer,
panvk_per_arch(cmd_close_batch)(cmdbuf);
panvk_per_arch(cmd_open_batch)(cmdbuf);
}
for (uint32_t i = 0; i < pDependencyInfo->imageMemoryBarrierCount; i++) {
const VkImageMemoryBarrier2 *barrier = &pDependencyInfo->pImageMemoryBarriers[i];
panvk_per_arch(cmd_transition_image_layout)(commandBuffer, barrier);
}
/* If we had any layout transition dispatches, the batch will be closed at
* this point, therefore establishing the sync between itself and the
* commands that follow.
*/
}
static void

View file

@@ -106,19 +106,7 @@ panvk_per_arch(CmdWaitEvents2)(VkCommandBuffer commandBuffer,
for (uint32_t i = 0; i < eventCount; i++) {
VK_FROM_HANDLE(panvk_event, event, pEvents[i]);
const VkDependencyInfo *info = &pDependencyInfos[i];
panvk_add_wait_event_operation(cmdbuf, event);
for (uint32_t i = 0; i < info->imageMemoryBarrierCount; i++) {
const VkImageMemoryBarrier2 *barrier = &info->pImageMemoryBarriers[i];
panvk_per_arch(cmd_transition_image_layout)(commandBuffer, barrier);
}
/* We don't need to do anything here to establish the sync between layout
* transition dispatches and the commands following the barrier. See the
* comment in ./panvk_vX_cmd_buffer.c:CmdPipelineBarrier2 for details.
*/
}
}

View file

@@ -207,12 +207,4 @@ VkResult panvk_per_arch(meta_get_copy_desc_job)(
const struct panvk_shader_desc_state *shader_desc_state,
uint32_t attrib_buf_idx_offset, struct pan_ptr *job_desc);
#endif
void panvk_per_arch(transition_image_layout_sync_scope)(
const VkImageMemoryBarrier2 *barrier,
VkPipelineStageFlags2 *out_stages, VkAccessFlags2 *out_access);
void panvk_per_arch(cmd_transition_image_layout)(
VkCommandBuffer _cmdbuf,
const VkImageMemoryBarrier2 *barrier);
#endif

View file

@@ -641,77 +641,6 @@ panvk_per_arch(CmdCopyImage2)(VkCommandBuffer commandBuffer,
}
}
static bool
panvk_image_has_afbc(struct panvk_image *img, VkImageSubresourceRange range)
{
VkImageAspectFlags aspect_mask =
vk_image_expand_aspect_mask(&img->vk, range.aspectMask);
u_foreach_bit(aspect, aspect_mask) {
unsigned plane_index = panvk_plane_index(img, 1u << aspect);
struct panvk_image_plane *plane = &img->planes[plane_index];
if (drm_is_afbc(plane->image.props.modifier))
return true;
}
return false;
}
static bool
panvk_acquire_unmodified(const VkImageMemoryBarrier2 *barrier)
{
if (barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_EXTERNAL &&
barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_FOREIGN_EXT)
return false;
const VkExternalMemoryAcquireUnmodifiedEXT *acquire_unmodified =
vk_find_struct_const(barrier->pNext,
EXTERNAL_MEMORY_ACQUIRE_UNMODIFIED_EXT);
return acquire_unmodified &&
acquire_unmodified->acquireUnmodifiedMemory == VK_TRUE;
}
/* TODO: pass less data than what's in a VkImageMemoryBarrier2 */
struct panvk_image_layout_transition_handler {
void (*cmd)(VkCommandBuffer cmdbuf, const VkImageMemoryBarrier2 *barrier);
VkPipelineStageFlags2 stages;
VkAccessFlags2 access;
};
static struct panvk_image_layout_transition_handler
panvk_get_image_layout_transition_handler(const VkImageMemoryBarrier2 *barrier)
{
if (barrier->oldLayout == barrier->newLayout ||
panvk_acquire_unmodified(barrier))
return (struct panvk_image_layout_transition_handler){0};
return (struct panvk_image_layout_transition_handler){0};
}
void
panvk_per_arch(transition_image_layout_sync_scope)(
const VkImageMemoryBarrier2 *barrier,
VkPipelineStageFlags2 *out_stages, VkAccessFlags2 *out_access)
{
struct panvk_image_layout_transition_handler handler =
panvk_get_image_layout_transition_handler(barrier);
*out_stages = handler.stages;
*out_access = handler.access;
}
void
panvk_per_arch(cmd_transition_image_layout)(
VkCommandBuffer cmdbuf, const VkImageMemoryBarrier2 *barrier)
{
struct panvk_image_layout_transition_handler handler =
panvk_get_image_layout_transition_handler(barrier);
if (handler.cmd)
handler.cmd(cmdbuf, barrier);
}
void
panvk_per_arch(cmd_meta_resolve_attachments)(struct panvk_cmd_buffer *cmdbuf)
{