panvk: add a meta command for transitioning image layout

Currently the only thing this meta command ever does is clear AFBC
metadata when transitioning away from UNDEFINED.

Reviewed-by: Lars-Ivar Hesselberg Simonsen <lars-ivar.simonsen@arm.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36176>
commit c95f899305 (parent 8e3fb838ac)
Author: Caterina Shablia
Date:   2025-07-24 00:07:24 +00:00

4 changed files with 168 additions and 0 deletions
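
For context, a minimal sketch of how a barrier implementation might consume
the two entry points declared below. The enclosing handle_image_barrier()
helper and the merge step are assumptions for illustration, not code from
this commit:

static void
handle_image_barrier(VkCommandBuffer cmdbuf,
                     const VkImageMemoryBarrier2 *barrier)
{
   VkPipelineStageFlags2 extra_stages = 0;
   VkAccessFlags2 extra_access = 0;

   /* Ask the meta code which stages/accesses the transition work itself
    * needs, so they can be folded into the barrier's destination scope. */
   panvk_per_arch(transition_image_layout_sync_scope)(barrier, &extra_stages,
                                                      &extra_access);

   /* ... merge extra_stages/extra_access with barrier->dstStageMask and
    * barrier->dstAccessMask when emitting the wait/flush (assumed) ... */

   /* Record the transition work itself; currently this only clears AFBC
    * metadata when oldLayout is VK_IMAGE_LAYOUT_UNDEFINED. */
   panvk_per_arch(cmd_transition_image_layout)(cmdbuf, barrier);
}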

@@ -0,0 +1,21 @@
/*
 * Copyright 2024 Collabora Ltd.
 * SPDX-License-Identifier: MIT
 */

#include "compiler/libcl/libcl.h"
#include "compiler/libcl/libcl_vk.h"

#include "genxml/gen_macros.h"
#include "lib/pan_encoder.h"

#if PAN_ARCH >= 6
KERNEL(1)
panlib_clear_afbc_metadata(global uint8_t *p,
                           uint32_t layer_or_slice_stride)
{
   /* One work item per 16-byte AFBC header entry (x), one row of work items
    * per layer or 3D slice (y). */
   uint32_t item = get_global_id(0);
   uint32_t layer_or_slice = get_global_id(1);

   /* p is a byte pointer; step to this layer/slice's header surface, then
    * zero header entry 'item' with a single 16-byte store. */
   global uint4 *q =
      (global uint4 *)(p + layer_or_slice * layer_or_slice_stride);

   q[item] = 0;
}
#endif

@@ -3,6 +3,7 @@
libpan_shader_files = files(
  'clear_afbc_metadata.cl',
  'query_pool.cl',
  'draw_helper.cl',
  'indirect_dispatch.cl',

@@ -158,4 +158,13 @@ VkResult panvk_per_arch(meta_get_copy_desc_job)(
   uint32_t attrib_buf_idx_offset, struct pan_ptr *job_desc);
#endif

#if defined(PAN_ARCH)
void panvk_per_arch(transition_image_layout_sync_scope)(
   const VkImageMemoryBarrier2 *barrier,
   VkPipelineStageFlags2 *out_stages, VkAccessFlags2 *out_access);

void panvk_per_arch(cmd_transition_image_layout)(
   VkCommandBuffer _cmdbuf, const VkImageMemoryBarrier2 *barrier);
#endif
#endif

@@ -11,6 +11,10 @@
#include "csf/panvk_instr.h"
#endif
#include "panvk_cmd_precomp.h"
#include "libpan.h"
#include "libpan_dgc.h"

static bool
copy_to_image_use_gfx_pipeline(struct panvk_device *dev,
                               struct panvk_image *dst_img)
@@ -573,3 +577,136 @@ panvk_per_arch(CmdCopyImage2)(VkCommandBuffer commandBuffer,
      panvk_per_arch(cmd_meta_compute_end)(cmdbuf, &save);
   }
}

static bool
panvk_image_has_afbc(struct panvk_image *img, VkImageSubresourceRange range)
{
   VkImageAspectFlags aspect_mask =
      vk_image_expand_aspect_mask(&img->vk, range.aspectMask);

   u_foreach_bit(aspect, aspect_mask) {
      unsigned plane_index = panvk_plane_index(img->vk.format, aspect);
      struct panvk_image_plane *plane = &img->planes[plane_index];

      if (drm_is_afbc(plane->image.props.modifier))
         return true;
   }

   return false;
}

static void
cmd_clear_afbc_metadata(VkCommandBuffer _cmdbuf,
                        const VkImageMemoryBarrier2 *barrier)
{
   VK_FROM_HANDLE(panvk_cmd_buffer, cmdbuf, _cmdbuf);
   struct panvk_precomp_ctx precomp_ctx = panvk_per_arch(precomp_cs)(cmdbuf);
   struct panvk_device *dev = to_panvk_device(cmdbuf->vk.base.device);
   VK_FROM_HANDLE(panvk_image, img, barrier->image);
   VkImageSubresourceRange range = barrier->subresourceRange;
   VkImageAspectFlags aspect_mask =
      vk_image_expand_aspect_mask(&img->vk, range.aspectMask);
   uint32_t level_count = vk_image_subresource_level_count(&img->vk, &range);

   struct panvk_cmd_meta_compute_save_ctx save = {0};
   panvk_per_arch(cmd_meta_compute_start)(cmdbuf, &save);

   u_foreach_bit(aspect, aspect_mask) {
      unsigned plane_index = panvk_plane_index(img->vk.format, aspect);
      struct panvk_image_plane *plane = &img->planes[plane_index];

      if (!drm_is_afbc(plane->image.props.modifier))
         continue;

      for (uint32_t level = range.baseMipLevel;
           level < range.baseMipLevel + level_count; level++) {
         const struct pan_image_slice_layout *slayout =
            &plane->plane.layout.slices[level];
         uint32_t layers_or_slices;

         if (img->vk.image_type == VK_IMAGE_TYPE_2D) {
            layers_or_slices =
               vk_image_subresource_layer_count(&img->vk, &range);
         } else if (img->vk.image_type == VK_IMAGE_TYPE_3D) {
            layers_or_slices = vk_image_subresource_slice_count(
               &dev->vk, &img->vk,
               &(VkImageSubresourceLayers){
                  .mipLevel = level,
                  .baseArrayLayer = range.baseArrayLayer,
                  .layerCount = range.layerCount,
               });
         } else {
            UNREACHABLE("Unsupported image type");
         }

         uint32_t layer_or_slice_stride = slayout->afbc.surface_stride_B;
         uint64_t ptr = plane->plane.base + slayout->offset_B +
                        range.baseArrayLayer * layer_or_slice_stride;
         struct panlib_clear_afbc_metadata_args args = {
            .p = ptr,
            .layer_or_slice_stride = layer_or_slice_stride,
         };

         /* Dispatch one work item per 16-byte header entry in x and one row
          * per layer/slice in y. */
         panlib_clear_afbc_metadata_struct(
            &precomp_ctx,
            panlib_3d(slayout->afbc.header.surface_size_B / 16,
                      layers_or_slices, 1),
            PANLIB_BARRIER_NONE, args);
      }
   }

   panvk_per_arch(cmd_meta_compute_end)(cmdbuf, &save);
}

/* TODO: pass less data than what's in a VkImageMemoryBarrier2 */
struct panvk_image_layout_transition_handler {
   void (*cmd)(VkCommandBuffer cmdbuf, const VkImageMemoryBarrier2 *barrier);
   VkPipelineStageFlags2 stages;
   VkAccessFlags2 access;
};

static struct panvk_image_layout_transition_handler
panvk_get_image_layout_transition_handler(const VkImageMemoryBarrier2 *barrier)
{
   VK_FROM_HANDLE(panvk_image, img, barrier->image);

   if (barrier->oldLayout == barrier->newLayout)
      return (struct panvk_image_layout_transition_handler){0};

   if (barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED &&
       panvk_image_has_afbc(img, barrier->subresourceRange)) {
      return (struct panvk_image_layout_transition_handler){
         .cmd = cmd_clear_afbc_metadata,
         .stages = VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT,
         .access = VK_ACCESS_2_MEMORY_WRITE_BIT,
      };
   }

   return (struct panvk_image_layout_transition_handler){0};
}

void
panvk_per_arch(transition_image_layout_sync_scope)(
   const VkImageMemoryBarrier2 *barrier,
   VkPipelineStageFlags2 *out_stages, VkAccessFlags2 *out_access)
{
   struct panvk_image_layout_transition_handler handler =
      panvk_get_image_layout_transition_handler(barrier);

   *out_stages = handler.stages;
   *out_access = handler.access;
}

void
panvk_per_arch(cmd_transition_image_layout)(
   VkCommandBuffer cmdbuf, const VkImageMemoryBarrier2 *barrier)
{
   struct panvk_image_layout_transition_handler handler =
      panvk_get_image_layout_transition_handler(barrier);

   if (handler.cmd)
      handler.cmd(cmdbuf, barrier);
}