anv: enable multiple planes per image/imageView

This change introduces the concept of planes for images & views. It
matches the planes available in the new multi-planar formats.

We also refactor depth & stencil support to use planes, for the sake
of uniformity. In the backend (genX_cmd_buffer.c) we have to take some
care, though, with regard to auxiliary surfaces: multi-planar color
buffers can have multiple auxiliary surfaces, but depth & stencil
share a single HiZ surface (stored only in the depth plane).
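
To illustrate the idea, here is a minimal sketch in C. The fake_*
types and helpers are hypothetical stand-ins, not the real anv
structures, and the aspect-to-plane mapping is simplified rather than
the driver's exact anv_image_aspect_to_plane() rule; the sketch only
shows the pattern this patch introduces: convert an aspect bit to a
plane index, then take the BO offset, surface offset and auxiliary
usage from that plane instead of from the image.

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical, simplified stand-ins for the real anv structures. */
    struct fake_plane {
       uint64_t bo_offset;      /* where the plane lives inside its BO */
       uint64_t surface_offset; /* main surface offset within the plane */
       uint64_t aux_offset;     /* auxiliary (CCS/MCS/HiZ) surface, if any */
       int      aux_usage;      /* enum isl_aux_usage in the real driver */
    };

    struct fake_image {
       uint32_t          aspects;   /* mask of VK_IMAGE_ASPECT_*-style bits */
       struct fake_plane planes[3]; /* one entry per format plane */
    };

    /* Map an aspect bit to a plane index by counting the lower aspect bits
     * present on the image. This is a simplified rule; the real
     * anv_image_aspect_to_plane() also special-cases depth & stencil. */
    static uint32_t
    fake_aspect_to_plane(uint32_t image_aspects, uint32_t aspect_bit)
    {
       assert(image_aspects & aspect_bit);
       uint32_t plane = __builtin_popcount(image_aspects & (aspect_bit - 1));
       assert(plane < 3); /* this sketch assumes at most 3 planes */
       return plane;
    }

    /* After the refactor, addressing goes through the plane, not the image,
     * which is the pattern visible in get_blorp_surf_for_anv_image() below. */
    static uint64_t
    fake_plane_surface_address(const struct fake_image *image,
                               uint32_t aspect_bit)
    {
       uint32_t p = fake_aspect_to_plane(image->aspects, aspect_bit);
       return image->planes[p].bo_offset + image->planes[p].surface_offset;
    }

    /* Walking every plane of an image, in the spirit of the
     * anv_foreach_image_aspect_bit() iteration used throughout the patch. */
    static void
    fake_for_each_plane(const struct fake_image *image,
                        void (*fn)(const struct fake_plane *plane))
    {
       for (uint32_t mask = image->aspects; mask != 0; mask &= mask - 1) {
          uint32_t aspect_bit = mask & ~(mask - 1); /* lowest set bit */
          fn(&image->planes[fake_aspect_to_plane(image->aspects, aspect_bit)]);
       }
    }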

v2: by Jason
    Remove unused aspect parameters from anv_blorp.c
    Assert when attempting to resolve YUV images
    Drop redundant logic for plane offset in make_surface()
    Rework anv_foreach_plane_aspect_bit()

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Lionel Landwerlin 2017-07-19 12:14:19 +01:00
parent 185e719090
commit a62a979335
9 changed files with 909 additions and 478 deletions

@ -183,29 +183,30 @@ get_blorp_surf_for_anv_image(const struct anv_image *image,
enum isl_aux_usage aux_usage, enum isl_aux_usage aux_usage,
struct blorp_surf *blorp_surf) struct blorp_surf *blorp_surf)
{ {
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
if (aux_usage == ANV_AUX_USAGE_DEFAULT) if (aux_usage == ANV_AUX_USAGE_DEFAULT)
aux_usage = image->aux_usage; aux_usage = image->planes[plane].aux_usage;
if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT || if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT ||
aux_usage == ISL_AUX_USAGE_HIZ) aux_usage == ISL_AUX_USAGE_HIZ)
aux_usage = ISL_AUX_USAGE_NONE; aux_usage = ISL_AUX_USAGE_NONE;
const struct anv_surface *surface = const struct anv_surface *surface = &image->planes[plane].surface;
anv_image_get_surface_for_aspect_mask(image, aspect);
*blorp_surf = (struct blorp_surf) { *blorp_surf = (struct blorp_surf) {
.surf = &surface->isl, .surf = &surface->isl,
.addr = { .addr = {
.buffer = image->bo, .buffer = image->planes[plane].bo,
.offset = image->offset + surface->offset, .offset = image->planes[plane].bo_offset + surface->offset,
}, },
}; };
if (aux_usage != ISL_AUX_USAGE_NONE) { if (aux_usage != ISL_AUX_USAGE_NONE) {
blorp_surf->aux_surf = &image->aux_surface.isl, const struct anv_surface *aux_surface = &image->planes[plane].aux_surface;
blorp_surf->aux_surf = &aux_surface->isl,
blorp_surf->aux_addr = (struct blorp_address) { blorp_surf->aux_addr = (struct blorp_address) {
.buffer = image->bo, .buffer = image->planes[plane].bo,
.offset = image->offset + image->aux_surface.offset, .offset = image->planes[plane].bo_offset + aux_surface->offset,
}; };
blorp_surf->aux_usage = aux_usage; blorp_surf->aux_usage = aux_usage;
} }
@ -254,17 +255,35 @@ void anv_CmdCopyImage(
anv_get_layerCount(src_image, &pRegions[r].srcSubresource)); anv_get_layerCount(src_image, &pRegions[r].srcSubresource));
} }
assert(pRegions[r].srcSubresource.aspectMask == VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
pRegions[r].dstSubresource.aspectMask); dst_mask = pRegions[r].dstSubresource.aspectMask;
uint32_t a; assert(anv_image_aspects_compatible(src_mask, dst_mask));
for_each_bit(a, pRegions[r].dstSubresource.aspectMask) {
VkImageAspectFlagBits aspect = (1 << a);
if (_mesa_bitcount(src_mask) > 1) {
uint32_t aspect_bit;
anv_foreach_image_aspect_bit(aspect_bit, src_image, src_mask) {
struct blorp_surf src_surf, dst_surf;
get_blorp_surf_for_anv_image(src_image, 1UL << aspect_bit,
ANV_AUX_USAGE_DEFAULT, &src_surf);
get_blorp_surf_for_anv_image(dst_image, 1UL << aspect_bit,
ANV_AUX_USAGE_DEFAULT, &dst_surf);
for (unsigned i = 0; i < layer_count; i++) {
blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
src_base_layer + i,
&dst_surf, pRegions[r].dstSubresource.mipLevel,
dst_base_layer + i,
srcOffset.x, srcOffset.y,
dstOffset.x, dstOffset.y,
extent.width, extent.height);
}
}
} else {
struct blorp_surf src_surf, dst_surf; struct blorp_surf src_surf, dst_surf;
get_blorp_surf_for_anv_image(src_image, aspect, get_blorp_surf_for_anv_image(src_image, src_mask,
ANV_AUX_USAGE_DEFAULT, &src_surf); ANV_AUX_USAGE_DEFAULT, &src_surf);
get_blorp_surf_for_anv_image(dst_image, aspect, get_blorp_surf_for_anv_image(dst_image, dst_mask,
ANV_AUX_USAGE_DEFAULT, &dst_surf); ANV_AUX_USAGE_DEFAULT, &dst_surf);
for (unsigned i = 0; i < layer_count; i++) { for (unsigned i = 0; i < layer_count; i++) {
@ -753,15 +772,16 @@ void anv_CmdClearColorImage(
struct blorp_batch batch; struct blorp_batch batch;
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0); blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
struct blorp_surf surf;
get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
ANV_AUX_USAGE_DEFAULT, &surf);
for (unsigned r = 0; r < rangeCount; r++) { for (unsigned r = 0; r < rangeCount; r++) {
if (pRanges[r].aspectMask == 0) if (pRanges[r].aspectMask == 0)
continue; continue;
assert(pRanges[r].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT); assert(pRanges[r].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT);
struct blorp_surf surf;
get_blorp_surf_for_anv_image(image, pRanges[r].aspectMask,
ANV_AUX_USAGE_DEFAULT, &surf);
struct anv_format_plane src_format = struct anv_format_plane src_format =
anv_get_format_plane(&cmd_buffer->device->info, image->vk_format, anv_get_format_plane(&cmd_buffer->device->info, image->vk_format,
@ -1048,7 +1068,8 @@ void anv_CmdClearAttachments(
BLORP_BATCH_NO_EMIT_DEPTH_STENCIL); BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);
for (uint32_t a = 0; a < attachmentCount; ++a) { for (uint32_t a = 0; a < attachmentCount; ++a) {
if (pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) { if (pAttachments[a].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT) {
assert(pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
clear_color_attachment(cmd_buffer, &batch, clear_color_attachment(cmd_buffer, &batch,
&pAttachments[a], &pAttachments[a],
rectCount, pRects); rectCount, pRects);
@ -1159,9 +1180,10 @@ anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
cmd_buffer->state.pending_pipe_bits |= cmd_buffer->state.pending_pipe_bits |=
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT; ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
blorp_fast_clear(&batch, &surf, iview->isl.format, assert(image->n_planes == 1);
iview->isl.base_level, blorp_fast_clear(&batch, &surf, iview->planes[0].isl.format,
iview->isl.base_array_layer, fb->layers, iview->planes[0].isl.base_level,
iview->planes[0].isl.base_array_layer, fb->layers,
render_area.offset.x, render_area.offset.y, render_area.offset.x, render_area.offset.y,
render_area.offset.x + render_area.extent.width, render_area.offset.x + render_area.extent.width,
render_area.offset.y + render_area.extent.height); render_area.offset.y + render_area.extent.height);
@ -1169,10 +1191,11 @@ anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
cmd_buffer->state.pending_pipe_bits |= cmd_buffer->state.pending_pipe_bits |=
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT; ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
} else { } else {
blorp_clear(&batch, &surf, iview->isl.format, assert(image->n_planes == 1);
anv_swizzle_for_render(iview->isl.swizzle), blorp_clear(&batch, &surf, iview->planes[0].isl.format,
iview->isl.base_level, anv_swizzle_for_render(iview->planes[0].isl.swizzle),
iview->isl.base_array_layer, fb->layers, iview->planes[0].isl.base_level,
iview->planes[0].isl.base_array_layer, fb->layers,
render_area.offset.x, render_area.offset.y, render_area.offset.x, render_area.offset.y,
render_area.offset.x + render_area.extent.width, render_area.offset.x + render_area.extent.width,
render_area.offset.y + render_area.extent.height, render_area.offset.y + render_area.extent.height,
@ -1211,7 +1234,7 @@ anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
* a stencil clear in addition to using the BLORP-fallback for depth. * a stencil clear in addition to using the BLORP-fallback for depth.
*/ */
if (clear_depth) { if (clear_depth) {
if (!blorp_can_hiz_clear_depth(gen, iview->isl.format, if (!blorp_can_hiz_clear_depth(gen, iview->planes[0].isl.format,
iview->image->samples, iview->image->samples,
render_area.offset.x, render_area.offset.x,
render_area.offset.y, render_area.offset.y,
@ -1279,13 +1302,30 @@ anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
blorp_batch_finish(&batch); blorp_batch_finish(&batch);
} }
static void
resolve_surface(struct blorp_batch *batch,
struct blorp_surf *src_surf,
uint32_t src_level, uint32_t src_layer,
struct blorp_surf *dst_surf,
uint32_t dst_level, uint32_t dst_layer,
uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
uint32_t width, uint32_t height)
{
blorp_blit(batch,
src_surf, src_level, src_layer,
ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
dst_surf, dst_level, dst_layer,
ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
src_x, src_y, src_x + width, src_y + height,
dst_x, dst_y, dst_x + width, dst_y + height,
0x2600 /* GL_NEAREST */, false, false);
}
static void static void
resolve_image(struct blorp_batch *batch, resolve_image(struct blorp_batch *batch,
const struct anv_image *src_image, const struct anv_image *src_image,
enum isl_aux_usage src_aux_usage,
uint32_t src_level, uint32_t src_layer, uint32_t src_level, uint32_t src_layer,
const struct anv_image *dst_image, const struct anv_image *dst_image,
enum isl_aux_usage dst_aux_usage,
uint32_t dst_level, uint32_t dst_layer, uint32_t dst_level, uint32_t dst_layer,
VkImageAspectFlags aspect_mask, VkImageAspectFlags aspect_mask,
uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y, uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
@ -1295,25 +1335,24 @@ resolve_image(struct blorp_batch *batch,
assert(src_image->samples > 1); assert(src_image->samples > 1);
assert(dst_image->type == VK_IMAGE_TYPE_2D); assert(dst_image->type == VK_IMAGE_TYPE_2D);
assert(dst_image->samples == 1); assert(dst_image->samples == 1);
assert(src_image->n_planes == dst_image->n_planes);
uint32_t a; uint32_t aspect_bit;
for_each_bit(a, aspect_mask) {
VkImageAspectFlagBits aspect = 1 << a;
anv_foreach_image_aspect_bit(aspect_bit, src_image, aspect_mask) {
struct blorp_surf src_surf, dst_surf; struct blorp_surf src_surf, dst_surf;
get_blorp_surf_for_anv_image(src_image, aspect, get_blorp_surf_for_anv_image(src_image, 1UL << aspect_bit,
src_aux_usage, &src_surf); ANV_AUX_USAGE_DEFAULT, &src_surf);
get_blorp_surf_for_anv_image(dst_image, aspect, get_blorp_surf_for_anv_image(dst_image, 1UL << aspect_bit,
dst_aux_usage, &dst_surf); ANV_AUX_USAGE_DEFAULT, &dst_surf);
blorp_blit(batch, assert(!src_image->format->can_ycbcr);
&src_surf, src_level, src_layer, assert(!dst_image->format->can_ycbcr);
ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
&dst_surf, dst_level, dst_layer, resolve_surface(batch,
ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY, &src_surf, src_level, src_layer,
src_x, src_y, src_x + width, src_y + height, &dst_surf, dst_level, dst_layer,
dst_x, dst_y, dst_x + width, dst_y + height, src_x, src_y, dst_x, dst_y, width, height);
0x2600 /* GL_NEAREST */, false, false);
} }
} }
@ -1342,12 +1381,17 @@ void anv_CmdResolveImage(
const uint32_t layer_count = const uint32_t layer_count =
anv_get_layerCount(dst_image, &pRegions[r].dstSubresource); anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
dst_mask = pRegions[r].dstSubresource.aspectMask;
assert(anv_image_aspects_compatible(src_mask, dst_mask));
for (uint32_t layer = 0; layer < layer_count; layer++) { for (uint32_t layer = 0; layer < layer_count; layer++) {
resolve_image(&batch, resolve_image(&batch,
src_image, ANV_AUX_USAGE_DEFAULT, src_image,
pRegions[r].srcSubresource.mipLevel, pRegions[r].srcSubresource.mipLevel,
pRegions[r].srcSubresource.baseArrayLayer + layer, pRegions[r].srcSubresource.baseArrayLayer + layer,
dst_image, ANV_AUX_USAGE_DEFAULT, dst_image,
pRegions[r].dstSubresource.mipLevel, pRegions[r].dstSubresource.mipLevel,
pRegions[r].dstSubresource.baseArrayLayer + layer, pRegions[r].dstSubresource.baseArrayLayer + layer,
pRegions[r].dstSubresource.aspectMask, pRegions[r].dstSubresource.aspectMask,
@ -1360,9 +1404,21 @@ void anv_CmdResolveImage(
blorp_batch_finish(&batch); blorp_batch_finish(&batch);
} }
static enum isl_aux_usage
fast_clear_aux_usage(const struct anv_image *image,
VkImageAspectFlagBits aspect)
{
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
return ISL_AUX_USAGE_CCS_D;
else
return image->planes[plane].aux_usage;
}
void void
anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer, anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image, const struct anv_image *image,
VkImageAspectFlagBits aspect,
const uint32_t base_level, const uint32_t level_count, const uint32_t base_level, const uint32_t level_count,
const uint32_t base_layer, uint32_t layer_count) const uint32_t base_layer, uint32_t layer_count)
{ {
@ -1377,9 +1433,8 @@ anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer,
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0); blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
struct blorp_surf surf; struct blorp_surf surf;
get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT, get_blorp_surf_for_anv_image(image, aspect,
image->aux_usage == ISL_AUX_USAGE_NONE ? fast_clear_aux_usage(image, aspect),
ISL_AUX_USAGE_CCS_D : image->aux_usage,
&surf); &surf);
/* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear": /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
@ -1400,6 +1455,10 @@ anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer,
cmd_buffer->state.pending_pipe_bits |= cmd_buffer->state.pending_pipe_bits |=
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT; ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
uint32_t width_div = image->format->planes[plane].denominator_scales[0];
uint32_t height_div = image->format->planes[plane].denominator_scales[1];
for (uint32_t l = 0; l < level_count; l++) { for (uint32_t l = 0; l < level_count; l++) {
const uint32_t level = base_level + l; const uint32_t level = base_level + l;
@ -1412,11 +1471,13 @@ anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer,
if (image->type == VK_IMAGE_TYPE_3D) if (image->type == VK_IMAGE_TYPE_3D)
layer_count = extent.depth; layer_count = extent.depth;
assert(level < anv_image_aux_levels(image)); assert(level < anv_image_aux_levels(image, aspect));
assert(base_layer + layer_count <= anv_image_aux_layers(image, level)); assert(base_layer + layer_count <= anv_image_aux_layers(image, aspect, level));
blorp_fast_clear(&batch, &surf, surf.surf->format, blorp_fast_clear(&batch, &surf, surf.surf->format,
level, base_layer, layer_count, level, base_layer, layer_count,
0, 0, extent.width, extent.height); 0, 0,
extent.width / width_div,
extent.height / height_div);
} }
cmd_buffer->state.pending_pipe_bits |= cmd_buffer->state.pending_pipe_bits |=
@ -1472,18 +1533,30 @@ anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
const VkRect2D render_area = cmd_buffer->state.render_area; const VkRect2D render_area = cmd_buffer->state.render_area;
assert(src_iview->aspect_mask == dst_iview->aspect_mask); assert(src_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT &&
dst_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT);
resolve_image(&batch, src_iview->image, src_aux_usage, struct blorp_surf src_surf, dst_surf;
src_iview->isl.base_level, get_blorp_surf_for_anv_image(src_iview->image,
src_iview->isl.base_array_layer, VK_IMAGE_ASPECT_COLOR_BIT,
dst_iview->image, dst_aux_usage, src_aux_usage, &src_surf);
dst_iview->isl.base_level, get_blorp_surf_for_anv_image(dst_iview->image,
dst_iview->isl.base_array_layer, VK_IMAGE_ASPECT_COLOR_BIT,
src_iview->aspect_mask, dst_aux_usage, &dst_surf);
render_area.offset.x, render_area.offset.y,
render_area.offset.x, render_area.offset.y, assert(!src_iview->image->format->can_ycbcr);
render_area.extent.width, render_area.extent.height); assert(!dst_iview->image->format->can_ycbcr);
resolve_surface(&batch,
&src_surf,
src_iview->planes[0].isl.base_level,
src_iview->planes[0].isl.base_array_layer,
&dst_surf,
dst_iview->planes[0].isl.base_level,
dst_iview->planes[0].isl.base_array_layer,
render_area.offset.x, render_area.offset.y,
render_area.offset.x, render_area.offset.y,
render_area.extent.width, render_area.extent.height);
} }
blorp_batch_finish(&batch); blorp_batch_finish(&batch);
@ -1493,22 +1566,24 @@ anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
void void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer, anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image, const struct anv_image *image,
VkImageAspectFlagBits aspect,
uint32_t base_level, uint32_t level_count, uint32_t base_level, uint32_t level_count,
uint32_t base_layer, uint32_t layer_count) uint32_t base_layer, uint32_t layer_count)
{ {
struct blorp_batch batch; struct blorp_batch batch;
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0); blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT && image->n_planes == 1);
struct blorp_surf surf; struct blorp_surf surf;
get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT, get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT,
ISL_AUX_USAGE_NONE, &surf); ISL_AUX_USAGE_NONE, &surf);
struct blorp_surf shadow_surf = { struct blorp_surf shadow_surf = {
.surf = &image->shadow_surface.isl, .surf = &image->planes[0].shadow_surface.isl,
.addr = { .addr = {
.buffer = image->bo, .buffer = image->planes[0].bo,
.offset = image->offset + image->shadow_surface.offset, .offset = image->planes[0].bo_offset +
image->planes[0].shadow_surface.offset,
}, },
}; };
@ -1543,11 +1618,14 @@ anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
{ {
assert(image); assert(image);
assert(anv_image_aspect_to_plane(image->aspects,
VK_IMAGE_ASPECT_DEPTH_BIT) == 0);
/* Don't resolve depth buffers without an auxiliary HiZ buffer and /* Don't resolve depth buffers without an auxiliary HiZ buffer and
* don't perform such a resolve on gens that don't support it. * don't perform such a resolve on gens that don't support it.
*/ */
if (cmd_buffer->device->info.gen < 8 || if (cmd_buffer->device->info.gen < 8 ||
image->aux_usage != ISL_AUX_USAGE_HIZ) image->planes[0].aux_usage != ISL_AUX_USAGE_HIZ)
return; return;
assert(op == BLORP_HIZ_OP_HIZ_RESOLVE || assert(op == BLORP_HIZ_OP_HIZ_RESOLVE ||
@ -1561,10 +1639,11 @@ anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
ISL_AUX_USAGE_NONE, &surf); ISL_AUX_USAGE_NONE, &surf);
/* Manually add the aux HiZ surf */ /* Manually add the aux HiZ surf */
surf.aux_surf = &image->aux_surface.isl, surf.aux_surf = &image->planes[0].aux_surface.isl,
surf.aux_addr = (struct blorp_address) { surf.aux_addr = (struct blorp_address) {
.buffer = image->bo, .buffer = image->planes[0].bo,
.offset = image->offset + image->aux_surface.offset, .offset = image->planes[0].bo_offset +
image->planes[0].aux_surface.offset,
}; };
surf.aux_usage = ISL_AUX_USAGE_HIZ; surf.aux_usage = ISL_AUX_USAGE_HIZ;
@ -1578,15 +1657,18 @@ void
anv_ccs_resolve(struct anv_cmd_buffer * const cmd_buffer, anv_ccs_resolve(struct anv_cmd_buffer * const cmd_buffer,
const struct anv_state surface_state, const struct anv_state surface_state,
const struct anv_image * const image, const struct anv_image * const image,
VkImageAspectFlagBits aspect,
const uint8_t level, const uint32_t layer_count, const uint8_t level, const uint32_t layer_count,
const enum blorp_fast_clear_op op) const enum blorp_fast_clear_op op)
{ {
assert(cmd_buffer && image); assert(cmd_buffer && image);
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
/* The resolved subresource range must have a CCS buffer. */ /* The resolved subresource range must have a CCS buffer. */
assert(level < anv_image_aux_levels(image)); assert(level < anv_image_aux_levels(image, aspect));
assert(layer_count <= anv_image_aux_layers(image, level)); assert(layer_count <= anv_image_aux_layers(image, aspect, level));
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT && image->samples == 1); assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT && image->samples == 1);
/* Create a binding table for this surface state. */ /* Create a binding table for this surface state. */
uint32_t binding_table; uint32_t binding_table;
@ -1601,13 +1683,13 @@ anv_ccs_resolve(struct anv_cmd_buffer * const cmd_buffer,
BLORP_BATCH_PREDICATE_ENABLE); BLORP_BATCH_PREDICATE_ENABLE);
struct blorp_surf surf; struct blorp_surf surf;
get_blorp_surf_for_anv_image(image, VK_IMAGE_ASPECT_COLOR_BIT, get_blorp_surf_for_anv_image(image, aspect,
image->aux_usage == ISL_AUX_USAGE_CCS_E ? fast_clear_aux_usage(image, aspect),
ISL_AUX_USAGE_CCS_E : ISL_AUX_USAGE_CCS_D,
&surf); &surf);
blorp_ccs_resolve_attachment(&batch, binding_table, &surf, level, blorp_ccs_resolve_attachment(&batch, binding_table, &surf, level,
layer_count, image->color_surface.isl.format, layer_count,
image->planes[plane].surface.isl.format,
op); op);
blorp_batch_finish(&batch); blorp_batch_finish(&batch);

@ -424,20 +424,25 @@ anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
uint32_t b; uint32_t b;
for_each_bit(b, iview->image->aspects) { for_each_bit(b, iview->image->aspects) {
VkImageAspectFlagBits aspect = (1 << b); VkImageAspectFlagBits aspect = (1 << b);
char suffix; const char *suffix;
switch (aspect) { switch (aspect) {
case VK_IMAGE_ASPECT_COLOR_BIT: suffix = 'c'; break; case VK_IMAGE_ASPECT_COLOR_BIT: suffix = "c"; break;
case VK_IMAGE_ASPECT_DEPTH_BIT: suffix = 'd'; break; case VK_IMAGE_ASPECT_DEPTH_BIT: suffix = "d"; break;
case VK_IMAGE_ASPECT_STENCIL_BIT: suffix = 's'; break; case VK_IMAGE_ASPECT_STENCIL_BIT: suffix = "s"; break;
case VK_IMAGE_ASPECT_PLANE_0_BIT_KHR: suffix = "c0"; break;
case VK_IMAGE_ASPECT_PLANE_1_BIT_KHR: suffix = "c1"; break;
case VK_IMAGE_ASPECT_PLANE_2_BIT_KHR: suffix = "c2"; break;
default: default:
unreachable("Invalid aspect"); unreachable("Invalid aspect");
} }
char *filename = ralloc_asprintf(dump_ctx, "framebuffer%04d-%d%c.ppm", char *filename = ralloc_asprintf(dump_ctx, "framebuffer%04d-%d%s.ppm",
dump_idx, i, suffix); dump_idx, i, suffix);
unsigned plane = anv_image_aspect_to_plane(iview->image->aspects, aspect);
dump_add_image(cmd_buffer, (struct anv_image *)iview->image, aspect, dump_add_image(cmd_buffer, (struct anv_image *)iview->image, aspect,
iview->isl.base_level, iview->isl.base_array_layer, iview->planes[plane].isl.base_level,
iview->planes[plane].isl.base_array_layer,
filename); filename);
} }
} }

@ -430,8 +430,7 @@ anv_get_format_plane(const struct gen_device_info *devinfo, VkFormat vk_format,
return plane_format; return plane_format;
} }
assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT); assert((aspect & ~VK_IMAGE_ASPECT_ANY_COLOR_BIT) == 0);
assert(vk_format_aspects(vk_format) == VK_IMAGE_ASPECT_COLOR_BIT);
const struct isl_format_layout *isl_layout = const struct isl_format_layout *isl_layout =
isl_format_get_layout(plane_format.isl_format); isl_format_get_layout(plane_format.isl_format);

@ -68,6 +68,9 @@ choose_isl_surf_usage(VkImageCreateFlags vk_create_flags,
isl_usage |= ISL_SURF_USAGE_STENCIL_BIT; isl_usage |= ISL_SURF_USAGE_STENCIL_BIT;
break; break;
case VK_IMAGE_ASPECT_COLOR_BIT: case VK_IMAGE_ASPECT_COLOR_BIT:
case VK_IMAGE_ASPECT_PLANE_0_BIT_KHR:
case VK_IMAGE_ASPECT_PLANE_1_BIT_KHR:
case VK_IMAGE_ASPECT_PLANE_2_BIT_KHR:
break; break;
default: default:
unreachable("bad VkImageAspect"); unreachable("bad VkImageAspect");
@ -95,26 +98,31 @@ choose_isl_surf_usage(VkImageCreateFlags vk_create_flags,
static struct anv_surface * static struct anv_surface *
get_surface(struct anv_image *image, VkImageAspectFlags aspect) get_surface(struct anv_image *image, VkImageAspectFlags aspect)
{ {
switch (aspect) { uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
default: return &image->planes[plane].surface;
unreachable("bad VkImageAspect");
case VK_IMAGE_ASPECT_COLOR_BIT:
return &image->color_surface;
case VK_IMAGE_ASPECT_DEPTH_BIT:
return &image->depth_surface;
case VK_IMAGE_ASPECT_STENCIL_BIT:
return &image->stencil_surface;
}
} }
static void static void
add_surface(struct anv_image *image, struct anv_surface *surf) add_surface(struct anv_image *image, struct anv_surface *surf, uint32_t plane)
{ {
assert(surf->isl.size > 0); /* isl surface must be initialized */ assert(surf->isl.size > 0); /* isl surface must be initialized */
surf->offset = align_u32(image->size, surf->isl.alignment); if (image->disjoint) {
surf->offset = align_u32(image->planes[plane].size, surf->isl.alignment);
/* Plane offset is always 0 when it's disjoint. */
} else {
surf->offset = align_u32(image->size, surf->isl.alignment);
/* Determine plane's offset only once when the first surface is added. */
if (image->planes[plane].size == 0)
image->planes[plane].offset = image->size;
}
image->size = surf->offset + surf->isl.size; image->size = surf->offset + surf->isl.size;
image->planes[plane].size = (surf->offset + surf->isl.size) - image->planes[plane].offset;
image->alignment = MAX2(image->alignment, surf->isl.alignment); image->alignment = MAX2(image->alignment, surf->isl.alignment);
image->planes[plane].alignment = MAX2(image->planes[plane].alignment,
surf->isl.alignment);
} }
@ -194,11 +202,13 @@ all_formats_ccs_e_compatible(const struct gen_device_info *devinfo,
*/ */
static void static void
add_fast_clear_state_buffer(struct anv_image *image, add_fast_clear_state_buffer(struct anv_image *image,
VkImageAspectFlagBits aspect,
uint32_t plane,
const struct anv_device *device) const struct anv_device *device)
{ {
assert(image && device); assert(image && device);
assert(image->aux_surface.isl.size > 0 && assert(image->planes[plane].aux_surface.isl.size > 0 &&
image->aspects == VK_IMAGE_ASPECT_COLOR_BIT); image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT);
/* The offset to the buffer of clear values must be dword-aligned for GPU /* The offset to the buffer of clear values must be dword-aligned for GPU
* memcpy operations. It is located immediately after the auxiliary surface. * memcpy operations. It is located immediately after the auxiliary surface.
@ -212,18 +222,31 @@ add_fast_clear_state_buffer(struct anv_image *image,
/* Auxiliary buffers should be a multiple of 4K, so the start of the clear /* Auxiliary buffers should be a multiple of 4K, so the start of the clear
* values buffer should already be dword-aligned. * values buffer should already be dword-aligned.
*/ */
assert(image->aux_surface.isl.size % 4 == 0); assert((image->planes[plane].offset + image->planes[plane].size) % 4 == 0);
/* This buffer should be at the very end of the image. */ /* This buffer should be at the very end of the plane. */
assert(image->size == if (image->disjoint) {
image->aux_surface.offset + image->aux_surface.isl.size); assert(image->planes[plane].size ==
(image->planes[plane].offset + image->planes[plane].size));
} else {
assert(image->size ==
(image->planes[plane].offset + image->planes[plane].size));
}
const unsigned entry_size = anv_fast_clear_state_entry_size(device); const unsigned entry_size = anv_fast_clear_state_entry_size(device);
/* There's no padding between entries, so ensure that they're always a /* There's no padding between entries, so ensure that they're always a
* multiple of 32 bits in order to enable GPU memcpy operations. * multiple of 32 bits in order to enable GPU memcpy operations.
*/ */
assert(entry_size % 4 == 0); assert(entry_size % 4 == 0);
image->size += entry_size * anv_image_aux_levels(image);
const unsigned plane_state_size =
entry_size * anv_image_aux_levels(image, aspect);
image->planes[plane].fast_clear_state_offset =
image->planes[plane].offset + image->planes[plane].size;
image->planes[plane].size += plane_state_size;
image->size += plane_state_size;
} }
/** /**
@ -259,14 +282,13 @@ make_surface(const struct anv_device *dev,
assert(tiling_flags); assert(tiling_flags);
struct anv_surface *anv_surf = get_surface(image, aspect);
image->extent = anv_sanitize_image_extent(vk_info->imageType, image->extent = anv_sanitize_image_extent(vk_info->imageType,
vk_info->extent); vk_info->extent);
enum isl_format format = anv_get_isl_format(&dev->info, vk_info->format, const unsigned plane = anv_image_aspect_to_plane(image->aspects, aspect);
aspect, vk_info->tiling); const struct anv_format_plane plane_format =
assert(format != ISL_FORMAT_UNSUPPORTED); anv_get_format_plane(&dev->info, image->vk_format, aspect, image->tiling);
struct anv_surface *anv_surf = &image->planes[plane].surface;
/* If an image is created as BLOCK_TEXEL_VIEW_COMPATIBLE, then we need to /* If an image is created as BLOCK_TEXEL_VIEW_COMPATIBLE, then we need to
* fall back to linear on Broadwell and earlier because we aren't * fall back to linear on Broadwell and earlier because we aren't
@ -278,16 +300,16 @@ make_surface(const struct anv_device *dev,
if (dev->info.gen <= 8 && if (dev->info.gen <= 8 &&
(vk_info->flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR) && (vk_info->flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR) &&
vk_info->tiling == VK_IMAGE_TILING_OPTIMAL) { vk_info->tiling == VK_IMAGE_TILING_OPTIMAL) {
assert(isl_format_is_compressed(format)); assert(isl_format_is_compressed(plane_format.isl_format));
tiling_flags = ISL_TILING_LINEAR_BIT; tiling_flags = ISL_TILING_LINEAR_BIT;
needs_shadow = true; needs_shadow = true;
} }
ok = isl_surf_init(&dev->isl_dev, &anv_surf->isl, ok = isl_surf_init(&dev->isl_dev, &anv_surf->isl,
.dim = vk_to_isl_surf_dim[vk_info->imageType], .dim = vk_to_isl_surf_dim[vk_info->imageType],
.format = format, .format = plane_format.isl_format,
.width = image->extent.width, .width = image->extent.width / plane_format.denominator_scales[0],
.height = image->extent.height, .height = image->extent.height / plane_format.denominator_scales[1],
.depth = image->extent.depth, .depth = image->extent.depth,
.levels = vk_info->mipLevels, .levels = vk_info->mipLevels,
.array_len = vk_info->arrayLayers, .array_len = vk_info->arrayLayers,
@ -302,7 +324,9 @@ make_surface(const struct anv_device *dev,
*/ */
assert(ok); assert(ok);
add_surface(image, anv_surf); image->planes[plane].aux_usage = ISL_AUX_USAGE_NONE;
add_surface(image, anv_surf, plane);
/* If an image is created as BLOCK_TEXEL_VIEW_COMPATIBLE, then we need to /* If an image is created as BLOCK_TEXEL_VIEW_COMPATIBLE, then we need to
* create an identical tiled shadow surface for use while texturing so we * create an identical tiled shadow surface for use while texturing so we
@ -312,9 +336,9 @@ make_surface(const struct anv_device *dev,
assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT); assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
assert(tiling_flags == ISL_TILING_LINEAR_BIT); assert(tiling_flags == ISL_TILING_LINEAR_BIT);
ok = isl_surf_init(&dev->isl_dev, &image->shadow_surface.isl, ok = isl_surf_init(&dev->isl_dev, &image->planes[plane].shadow_surface.isl,
.dim = vk_to_isl_surf_dim[vk_info->imageType], .dim = vk_to_isl_surf_dim[vk_info->imageType],
.format = format, .format = plane_format.isl_format,
.width = image->extent.width, .width = image->extent.width,
.height = image->extent.height, .height = image->extent.height,
.depth = image->extent.depth, .depth = image->extent.depth,
@ -331,7 +355,7 @@ make_surface(const struct anv_device *dev,
*/ */
assert(ok); assert(ok);
add_surface(image, &image->shadow_surface); add_surface(image, &image->planes[plane].shadow_surface, plane);
} }
/* Add a HiZ surface to a depth buffer that will be used for rendering. /* Add a HiZ surface to a depth buffer that will be used for rendering.
@ -357,24 +381,43 @@ make_surface(const struct anv_device *dev,
} else if (dev->info.gen == 8 && vk_info->samples > 1) { } else if (dev->info.gen == 8 && vk_info->samples > 1) {
anv_perf_warn(dev->instance, image, "Enable gen8 multisampled HiZ"); anv_perf_warn(dev->instance, image, "Enable gen8 multisampled HiZ");
} else if (!unlikely(INTEL_DEBUG & DEBUG_NO_HIZ)) { } else if (!unlikely(INTEL_DEBUG & DEBUG_NO_HIZ)) {
assert(image->aux_surface.isl.size == 0); assert(image->planes[plane].aux_surface.isl.size == 0);
ok = isl_surf_get_hiz_surf(&dev->isl_dev, &image->depth_surface.isl, ok = isl_surf_get_hiz_surf(&dev->isl_dev,
&image->aux_surface.isl); &image->planes[plane].surface.isl,
&image->planes[plane].aux_surface.isl);
assert(ok); assert(ok);
add_surface(image, &image->aux_surface); add_surface(image, &image->planes[plane].aux_surface, plane);
image->aux_usage = ISL_AUX_USAGE_HIZ; image->planes[plane].aux_usage = ISL_AUX_USAGE_HIZ;
} }
} else if (aspect == VK_IMAGE_ASPECT_COLOR_BIT && vk_info->samples == 1) { } else if ((aspect & VK_IMAGE_ASPECT_ANY_COLOR_BIT) && vk_info->samples == 1) {
if (!unlikely(INTEL_DEBUG & DEBUG_NO_RBC)) { /* TODO: Disallow compression with :
assert(image->aux_surface.isl.size == 0); *
ok = isl_surf_get_ccs_surf(&dev->isl_dev, &anv_surf->isl, * 1) non multiplanar images (We appear to hit a sampler bug with
&image->aux_surface.isl, 0); * CCS & R16G16 format. Putting the clear state a page/4096bytes
* further fixes the issue).
*
* 2) alias images, because they might be aliases of images
* described in 1)
*
* 3) compression disabled by debug
*/
const bool allow_compression =
image->n_planes == 1 &&
(vk_info->flags & VK_IMAGE_CREATE_ALIAS_BIT_KHR) == 0 &&
likely((INTEL_DEBUG & DEBUG_NO_RBC) == 0);
if (allow_compression) {
assert(image->planes[plane].aux_surface.isl.size == 0);
ok = isl_surf_get_ccs_surf(&dev->isl_dev,
&image->planes[plane].surface.isl,
&image->planes[plane].aux_surface.isl, 0);
if (ok) { if (ok) {
/* Disable CCS when it is not useful (i.e., when you can't render /* Disable CCS when it is not useful (i.e., when you can't render
* to the image with CCS enabled). * to the image with CCS enabled).
*/ */
if (!isl_format_supports_rendering(&dev->info, format)) { if (!isl_format_supports_rendering(&dev->info,
plane_format.isl_format)) {
/* While it may be technically possible to enable CCS for this /* While it may be technically possible to enable CCS for this
* image, we currently don't have things hooked up to get it * image, we currently don't have things hooked up to get it
* working. * working.
@ -382,12 +425,12 @@ make_surface(const struct anv_device *dev,
anv_perf_warn(dev->instance, image, anv_perf_warn(dev->instance, image,
"This image format doesn't support rendering. " "This image format doesn't support rendering. "
"Not allocating an CCS buffer."); "Not allocating an CCS buffer.");
image->aux_surface.isl.size = 0; image->planes[plane].aux_surface.isl.size = 0;
return VK_SUCCESS; return VK_SUCCESS;
} }
add_surface(image, &image->aux_surface); add_surface(image, &image->planes[plane].aux_surface, plane);
add_fast_clear_state_buffer(image, dev); add_fast_clear_state_buffer(image, aspect, plane, dev);
/* For images created without MUTABLE_FORMAT_BIT set, we know that /* For images created without MUTABLE_FORMAT_BIT set, we know that
* they will always be used with the original format. In * they will always be used with the original format. In
@ -399,22 +442,42 @@ make_surface(const struct anv_device *dev,
*/ */
if (!(vk_info->usage & VK_IMAGE_USAGE_STORAGE_BIT) && if (!(vk_info->usage & VK_IMAGE_USAGE_STORAGE_BIT) &&
all_formats_ccs_e_compatible(&dev->info, vk_info)) { all_formats_ccs_e_compatible(&dev->info, vk_info)) {
image->aux_usage = ISL_AUX_USAGE_CCS_E; image->planes[plane].aux_usage = ISL_AUX_USAGE_CCS_E;
} }
} }
} }
} else if (aspect == VK_IMAGE_ASPECT_COLOR_BIT && vk_info->samples > 1) { } else if ((aspect & VK_IMAGE_ASPECT_ANY_COLOR_BIT) && vk_info->samples > 1) {
assert(image->aux_surface.isl.size == 0);
assert(!(vk_info->usage & VK_IMAGE_USAGE_STORAGE_BIT)); assert(!(vk_info->usage & VK_IMAGE_USAGE_STORAGE_BIT));
ok = isl_surf_get_mcs_surf(&dev->isl_dev, &anv_surf->isl, assert(image->planes[plane].aux_surface.isl.size == 0);
&image->aux_surface.isl); ok = isl_surf_get_mcs_surf(&dev->isl_dev,
&image->planes[plane].surface.isl,
&image->planes[plane].aux_surface.isl);
if (ok) { if (ok) {
add_surface(image, &image->aux_surface); add_surface(image, &image->planes[plane].aux_surface, plane);
add_fast_clear_state_buffer(image, dev); add_fast_clear_state_buffer(image, aspect, plane, dev);
image->aux_usage = ISL_AUX_USAGE_MCS; image->planes[plane].aux_usage = ISL_AUX_USAGE_MCS;
} }
} }
assert((image->planes[plane].offset + image->planes[plane].size) == image->size);
/* Upper bound of the last surface should be smaller than the plane's
* size.
*/
assert((MAX2(image->planes[plane].surface.offset,
image->planes[plane].aux_surface.offset) +
(image->planes[plane].aux_surface.isl.size > 0 ?
image->planes[plane].aux_surface.isl.size :
image->planes[plane].surface.isl.size)) <=
(image->planes[plane].offset + image->planes[plane].size));
if (image->planes[plane].aux_surface.isl.size) {
/* assert(image->planes[plane].fast_clear_state_offset == */
/* (image->planes[plane].aux_surface.offset + image->planes[plane].aux_surface.isl.size)); */
assert(image->planes[plane].fast_clear_state_offset <
(image->planes[plane].offset + image->planes[plane].size));
}
return VK_SUCCESS; return VK_SUCCESS;
} }
@ -446,13 +509,18 @@ anv_image_create(VkDevice _device,
image->type = pCreateInfo->imageType; image->type = pCreateInfo->imageType;
image->extent = pCreateInfo->extent; image->extent = pCreateInfo->extent;
image->vk_format = pCreateInfo->format; image->vk_format = pCreateInfo->format;
image->format = anv_get_format(pCreateInfo->format);
image->aspects = vk_format_aspects(image->vk_format); image->aspects = vk_format_aspects(image->vk_format);
image->levels = pCreateInfo->mipLevels; image->levels = pCreateInfo->mipLevels;
image->array_size = pCreateInfo->arrayLayers; image->array_size = pCreateInfo->arrayLayers;
image->samples = pCreateInfo->samples; image->samples = pCreateInfo->samples;
image->usage = pCreateInfo->usage; image->usage = pCreateInfo->usage;
image->tiling = pCreateInfo->tiling; image->tiling = pCreateInfo->tiling;
image->aux_usage = ISL_AUX_USAGE_NONE;
const struct anv_format *format = anv_get_format(image->vk_format);
assert(format != NULL);
image->n_planes = format->n_planes;
uint32_t b; uint32_t b;
for_each_bit(b, image->aspects) { for_each_bit(b, image->aspects) {
@ -499,48 +567,61 @@ anv_DestroyImage(VkDevice _device, VkImage _image,
vk_free2(&device->alloc, pAllocator, image); vk_free2(&device->alloc, pAllocator, image);
} }
static void static void anv_image_bind_memory_plane(struct anv_device *device,
anv_bind_image_memory(const VkBindImageMemoryInfoKHR *pBindInfo) struct anv_image *image,
uint32_t plane,
struct anv_device_memory *memory,
uint32_t memory_offset)
{ {
ANV_FROM_HANDLE(anv_device_memory, mem, pBindInfo->memory); if (!memory) {
ANV_FROM_HANDLE(anv_image, image, pBindInfo->image); image->planes[plane].bo = NULL;
image->planes[plane].bo_offset = 0;
assert(pBindInfo->sType == VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR);
if (mem == NULL) {
image->bo = NULL;
image->offset = 0;
return;
} }
image->bo = mem->bo; image->planes[plane].bo = memory->bo;
image->offset = pBindInfo->memoryOffset; image->planes[plane].bo_offset = memory_offset;
} }
VkResult anv_BindImageMemory( VkResult anv_BindImageMemory(
VkDevice device, VkDevice _device,
VkImage image, VkImage _image,
VkDeviceMemory memory, VkDeviceMemory _memory,
VkDeviceSize memoryOffset) VkDeviceSize memoryOffset)
{ {
anv_bind_image_memory( ANV_FROM_HANDLE(anv_device, device, _device);
&(VkBindImageMemoryInfoKHR) { ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR, ANV_FROM_HANDLE(anv_image, image, _image);
.image = image,
.memory = memory, uint32_t aspect_bit;
.memoryOffset = memoryOffset, anv_foreach_image_aspect_bit(aspect_bit, image, image->aspects) {
}); uint32_t plane =
anv_image_aspect_to_plane(image->aspects, 1UL << aspect_bit);
anv_image_bind_memory_plane(device, image, plane, mem, memoryOffset);
}
return VK_SUCCESS; return VK_SUCCESS;
} }
VkResult anv_BindImageMemory2KHR( VkResult anv_BindImageMemory2KHR(
VkDevice device, VkDevice _device,
uint32_t bindInfoCount, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR* pBindInfos) const VkBindImageMemoryInfoKHR* pBindInfos)
{ {
for (uint32_t i = 0; i < bindInfoCount; i++) ANV_FROM_HANDLE(anv_device, device, _device);
anv_bind_image_memory(&pBindInfos[i]);
for (uint32_t i = 0; i < bindInfoCount; i++) {
const VkBindImageMemoryInfoKHR *bind_info = &pBindInfos[i];
ANV_FROM_HANDLE(anv_device_memory, mem, bind_info->memory);
ANV_FROM_HANDLE(anv_image, image, bind_info->image);
uint32_t aspect_bit;
anv_foreach_image_aspect_bit(aspect_bit, image, image->aspects) {
uint32_t plane =
anv_image_aspect_to_plane(image->aspects, 1UL << aspect_bit);
anv_image_bind_memory_plane(device, image, plane,
mem, bind_info->memoryOffset);
}
}
return VK_SUCCESS; return VK_SUCCESS;
} }
@ -574,22 +655,10 @@ void anv_GetImageSubresourceLayout(
assert(__builtin_popcount(pSubresource->aspectMask) == 1); assert(__builtin_popcount(pSubresource->aspectMask) == 1);
switch (pSubresource->aspectMask) { anv_surface_get_subresource_layout(image,
case VK_IMAGE_ASPECT_COLOR_BIT: get_surface(image,
anv_surface_get_subresource_layout(image, &image->color_surface, pSubresource->aspectMask),
pSubresource, pLayout); pSubresource, pLayout);
break;
case VK_IMAGE_ASPECT_DEPTH_BIT:
anv_surface_get_subresource_layout(image, &image->depth_surface,
pSubresource, pLayout);
break;
case VK_IMAGE_ASPECT_STENCIL_BIT:
anv_surface_get_subresource_layout(image, &image->stencil_surface,
pSubresource, pLayout);
break;
default:
assert(!"Invalid image aspect");
}
} }
/** /**
@ -600,7 +669,7 @@ void anv_GetImageSubresourceLayout(
* *
* @param devinfo The device information of the Intel GPU. * @param devinfo The device information of the Intel GPU.
* @param image The image that may contain a collection of buffers. * @param image The image that may contain a collection of buffers.
* @param aspects The aspect(s) of the image to be accessed. * @param plane The plane of the image to be accessed.
* @param layout The current layout of the image aspect(s). * @param layout The current layout of the image aspect(s).
* *
* @return The primary buffer that should be used for the given layout. * @return The primary buffer that should be used for the given layout.
@ -624,26 +693,25 @@ anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
/* Determine the optimal buffer. */ /* Determine the optimal buffer. */
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
/* If there is no auxiliary surface allocated, we must use the one and only /* If there is no auxiliary surface allocated, we must use the one and only
* main buffer. * main buffer.
*/ */
if (image->aux_surface.isl.size == 0) if (image->planes[plane].aux_surface.isl.size == 0)
return ISL_AUX_USAGE_NONE; return ISL_AUX_USAGE_NONE;
/* All images that use an auxiliary surface are required to be tiled. */ /* All images that use an auxiliary surface are required to be tiled. */
assert(image->tiling == VK_IMAGE_TILING_OPTIMAL); assert(image->tiling == VK_IMAGE_TILING_OPTIMAL);
/* Stencil has no aux */ /* Stencil has no aux */
if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT) assert(aspect != VK_IMAGE_ASPECT_STENCIL_BIT);
return ISL_AUX_USAGE_NONE;
const bool color_aspect = aspect == VK_IMAGE_ASPECT_COLOR_BIT;
/* The following switch currently only handles depth stencil aspects. /* The following switch currently only handles depth stencil aspects.
* TODO: Handle the color aspect. * TODO: Handle the color aspect.
*/ */
if (color_aspect) if (image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT)
return image->aux_usage; return image->planes[plane].aux_usage;
switch (layout) { switch (layout) {
@ -677,7 +745,7 @@ anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
/* Sampling Layouts */ /* Sampling Layouts */
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
assert(!color_aspect); assert((image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT) == 0);
/* Fall-through */ /* Fall-through */
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR:
@ -688,7 +756,7 @@ anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
return ISL_AUX_USAGE_NONE; return ISL_AUX_USAGE_NONE;
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
assert(color_aspect); assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
/* On SKL+, the render buffer can be decompressed by the presentation /* On SKL+, the render buffer can be decompressed by the presentation
* engine. Support for this feature has not yet landed in the wider * engine. Support for this feature has not yet landed in the wider
@ -710,7 +778,7 @@ anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
/* Rendering Layouts */ /* Rendering Layouts */
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
assert(color_aspect); assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
unreachable("Color images are not yet supported."); unreachable("Color images are not yet supported.");
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
@ -766,8 +834,10 @@ anv_image_fill_surface_state(struct anv_device *device,
struct anv_surface_state *state_inout, struct anv_surface_state *state_inout,
struct brw_image_param *image_param_out) struct brw_image_param *image_param_out)
{ {
const struct anv_surface *surface = uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
anv_image_get_surface_for_aspect_mask(image, aspect);
const struct anv_surface *surface = &image->planes[plane].surface,
*aux_surface = &image->planes[plane].aux_surface;
struct isl_view view = *view_in; struct isl_view view = *view_in;
view.usage |= view_usage; view.usage |= view_usage;
@ -777,13 +847,13 @@ anv_image_fill_surface_state(struct anv_device *device,
* the primary surface. The shadow surface will be tiled, unlike the main * the primary surface. The shadow surface will be tiled, unlike the main
* surface, so it should get significantly better performance. * surface, so it should get significantly better performance.
*/ */
if (image->shadow_surface.isl.size > 0 && if (image->planes[plane].shadow_surface.isl.size > 0 &&
isl_format_is_compressed(view.format) && isl_format_is_compressed(view.format) &&
(flags & ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL)) { (flags & ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL)) {
assert(isl_format_is_compressed(surface->isl.format)); assert(isl_format_is_compressed(surface->isl.format));
assert(surface->isl.tiling == ISL_TILING_LINEAR); assert(surface->isl.tiling == ISL_TILING_LINEAR);
assert(image->shadow_surface.isl.tiling != ISL_TILING_LINEAR); assert(image->planes[plane].shadow_surface.isl.tiling != ISL_TILING_LINEAR);
surface = &image->shadow_surface; surface = &image->planes[plane].shadow_surface;
} }
if (view_usage == ISL_SURF_USAGE_RENDER_TARGET_BIT) if (view_usage == ISL_SURF_USAGE_RENDER_TARGET_BIT)
@ -798,9 +868,9 @@ anv_image_fill_surface_state(struct anv_device *device,
if (!clear_color) if (!clear_color)
clear_color = &default_clear_color; clear_color = &default_clear_color;
const uint64_t address = image->offset + surface->offset; const uint64_t address = image->planes[plane].bo_offset + surface->offset;
const uint64_t aux_address = (aux_usage == ISL_AUX_USAGE_NONE) ? 0 : const uint64_t aux_address = aux_usage == ISL_AUX_USAGE_NONE ?
image->offset + image->aux_surface.offset; 0 : (image->planes[plane].bo_offset + aux_surface->offset);
if (view_usage == ISL_SURF_USAGE_STORAGE_BIT && if (view_usage == ISL_SURF_USAGE_STORAGE_BIT &&
!(flags & ANV_IMAGE_VIEW_STATE_STORAGE_WRITE_ONLY) && !(flags & ANV_IMAGE_VIEW_STATE_STORAGE_WRITE_ONLY) &&
@ -889,7 +959,7 @@ anv_image_fill_surface_state(struct anv_device *device,
.view = &view, .view = &view,
.address = address + offset_B, .address = address + offset_B,
.clear_color = *clear_color, .clear_color = *clear_color,
.aux_surf = &image->aux_surface.isl, .aux_surf = &aux_surface->isl,
.aux_usage = aux_usage, .aux_usage = aux_usage,
.aux_address = aux_address, .aux_address = aux_address,
.mocs = device->default_mocs, .mocs = device->default_mocs,
@ -920,6 +990,22 @@ anv_image_fill_surface_state(struct anv_device *device,
} }
} }
static VkImageAspectFlags
remap_aspect_flags(VkImageAspectFlags view_aspects)
{
if (view_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT) {
if (_mesa_bitcount(view_aspects) == 1)
return VK_IMAGE_ASPECT_COLOR_BIT;
VkImageAspectFlags color_aspects = 0;
for (uint32_t i = 0; i < _mesa_bitcount(view_aspects); i++)
color_aspects |= VK_IMAGE_ASPECT_PLANE_0_BIT_KHR << i;
return color_aspects;
}
/* No special remapping needed for depth & stencil aspects. */
return view_aspects;
}
VkResult VkResult
anv_CreateImageView(VkDevice _device, anv_CreateImageView(VkDevice _device,
const VkImageViewCreateInfo *pCreateInfo, const VkImageViewCreateInfo *pCreateInfo,
@ -964,106 +1050,130 @@ anv_CreateImageView(VkDevice _device,
break; break;
} }
/* First expand aspects to the image's ones (for example
* VK_IMAGE_ASPECT_COLOR_BIT will be converted to
* VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR |
* VK_IMAGE_ASPECT_PLANE_2_BIT_KHR for an image of format
* VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR.
*/
VkImageAspectFlags expanded_aspects =
anv_image_expand_aspects(image, range->aspectMask);
iview->image = image; iview->image = image;
iview->aspect_mask = pCreateInfo->subresourceRange.aspectMask; /* Remap the expanded aspects for the image view. For example if only
* VK_IMAGE_ASPECT_PLANE_1_BIT_KHR was given in range->aspectMask, we will
* convert it to VK_IMAGE_ASPECT_COLOR_BIT since from the point of view of
* the image view, it only has a single plane.
*/
iview->aspect_mask = remap_aspect_flags(expanded_aspects);
iview->n_planes = anv_image_aspect_get_planes(iview->aspect_mask);
iview->vk_format = pCreateInfo->format; iview->vk_format = pCreateInfo->format;
struct anv_format_plane format =
anv_get_format_plane(&device->info, pCreateInfo->format,
range->aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT |
VK_IMAGE_ASPECT_STENCIL_BIT) ?
VK_IMAGE_ASPECT_DEPTH_BIT : range->aspectMask,
image->tiling);
iview->isl = (struct isl_view) {
.format = format.isl_format,
.base_level = range->baseMipLevel,
.levels = anv_get_levelCount(image, range),
.base_array_layer = range->baseArrayLayer,
.array_len = anv_get_layerCount(image, range),
.swizzle = {
.r = remap_swizzle(pCreateInfo->components.r,
VK_COMPONENT_SWIZZLE_R, format.swizzle),
.g = remap_swizzle(pCreateInfo->components.g,
VK_COMPONENT_SWIZZLE_G, format.swizzle),
.b = remap_swizzle(pCreateInfo->components.b,
VK_COMPONENT_SWIZZLE_B, format.swizzle),
.a = remap_swizzle(pCreateInfo->components.a,
VK_COMPONENT_SWIZZLE_A, format.swizzle),
},
};
iview->extent = (VkExtent3D) { iview->extent = (VkExtent3D) {
.width = anv_minify(image->extent.width , range->baseMipLevel), .width = anv_minify(image->extent.width , range->baseMipLevel),
.height = anv_minify(image->extent.height, range->baseMipLevel), .height = anv_minify(image->extent.height, range->baseMipLevel),
.depth = anv_minify(image->extent.depth , range->baseMipLevel), .depth = anv_minify(image->extent.depth , range->baseMipLevel),
}; };
if (pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_3D) { /* Now go through the underlying image selected planes (computed in
iview->isl.base_array_layer = 0; * expanded_aspects) and map them to planes in the image view.
iview->isl.array_len = iview->extent.depth;
}
if (pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE ||
pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) {
iview->isl.usage = ISL_SURF_USAGE_CUBE_BIT;
} else {
iview->isl.usage = 0;
}
/* Input attachment surfaces for color are allocated and filled
* out at BeginRenderPass time because they need compression information.
* Compression is not yet enabled for depth textures and stencil doesn't
* allow compression so we can just use the texture surface state from the
* view.
*/ */
if (view_usage & VK_IMAGE_USAGE_SAMPLED_BIT || uint32_t iaspect_bit, vplane = 0;
(view_usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT && anv_foreach_image_aspect_bit(iaspect_bit, image, expanded_aspects) {
!(iview->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT))) { uint32_t iplane =
iview->optimal_sampler_surface_state.state = alloc_surface_state(device); anv_image_aspect_to_plane(expanded_aspects, 1UL << iaspect_bit);
iview->general_sampler_surface_state.state = alloc_surface_state(device); VkImageAspectFlags vplane_aspect =
anv_plane_to_aspect(iview->aspect_mask, vplane);
struct anv_format_plane format =
anv_get_format_plane(&device->info, pCreateInfo->format,
vplane_aspect, image->tiling);
-      enum isl_aux_usage general_aux_usage =
-         anv_layout_to_aux_usage(&device->info, image, iview->aspect_mask,
-                                 VK_IMAGE_LAYOUT_GENERAL);
-      enum isl_aux_usage optimal_aux_usage =
-         anv_layout_to_aux_usage(&device->info, image, iview->aspect_mask,
-                                 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
-
-      anv_image_fill_surface_state(device, image, iview->aspect_mask,
-                                   &iview->isl, ISL_SURF_USAGE_TEXTURE_BIT,
-                                   optimal_aux_usage, NULL,
-                                   ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL,
-                                   &iview->optimal_sampler_surface_state,
-                                   NULL);
-
-      anv_image_fill_surface_state(device, image, iview->aspect_mask,
-                                   &iview->isl, ISL_SURF_USAGE_TEXTURE_BIT,
-                                   general_aux_usage, NULL,
-                                   0,
-                                   &iview->general_sampler_surface_state,
-                                   NULL);
-   }
-
-   /* NOTE: This one needs to go last since it may stomp isl_view.format */
-   if (view_usage & VK_IMAGE_USAGE_STORAGE_BIT) {
-      iview->storage_surface_state.state = alloc_surface_state(device);
-      iview->writeonly_storage_surface_state.state = alloc_surface_state(device);
-
-      anv_image_fill_surface_state(device, image, iview->aspect_mask,
-                                   &iview->isl, ISL_SURF_USAGE_STORAGE_BIT,
-                                   ISL_AUX_USAGE_NONE, NULL,
-                                   0,
-                                   &iview->storage_surface_state,
-                                   &iview->storage_image_param);
-
-      anv_image_fill_surface_state(device, image, iview->aspect_mask,
-                                   &iview->isl, ISL_SURF_USAGE_STORAGE_BIT,
-                                   ISL_AUX_USAGE_NONE, NULL,
-                                   ANV_IMAGE_VIEW_STATE_STORAGE_WRITE_ONLY,
-                                   &iview->writeonly_storage_surface_state,
-                                   NULL);
-   }
+      iview->planes[vplane].image_plane = iplane;
+
+      iview->planes[vplane].isl = (struct isl_view) {
+         .format = format.isl_format,
+         .base_level = range->baseMipLevel,
+         .levels = anv_get_levelCount(image, range),
+         .base_array_layer = range->baseArrayLayer,
+         .array_len = anv_get_layerCount(image, range),
+         .swizzle = {
+            .r = remap_swizzle(pCreateInfo->components.r,
+                               VK_COMPONENT_SWIZZLE_R, format.swizzle),
+            .g = remap_swizzle(pCreateInfo->components.g,
+                               VK_COMPONENT_SWIZZLE_G, format.swizzle),
+            .b = remap_swizzle(pCreateInfo->components.b,
+                               VK_COMPONENT_SWIZZLE_B, format.swizzle),
+            .a = remap_swizzle(pCreateInfo->components.a,
+                               VK_COMPONENT_SWIZZLE_A, format.swizzle),
+         },
+      };
+
+      if (pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_3D) {
+         iview->planes[vplane].isl.base_array_layer = 0;
+         iview->planes[vplane].isl.array_len = iview->extent.depth;
+      }
+
+      if (pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE ||
+          pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) {
+         iview->planes[vplane].isl.usage = ISL_SURF_USAGE_CUBE_BIT;
+      } else {
+         iview->planes[vplane].isl.usage = 0;
+      }
+
+      if (view_usage & VK_IMAGE_USAGE_SAMPLED_BIT ||
+          (view_usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT &&
+           !(iview->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT))) {
+         iview->planes[vplane].optimal_sampler_surface_state.state = alloc_surface_state(device);
+         iview->planes[vplane].general_sampler_surface_state.state = alloc_surface_state(device);
+
+         enum isl_aux_usage general_aux_usage =
+            anv_layout_to_aux_usage(&device->info, image, 1UL << iaspect_bit,
+                                    VK_IMAGE_LAYOUT_GENERAL);
+         enum isl_aux_usage optimal_aux_usage =
+            anv_layout_to_aux_usage(&device->info, image, 1UL << iaspect_bit,
+                                    VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+
+         anv_image_fill_surface_state(device, image, 1ULL << iaspect_bit,
+                                      &iview->planes[vplane].isl,
+                                      ISL_SURF_USAGE_TEXTURE_BIT,
+                                      optimal_aux_usage, NULL,
+                                      ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL,
+                                      &iview->planes[vplane].optimal_sampler_surface_state,
+                                      NULL);
+
+         anv_image_fill_surface_state(device, image, 1ULL << iaspect_bit,
+                                      &iview->planes[vplane].isl,
+                                      ISL_SURF_USAGE_TEXTURE_BIT,
+                                      general_aux_usage, NULL,
+                                      0,
+                                      &iview->planes[vplane].general_sampler_surface_state,
+                                      NULL);
+      }
+
+      /* NOTE: This one needs to go last since it may stomp isl_view.format */
+      if (view_usage & VK_IMAGE_USAGE_STORAGE_BIT) {
+         iview->planes[vplane].storage_surface_state.state = alloc_surface_state(device);
+         iview->planes[vplane].writeonly_storage_surface_state.state = alloc_surface_state(device);
+
+         anv_image_fill_surface_state(device, image, 1ULL << iaspect_bit,
+                                      &iview->planes[vplane].isl,
+                                      ISL_SURF_USAGE_STORAGE_BIT,
+                                      ISL_AUX_USAGE_NONE, NULL,
+                                      0,
+                                      &iview->planes[vplane].storage_surface_state,
+                                      &iview->planes[vplane].storage_image_param);
+
+         anv_image_fill_surface_state(device, image, 1ULL << iaspect_bit,
+                                      &iview->planes[vplane].isl,
+                                      ISL_SURF_USAGE_STORAGE_BIT,
+                                      ISL_AUX_USAGE_NONE, NULL,
+                                      ANV_IMAGE_VIEW_STATE_STORAGE_WRITE_ONLY,
+                                      &iview->planes[vplane].writeonly_storage_surface_state,
+                                      NULL);
+      }
+
+      vplane++;
+   }
*pView = anv_image_view_to_handle(iview); *pView = anv_image_view_to_handle(iview);
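The per-plane loop above is also what lets a client create a view of a single plane of a multi-planar image. A minimal, illustration-only sketch of such a call (client-side Vulkan, not part of this patch; the device and image handles and the NV12-style plane format are assumptions, and the image is assumed to have been created with VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT so an individual plane can be viewed with its plane-compatible format):

   /* Hypothetical client code: view the chroma plane (plane 1) of a 2-plane
    * image.  The single PLANE_1 aspect makes the driver loop above create one
    * view plane (vplane 0) that points at image plane 1. */
   VkImageViewCreateInfo view_info = {
      .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
      .image = image,                          /* assumed VkImage handle */
      .viewType = VK_IMAGE_VIEW_TYPE_2D,
      .format = VK_FORMAT_R8G8_UNORM,          /* plane 1 of an NV12-style format */
      .subresourceRange = {
         .aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR,
         .baseMipLevel = 0,
         .levelCount = 1,
         .baseArrayLayer = 0,
         .layerCount = 1,
      },
   };
   VkImageView plane1_view;
   VkResult result = vkCreateImageView(device, &view_info, NULL, &plane1_view);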
@ -1081,24 +1191,26 @@ anv_DestroyImageView(VkDevice _device, VkImageView _iview,
if (!iview) if (!iview)
return; return;
if (iview->optimal_sampler_surface_state.state.alloc_size > 0) { for (uint32_t plane = 0; plane < iview->n_planes; plane++) {
anv_state_pool_free(&device->surface_state_pool, if (iview->planes[plane].optimal_sampler_surface_state.state.alloc_size > 0) {
iview->optimal_sampler_surface_state.state); anv_state_pool_free(&device->surface_state_pool,
} iview->planes[plane].optimal_sampler_surface_state.state);
}
if (iview->general_sampler_surface_state.state.alloc_size > 0) { if (iview->planes[plane].general_sampler_surface_state.state.alloc_size > 0) {
anv_state_pool_free(&device->surface_state_pool, anv_state_pool_free(&device->surface_state_pool,
iview->general_sampler_surface_state.state); iview->planes[plane].general_sampler_surface_state.state);
} }
if (iview->storage_surface_state.state.alloc_size > 0) { if (iview->planes[plane].storage_surface_state.state.alloc_size > 0) {
anv_state_pool_free(&device->surface_state_pool, anv_state_pool_free(&device->surface_state_pool,
iview->storage_surface_state.state); iview->planes[plane].storage_surface_state.state);
} }
if (iview->writeonly_storage_surface_state.state.alloc_size > 0) { if (iview->planes[plane].writeonly_storage_surface_state.state.alloc_size > 0) {
anv_state_pool_free(&device->surface_state_pool, anv_state_pool_free(&device->surface_state_pool,
iview->writeonly_storage_surface_state.state); iview->planes[plane].writeonly_storage_surface_state.state);
}
} }
vk_free2(&device->alloc, pAllocator, iview); vk_free2(&device->alloc, pAllocator, iview);
@ -1206,16 +1318,21 @@ const struct anv_surface *
anv_image_get_surface_for_aspect_mask(const struct anv_image *image, anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
VkImageAspectFlags aspect_mask) VkImageAspectFlags aspect_mask)
{ {
VkImageAspectFlags sanitized_mask;
switch (aspect_mask) { switch (aspect_mask) {
case VK_IMAGE_ASPECT_COLOR_BIT: case VK_IMAGE_ASPECT_COLOR_BIT:
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT); assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
return &image->color_surface; sanitized_mask = VK_IMAGE_ASPECT_COLOR_BIT;
break;
case VK_IMAGE_ASPECT_DEPTH_BIT: case VK_IMAGE_ASPECT_DEPTH_BIT:
assert(image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT); assert(image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
return &image->depth_surface; sanitized_mask = VK_IMAGE_ASPECT_DEPTH_BIT;
break;
case VK_IMAGE_ASPECT_STENCIL_BIT: case VK_IMAGE_ASPECT_STENCIL_BIT:
assert(image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT); assert(image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);
return &image->stencil_surface; sanitized_mask = VK_IMAGE_ASPECT_STENCIL_BIT;
break;
case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT: case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT:
/* FINISHME: The Vulkan spec (git a511ba2) requires support for /* FINISHME: The Vulkan spec (git a511ba2) requires support for
* combined depth stencil formats. Specifically, it states: * combined depth stencil formats. Specifically, it states:
@ -1229,13 +1346,29 @@ anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
* stencil surfaces from the underlying surface. * stencil surfaces from the underlying surface.
*/ */
if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) { if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
return &image->depth_surface; sanitized_mask = VK_IMAGE_ASPECT_DEPTH_BIT;
} else { } else {
assert(image->aspects == VK_IMAGE_ASPECT_STENCIL_BIT); assert(image->aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
return &image->stencil_surface; sanitized_mask = VK_IMAGE_ASPECT_STENCIL_BIT;
} }
default: break;
case VK_IMAGE_ASPECT_PLANE_0_BIT_KHR:
assert((image->aspects & ~VK_IMAGE_ASPECT_ANY_COLOR_BIT) == 0);
sanitized_mask = VK_IMAGE_ASPECT_PLANE_0_BIT_KHR;
break;
case VK_IMAGE_ASPECT_PLANE_1_BIT_KHR:
assert((image->aspects & ~VK_IMAGE_ASPECT_ANY_COLOR_BIT) == 0);
sanitized_mask = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR;
break;
case VK_IMAGE_ASPECT_PLANE_2_BIT_KHR:
assert((image->aspects & ~VK_IMAGE_ASPECT_ANY_COLOR_BIT) == 0);
sanitized_mask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
break;
default:
unreachable("image does not have aspect"); unreachable("image does not have aspect");
return NULL; return NULL;
} }
uint32_t plane = anv_image_aspect_to_plane(image->aspects, sanitized_mask);
return &image->planes[plane].surface;
} }
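Because the hardware keeps depth and stencil in separate surfaces (see the anv_image comment further down), the plane lookup above resolves the two aspects of a combined depth/stencil image to two different anv_surfaces of the same VkImage. An illustration-only sketch, assuming an image whose aspects are DEPTH | STENCIL (the helper name is hypothetical):

   static void
   example_depth_stencil_surfaces(const struct anv_image *image)
   {
      /* Depth lives in plane 0; stencil gets its own plane when depth is
       * present, so the two lookups return distinct surfaces. */
      const struct anv_surface *depth =
         anv_image_get_surface_for_aspect_mask(image, VK_IMAGE_ASPECT_DEPTH_BIT);
      const struct anv_surface *stencil =
         anv_image_get_surface_for_aspect_mask(image, VK_IMAGE_ASPECT_STENCIL_BIT);
      assert(depth != stencil);
   }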

View file

@ -80,8 +80,8 @@ VkResult anv_CreateDmaBufImageINTEL(
pAllocator, &image_h); pAllocator, &image_h);
image = anv_image_from_handle(image_h); image = anv_image_from_handle(image_h);
image->bo = mem->bo; image->planes[0].bo = mem->bo;
image->offset = 0; image->planes[0].bo_offset = 0;
assert(image->extent.width > 0); assert(image->extent.width > 0);
assert(image->extent.height > 0); assert(image->extent.height > 0);

View file

@ -1551,6 +1551,16 @@ anv_pipe_invalidate_bits_for_access_flags(VkAccessFlags flags)
return pipe_bits; return pipe_bits;
} }
#define VK_IMAGE_ASPECT_ANY_COLOR_BIT ( \
VK_IMAGE_ASPECT_COLOR_BIT | \
VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | \
VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | \
VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
#define VK_IMAGE_ASPECT_PLANES_BITS ( \
VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | \
VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | \
VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
struct anv_vertex_binding { struct anv_vertex_binding {
struct anv_buffer * buffer; struct anv_buffer * buffer;
VkDeviceSize offset; VkDeviceSize offset;
@ -2214,10 +2224,47 @@ anv_image_aspect_to_plane(VkImageAspectFlags image_aspects,
case VK_IMAGE_ASPECT_PLANE_2_BIT_KHR: case VK_IMAGE_ASPECT_PLANE_2_BIT_KHR:
return 2; return 2;
default: default:
/* Purposefully assert with depth/stencil aspects. */
unreachable("invalid image aspect"); unreachable("invalid image aspect");
} }
} }
static inline uint32_t
anv_image_aspect_get_planes(VkImageAspectFlags aspect_mask)
{
uint32_t planes = 0;
if (aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT |
VK_IMAGE_ASPECT_DEPTH_BIT |
VK_IMAGE_ASPECT_STENCIL_BIT |
VK_IMAGE_ASPECT_PLANE_0_BIT_KHR))
planes++;
if (aspect_mask & VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)
planes++;
if (aspect_mask & VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)
planes++;
return planes;
}
static inline VkImageAspectFlags
anv_plane_to_aspect(VkImageAspectFlags image_aspects,
uint32_t plane)
{
if (image_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT) {
if (_mesa_bitcount(image_aspects) > 1)
return VK_IMAGE_ASPECT_PLANE_0_BIT_KHR << plane;
return VK_IMAGE_ASPECT_COLOR_BIT;
}
if (image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
return VK_IMAGE_ASPECT_DEPTH_BIT << plane;
assert(image_aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
return VK_IMAGE_ASPECT_STENCIL_BIT;
}
#define anv_foreach_image_aspect_bit(b, image, aspects) \
for_each_bit(b, anv_image_expand_aspects(image, aspects))
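To make the aspect/plane bookkeeping concrete, here is an illustration-only sketch of what the helpers above return for the image families the driver now distinguishes (the wrapper function is hypothetical; the values follow directly from the definitions above):

   static void
   example_aspect_plane_mapping(void)
   {
      /* Single-plane color image: one plane, and plane 0 maps back to the
       * generic color aspect. */
      assert(anv_image_aspect_get_planes(VK_IMAGE_ASPECT_COLOR_BIT) == 1);
      assert(anv_plane_to_aspect(VK_IMAGE_ASPECT_COLOR_BIT, 0) ==
             VK_IMAGE_ASPECT_COLOR_BIT);

      /* 3-plane YUV image: one plane per PLANE_i aspect, in order. */
      const VkImageAspectFlags yuv = VK_IMAGE_ASPECT_PLANE_0_BIT_KHR |
                                     VK_IMAGE_ASPECT_PLANE_1_BIT_KHR |
                                     VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
      assert(anv_image_aspect_get_planes(yuv) == 3);
      assert(anv_plane_to_aspect(yuv, 2) == VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);

      /* Combined depth/stencil image: depth sits in plane 0 and stencil in
       * plane 1 (VK_IMAGE_ASPECT_DEPTH_BIT shifted by the plane index). */
      const VkImageAspectFlags ds = VK_IMAGE_ASPECT_DEPTH_BIT |
                                    VK_IMAGE_ASPECT_STENCIL_BIT;
      assert(anv_plane_to_aspect(ds, 0) == VK_IMAGE_ASPECT_DEPTH_BIT);
      assert(anv_plane_to_aspect(ds, 1) == VK_IMAGE_ASPECT_STENCIL_BIT);
   }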
const struct anv_format * const struct anv_format *
anv_get_format(VkFormat format); anv_get_format(VkFormat format);
@ -2277,72 +2324,116 @@ struct anv_image {
* of the actual surface formats. * of the actual surface formats.
*/ */
VkFormat vk_format; VkFormat vk_format;
const struct anv_format *format;
VkImageAspectFlags aspects; VkImageAspectFlags aspects;
VkExtent3D extent; VkExtent3D extent;
uint32_t levels; uint32_t levels;
uint32_t array_size; uint32_t array_size;
uint32_t samples; /**< VkImageCreateInfo::samples */ uint32_t samples; /**< VkImageCreateInfo::samples */
uint32_t n_planes;
VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */ VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
VkImageTiling tiling; /** VkImageCreateInfo::tiling */ VkImageTiling tiling; /** VkImageCreateInfo::tiling */
VkDeviceSize size; VkDeviceSize size;
uint32_t alignment; uint32_t alignment;
-   /* Set when bound */
-   struct anv_bo *bo;
-   VkDeviceSize offset;
+   bool disjoint;

    /**
     * Image subsurfaces
     *
-    * For each foo, anv_image::foo_surface is valid if and only if
-    * anv_image::aspects has a foo aspect.
+    * For each foo, anv_image::planes[x].surface is valid if and only if
+    * anv_image::aspects has a x aspect. Refer to anv_image_aspect_to_plane()
+    * to figure the number associated with a given aspect.
     *
     * The hardware requires that the depth buffer and stencil buffer be
     * separate surfaces. From Vulkan's perspective, though, depth and stencil
     * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
     * allocate the depth and stencil buffers as separate surfaces in the same
     * bo.
+    *
+    * Memory layout :
+    *
+    * -----------------------
+    * |     surface0        |   /|\
+    * -----------------------    |
+    * |   shadow surface0   |    |
+    * -----------------------    | Plane 0
+    * |    aux surface0     |    |
+    * -----------------------    |
+    * | fast clear colors0  |   \|/
+    * -----------------------
+    * |     surface1        |   /|\
+    * -----------------------    |
+    * |   shadow surface1   |    |
+    * -----------------------    | Plane 1
+    * |    aux surface1     |    |
+    * -----------------------    |
+    * | fast clear colors1  |   \|/
+    * -----------------------
+    * |        ...          |
+    * |                     |
+    * -----------------------
     */
-   union {
-      struct anv_surface color_surface;
-
-      struct {
-         struct anv_surface depth_surface;
-         struct anv_surface stencil_surface;
-      };
-   };
-
-   /**
-    * A surface which shadows the main surface and may have different tiling.
-    * This is used for sampling using a tiling that isn't supported for other
-    * operations.
-    */
-   struct anv_surface shadow_surface;
-
-   /**
-    * For color images, this is the aux usage for this image when not used as a
-    * color attachment.
-    *
-    * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the image
-    * has a HiZ buffer.
-    */
-   enum isl_aux_usage aux_usage;
-
-   struct anv_surface aux_surface;
+   struct {
+      /**
+       * Offset of the entire plane (whenever the image is disjoint this is
+       * set to 0).
+       */
+      uint32_t offset;
+
+      VkDeviceSize size;
+      uint32_t alignment;
+
+      struct anv_surface surface;
+
+      /**
+       * A surface which shadows the main surface and may have different
+       * tiling. This is used for sampling using a tiling that isn't supported
+       * for other operations.
+       */
+      struct anv_surface shadow_surface;
+
+      /**
+       * For color images, this is the aux usage for this image when not used
+       * as a color attachment.
+       *
+       * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the
+       * image has a HiZ buffer.
+       */
+      enum isl_aux_usage aux_usage;
+
+      struct anv_surface aux_surface;
+
+      /**
+       * Offset of the fast clear state (used to compute the
+       * fast_clear_state_offset of the following planes).
+       */
+      uint32_t fast_clear_state_offset;
+
+      /**
+       * BO associated with this plane, set when bound.
+       */
+      struct anv_bo *bo;
+      VkDeviceSize bo_offset;
+   } planes[3];
 };
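With this layout, most address computations in the backend reduce to "plane BO + plane bo_offset + intra-plane offset". An illustration-only sketch of the pattern against the declarations above (the helper is hypothetical; the genX and blorp code in this patch open-code the same computation):

   static inline struct anv_address
   example_plane_surface_address(const struct anv_image *image, uint32_t plane)
   {
      /* Main surface of the given plane; the aux surface is reached the same
       * way through planes[plane].aux_surface.offset. */
      return (struct anv_address) {
         .bo = image->planes[plane].bo,
         .offset = image->planes[plane].bo_offset +
                   image->planes[plane].surface.offset,
      };
   }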
/* Returns the number of auxiliary buffer levels attached to an image. */ /* Returns the number of auxiliary buffer levels attached to an image. */
static inline uint8_t static inline uint8_t
anv_image_aux_levels(const struct anv_image * const image) anv_image_aux_levels(const struct anv_image * const image,
VkImageAspectFlagBits aspect)
{ {
assert(image); uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
return image->aux_surface.isl.size > 0 ? image->aux_surface.isl.levels : 0; return image->planes[plane].aux_surface.isl.size > 0 ?
image->planes[plane].aux_surface.isl.levels : 0;
} }
/* Returns the number of auxiliary buffer layers attached to an image. */ /* Returns the number of auxiliary buffer layers attached to an image. */
static inline uint32_t static inline uint32_t
anv_image_aux_layers(const struct anv_image * const image, anv_image_aux_layers(const struct anv_image * const image,
VkImageAspectFlagBits aspect,
const uint8_t miplevel) const uint8_t miplevel)
{ {
assert(image); assert(image);
@ -2350,14 +2441,15 @@ anv_image_aux_layers(const struct anv_image * const image,
/* The miplevel must exist in the main buffer. */ /* The miplevel must exist in the main buffer. */
assert(miplevel < image->levels); assert(miplevel < image->levels);
if (miplevel >= anv_image_aux_levels(image)) { if (miplevel >= anv_image_aux_levels(image, aspect)) {
/* There are no layers with auxiliary data because the miplevel has no /* There are no layers with auxiliary data because the miplevel has no
* auxiliary data. * auxiliary data.
*/ */
return 0; return 0;
} else { } else {
return MAX2(image->aux_surface.isl.logical_level0_px.array_len, uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
image->aux_surface.isl.logical_level0_px.depth >> miplevel); return MAX2(image->planes[plane].aux_surface.isl.logical_level0_px.array_len,
image->planes[plane].aux_surface.isl.logical_level0_px.depth >> miplevel);
} }
} }
@ -2400,19 +2492,20 @@ void
anv_ccs_resolve(struct anv_cmd_buffer * const cmd_buffer, anv_ccs_resolve(struct anv_cmd_buffer * const cmd_buffer,
const struct anv_state surface_state, const struct anv_state surface_state,
const struct anv_image * const image, const struct anv_image * const image,
VkImageAspectFlagBits aspect,
const uint8_t level, const uint32_t layer_count, const uint8_t level, const uint32_t layer_count,
const enum blorp_fast_clear_op op); const enum blorp_fast_clear_op op);
void void
anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer, anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image, const struct anv_image *image,
VkImageAspectFlagBits aspect,
const uint32_t base_level, const uint32_t level_count, const uint32_t base_level, const uint32_t level_count,
const uint32_t base_layer, uint32_t layer_count); const uint32_t base_layer, uint32_t layer_count);
void void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer, anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image, const struct anv_image *image,
VkImageAspectFlagBits aspect,
uint32_t base_level, uint32_t level_count, uint32_t base_level, uint32_t level_count,
uint32_t base_layer, uint32_t layer_count); uint32_t base_layer, uint32_t layer_count);
@ -2437,37 +2530,72 @@ anv_get_levelCount(const struct anv_image *image,
image->levels - range->baseMipLevel : range->levelCount; image->levels - range->baseMipLevel : range->levelCount;
} }
static inline VkImageAspectFlags
anv_image_expand_aspects(const struct anv_image *image,
VkImageAspectFlags aspects)
{
/* If the underlying image has color plane aspects and
* VK_IMAGE_ASPECT_COLOR_BIT has been requested, then return the aspects of
* the underlying image. */
if ((image->aspects & VK_IMAGE_ASPECT_PLANES_BITS) != 0 &&
aspects == VK_IMAGE_ASPECT_COLOR_BIT)
return image->aspects;
return aspects;
}
static inline bool
anv_image_aspects_compatible(VkImageAspectFlags aspects1,
VkImageAspectFlags aspects2)
{
if (aspects1 == aspects2)
return true;
/* Only color aspects covering the same number of planes are compatible. */
if ((aspects1 & VK_IMAGE_ASPECT_ANY_COLOR_BIT) != 0 &&
(aspects2 & VK_IMAGE_ASPECT_ANY_COLOR_BIT) != 0 &&
_mesa_bitcount(aspects1) == _mesa_bitcount(aspects2))
return true;
return false;
}
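A short, illustration-only sketch of how these two helpers interact with the per-plane iteration (the function is hypothetical and assumes a 2-plane image, i.e. image->aspects == PLANE_0 | PLANE_1):

   static void
   example_iterate_color_planes(const struct anv_image *image)
   {
      /* Requesting the generic color aspect on a multi-planar image expands
       * to the image's real plane aspects... */
      VkImageAspectFlags expanded =
         anv_image_expand_aspects(image, VK_IMAGE_ASPECT_COLOR_BIT);
      assert(expanded == image->aspects);

      /* ...which is exactly what anv_foreach_image_aspect_bit() walks: for a
       * 2-plane image the body runs twice, once per PLANE_i bit. */
      uint32_t aspect_bit;
      anv_foreach_image_aspect_bit(aspect_bit, image, VK_IMAGE_ASPECT_COLOR_BIT) {
         uint32_t plane = anv_image_aspect_to_plane(image->aspects,
                                                    1UL << aspect_bit);
         (void)plane;
      }

      /* Two single color aspects count as compatible, e.g. the COLOR aspect
       * of a plain RGBA image and the PLANE_1 aspect of a YUV image. */
      assert(anv_image_aspects_compatible(VK_IMAGE_ASPECT_COLOR_BIT,
                                          VK_IMAGE_ASPECT_PLANE_1_BIT_KHR));
   }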
 struct anv_image_view {
    const struct anv_image *image; /**< VkImageViewCreateInfo::image */
-   struct isl_view isl;
    VkImageAspectFlags aspect_mask;
    VkFormat vk_format;
    VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

-   /**
-    * RENDER_SURFACE_STATE when using image as a sampler surface with an image
-    * layout of SHADER_READ_ONLY_OPTIMAL or DEPTH_STENCIL_READ_ONLY_OPTIMAL.
-    */
-   struct anv_surface_state optimal_sampler_surface_state;
-
-   /**
-    * RENDER_SURFACE_STATE when using image as a sampler surface with an image
-    * layout of GENERAL.
-    */
-   struct anv_surface_state general_sampler_surface_state;
-
-   /**
-    * RENDER_SURFACE_STATE when using image as a storage image. Separate states
-    * for write-only and readable, using the real format for write-only and the
-    * lowered format for readable.
-    */
-   struct anv_surface_state storage_surface_state;
-   struct anv_surface_state writeonly_storage_surface_state;
-   struct brw_image_param storage_image_param;
+   unsigned n_planes;
+   struct {
+      uint32_t image_plane;
+
+      struct isl_view isl;
+
+      /**
+       * RENDER_SURFACE_STATE when using image as a sampler surface with an
+       * image layout of SHADER_READ_ONLY_OPTIMAL or
+       * DEPTH_STENCIL_READ_ONLY_OPTIMAL.
+       */
+      struct anv_surface_state optimal_sampler_surface_state;
+
+      /**
+       * RENDER_SURFACE_STATE when using image as a sampler surface with an
+       * image layout of GENERAL.
+       */
+      struct anv_surface_state general_sampler_surface_state;
+
+      /**
+       * RENDER_SURFACE_STATE when using image as a storage image. Separate
+       * states for write-only and readable, using the real format for
+       * write-only and the lowered format for readable.
+       */
+      struct anv_surface_state storage_surface_state;
+      struct anv_surface_state writeonly_storage_surface_state;
+      struct brw_image_param storage_image_param;
+   } planes[3];
 };
enum anv_image_view_state_flags { enum anv_image_view_state_flags {

View file

@ -221,7 +221,7 @@ anv_wsi_image_create(VkDevice device_h,
result = anv_AllocateMemory(anv_device_to_handle(device), result = anv_AllocateMemory(anv_device_to_handle(device),
&(VkMemoryAllocateInfo) { &(VkMemoryAllocateInfo) {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.allocationSize = image->size, .allocationSize = image->planes[0].surface.isl.size,
.memoryTypeIndex = 0, .memoryTypeIndex = 0,
}, },
NULL /* XXX: pAllocator */, NULL /* XXX: pAllocator */,
@ -240,7 +240,7 @@ anv_wsi_image_create(VkDevice device_h,
anv_BindImageMemory(device_h, image_h, memory_h, 0); anv_BindImageMemory(device_h, image_h, memory_h, 0);
struct anv_surface *surface = &image->color_surface; struct anv_surface *surface = &image->planes[0].surface;
assert(surface->isl.tiling == ISL_TILING_X); assert(surface->isl.tiling == ISL_TILING_X);
*row_pitch = surface->isl.row_pitch; *row_pitch = surface->isl.row_pitch;
@ -266,8 +266,8 @@ anv_wsi_image_create(VkDevice device_h,
*image_p = image_h; *image_p = image_h;
*memory_p = memory_h; *memory_p = memory_h;
*fd_p = fd; *fd_p = fd;
*size = image->size; *size = image->planes[0].surface.isl.size;
*offset = image->offset; *offset = image->planes[0].surface.offset;
return VK_SUCCESS; return VK_SUCCESS;
fail_alloc_memory: fail_alloc_memory:
anv_FreeMemory(device_h, memory_h, pAllocator); anv_FreeMemory(device_h, memory_h, pAllocator);

View file

@ -323,7 +323,7 @@ want_stencil_pma_fix(struct anv_cmd_buffer *cmd_buffer)
/* HiZ is enabled so we had better have a depth buffer with HiZ */ /* HiZ is enabled so we had better have a depth buffer with HiZ */
const struct anv_image_view *ds_iview = const struct anv_image_view *ds_iview =
anv_cmd_buffer_get_depth_stencil_view(cmd_buffer); anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
assert(ds_iview && ds_iview->image->aux_usage == ISL_AUX_USAGE_HIZ); assert(ds_iview && ds_iview->image->planes[0].aux_usage == ISL_AUX_USAGE_HIZ);
/* 3DSTATE_PS_EXTRA::PixelShaderValid */ /* 3DSTATE_PS_EXTRA::PixelShaderValid */
struct anv_pipeline *pipeline = cmd_buffer->state.pipeline; struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;

View file

@ -181,18 +181,45 @@ add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
static void static void
add_image_relocs(struct anv_cmd_buffer *cmd_buffer, add_image_relocs(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image, const struct anv_image *image,
const uint32_t plane,
struct anv_surface_state state) struct anv_surface_state state)
{ {
const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev; const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
add_surface_state_reloc(cmd_buffer, state.state, image->bo, state.address); add_surface_state_reloc(cmd_buffer, state.state,
image->planes[plane].bo, state.address);
if (state.aux_address) { if (state.aux_address) {
VkResult result = VkResult result =
anv_reloc_list_add(&cmd_buffer->surface_relocs, anv_reloc_list_add(&cmd_buffer->surface_relocs,
&cmd_buffer->pool->alloc, &cmd_buffer->pool->alloc,
state.state.offset + isl_dev->ss.aux_addr_offset, state.state.offset + isl_dev->ss.aux_addr_offset,
image->bo, state.aux_address); image->planes[plane].bo,
state.aux_address);
if (result != VK_SUCCESS)
anv_batch_set_error(&cmd_buffer->batch, result);
}
}
static void
add_image_view_relocs(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image_view *image_view,
const uint32_t plane,
struct anv_surface_state state)
{
const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
const struct anv_image *image = image_view->image;
uint32_t image_plane = image_view->planes[plane].image_plane;
add_surface_state_reloc(cmd_buffer, state.state,
image->planes[image_plane].bo, state.address);
if (state.aux_address) {
VkResult result =
anv_reloc_list_add(&cmd_buffer->surface_relocs,
&cmd_buffer->pool->alloc,
state.state.offset + isl_dev->ss.aux_addr_offset,
image->planes[image_plane].bo, state.aux_address);
if (result != VK_SUCCESS) if (result != VK_SUCCESS)
anv_batch_set_error(&cmd_buffer->batch, result); anv_batch_set_error(&cmd_buffer->batch, result);
} }
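The extra indirection in add_image_view_relocs() matters because a view's plane index and the image's plane index are not the same thing once a view selects a subset of the planes. Illustration-only sketch (hypothetical helper), assuming a view created with aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT_KHR on a multi-planar image:

   static void
   example_view_vs_image_plane(const struct anv_image_view *iview)
   {
      /* Such a view has exactly one view plane (index 0), but that view plane
       * refers to image plane 1.  That is what iview->planes[0].image_plane
       * records and what add_image_view_relocs() uses to pick the right BO
       * out of image->planes[]. */
      assert(iview->n_planes == 1);
      assert(iview->planes[0].image_plane == 1);
   }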
@ -225,8 +252,11 @@ color_attachment_compute_aux_usage(struct anv_device * device,
struct anv_attachment_state *att_state = &cmd_state->attachments[att]; struct anv_attachment_state *att_state = &cmd_state->attachments[att];
struct anv_image_view *iview = cmd_state->framebuffer->attachments[att]; struct anv_image_view *iview = cmd_state->framebuffer->attachments[att];
if (iview->isl.base_array_layer >= assert(iview->n_planes == 1);
anv_image_aux_layers(iview->image, iview->isl.base_level)) {
if (iview->planes[0].isl.base_array_layer >=
anv_image_aux_layers(iview->image, VK_IMAGE_ASPECT_COLOR_BIT,
iview->planes[0].isl.base_level)) {
/* There is no aux buffer which corresponds to the level and layer(s) /* There is no aux buffer which corresponds to the level and layer(s)
* being accessed. * being accessed.
*/ */
@ -234,12 +264,12 @@ color_attachment_compute_aux_usage(struct anv_device * device,
att_state->input_aux_usage = ISL_AUX_USAGE_NONE; att_state->input_aux_usage = ISL_AUX_USAGE_NONE;
att_state->fast_clear = false; att_state->fast_clear = false;
return; return;
} else if (iview->image->aux_usage == ISL_AUX_USAGE_MCS) { } else if (iview->image->planes[0].aux_usage == ISL_AUX_USAGE_MCS) {
att_state->aux_usage = ISL_AUX_USAGE_MCS; att_state->aux_usage = ISL_AUX_USAGE_MCS;
att_state->input_aux_usage = ISL_AUX_USAGE_MCS; att_state->input_aux_usage = ISL_AUX_USAGE_MCS;
att_state->fast_clear = false; att_state->fast_clear = false;
return; return;
} else if (iview->image->aux_usage == ISL_AUX_USAGE_CCS_E) { } else if (iview->image->planes[0].aux_usage == ISL_AUX_USAGE_CCS_E) {
att_state->aux_usage = ISL_AUX_USAGE_CCS_E; att_state->aux_usage = ISL_AUX_USAGE_CCS_E;
att_state->input_aux_usage = ISL_AUX_USAGE_CCS_E; att_state->input_aux_usage = ISL_AUX_USAGE_CCS_E;
} else { } else {
@ -255,7 +285,7 @@ color_attachment_compute_aux_usage(struct anv_device * device,
* In other words, we can only sample from a fast-cleared image if it * In other words, we can only sample from a fast-cleared image if it
* also supports color compression. * also supports color compression.
*/ */
if (isl_format_supports_ccs_e(&device->info, iview->isl.format)) { if (isl_format_supports_ccs_e(&device->info, iview->planes[0].isl.format)) {
att_state->input_aux_usage = ISL_AUX_USAGE_CCS_D; att_state->input_aux_usage = ISL_AUX_USAGE_CCS_D;
/* While fast-clear resolves and partial resolves are fairly cheap in the /* While fast-clear resolves and partial resolves are fairly cheap in the
@ -274,10 +304,10 @@ color_attachment_compute_aux_usage(struct anv_device * device,
} }
} }
assert(iview->image->aux_surface.isl.usage & ISL_SURF_USAGE_CCS_BIT); assert(iview->image->planes[0].aux_surface.isl.usage & ISL_SURF_USAGE_CCS_BIT);
att_state->clear_color_is_zero_one = att_state->clear_color_is_zero_one =
color_is_zero_one(att_state->clear_value.color, iview->isl.format); color_is_zero_one(att_state->clear_value.color, iview->planes[0].isl.format);
att_state->clear_color_is_zero = att_state->clear_color_is_zero =
att_state->clear_value.color.uint32[0] == 0 && att_state->clear_value.color.uint32[0] == 0 &&
att_state->clear_value.color.uint32[1] == 0 && att_state->clear_value.color.uint32[1] == 0 &&
@ -309,7 +339,8 @@ color_attachment_compute_aux_usage(struct anv_device * device,
* layers. * layers.
*/ */
if (cmd_state->framebuffer->layers != if (cmd_state->framebuffer->layers !=
anv_image_aux_layers(iview->image, iview->isl.base_level)) { anv_image_aux_layers(iview->image, VK_IMAGE_ASPECT_COLOR_BIT,
iview->planes[0].isl.base_level)) {
att_state->fast_clear = false; att_state->fast_clear = false;
if (GEN_GEN == 7) { if (GEN_GEN == 7) {
anv_perf_warn(device->instance, iview->image, anv_perf_warn(device->instance, iview->image,
@ -325,7 +356,7 @@ color_attachment_compute_aux_usage(struct anv_device * device,
if (cmd_state->pass->attachments[att].first_subpass_layout == if (cmd_state->pass->attachments[att].first_subpass_layout ==
VK_IMAGE_LAYOUT_GENERAL && VK_IMAGE_LAYOUT_GENERAL &&
(!att_state->clear_color_is_zero || (!att_state->clear_color_is_zero ||
iview->image->aux_usage == ISL_AUX_USAGE_NONE)) { iview->image->planes[0].aux_usage == ISL_AUX_USAGE_NONE)) {
att_state->fast_clear = false; att_state->fast_clear = false;
} }
@ -370,7 +401,7 @@ transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer,
* that's currently in the buffer. Therefore, a data-preserving resolve * that's currently in the buffer. Therefore, a data-preserving resolve
* operation is not needed. * operation is not needed.
*/ */
if (image->aux_usage != ISL_AUX_USAGE_HIZ || initial_layout == final_layout) if (image->planes[0].aux_usage != ISL_AUX_USAGE_HIZ || initial_layout == final_layout)
return; return;
const bool hiz_enabled = ISL_AUX_USAGE_HIZ == const bool hiz_enabled = ISL_AUX_USAGE_HIZ ==
@ -403,14 +434,20 @@ enum fast_clear_state_field {
static inline struct anv_address static inline struct anv_address
get_fast_clear_state_address(const struct anv_device *device, get_fast_clear_state_address(const struct anv_device *device,
const struct anv_image *image, const struct anv_image *image,
unsigned level, enum fast_clear_state_field field) VkImageAspectFlagBits aspect,
unsigned level,
enum fast_clear_state_field field)
{ {
assert(device && image); assert(device && image);
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT); assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT);
assert(level < anv_image_aux_levels(image)); assert(level < anv_image_aux_levels(image, aspect));
uint32_t offset = image->offset + image->aux_surface.offset +
image->aux_surface.isl.size + uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
anv_fast_clear_state_entry_size(device) * level;
/* Refer to the definition of anv_image for the memory layout. */
uint32_t offset = image->planes[plane].fast_clear_state_offset;
offset += anv_fast_clear_state_entry_size(device) * level;
switch (field) { switch (field) {
case FAST_CLEAR_STATE_FIELD_NEEDS_RESOLVE: case FAST_CLEAR_STATE_FIELD_NEEDS_RESOLVE:
@ -420,9 +457,10 @@ get_fast_clear_state_address(const struct anv_device *device,
break; break;
} }
assert(offset < image->offset + image->size); assert(offset < image->planes[plane].surface.offset + image->planes[plane].size);
return (struct anv_address) { return (struct anv_address) {
.bo = image->bo, .bo = image->planes[plane].bo,
.offset = offset, .offset = offset,
}; };
} }
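Compared to the old single-aux-surface scheme, the fast clear state is now located per plane: the entry for a given (aspect, level) pair starts at the plane's fast_clear_state_offset plus a per-level stride. An illustration-only sketch of that base address (hypothetical helper; the per-field adjustment done by the switch above is left out):

   static inline struct anv_address
   example_fast_clear_entry_base(const struct anv_device *device,
                                 const struct anv_image *image,
                                 VkImageAspectFlagBits aspect, unsigned level)
   {
      uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
      return (struct anv_address) {
         .bo = image->planes[plane].bo,
         .offset = image->planes[plane].fast_clear_state_offset +
                   anv_fast_clear_state_entry_size(device) * level,
      };
   }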
@ -436,14 +474,15 @@ get_fast_clear_state_address(const struct anv_device *device,
static void static void
genX(set_image_needs_resolve)(struct anv_cmd_buffer *cmd_buffer, genX(set_image_needs_resolve)(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image, const struct anv_image *image,
VkImageAspectFlagBits aspect,
unsigned level, bool needs_resolve) unsigned level, bool needs_resolve)
{ {
assert(cmd_buffer && image); assert(cmd_buffer && image);
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT); assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT);
assert(level < anv_image_aux_levels(image)); assert(level < anv_image_aux_levels(image, aspect));
const struct anv_address resolve_flag_addr = const struct anv_address resolve_flag_addr =
get_fast_clear_state_address(cmd_buffer->device, image, level, get_fast_clear_state_address(cmd_buffer->device, image, aspect, level,
FAST_CLEAR_STATE_FIELD_NEEDS_RESOLVE); FAST_CLEAR_STATE_FIELD_NEEDS_RESOLVE);
/* The HW docs say that there is no way to guarantee the completion of /* The HW docs say that there is no way to guarantee the completion of
@ -459,14 +498,15 @@ genX(set_image_needs_resolve)(struct anv_cmd_buffer *cmd_buffer,
static void static void
genX(load_needs_resolve_predicate)(struct anv_cmd_buffer *cmd_buffer, genX(load_needs_resolve_predicate)(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image, const struct anv_image *image,
VkImageAspectFlagBits aspect,
unsigned level) unsigned level)
{ {
assert(cmd_buffer && image); assert(cmd_buffer && image);
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT); assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT);
assert(level < anv_image_aux_levels(image)); assert(level < anv_image_aux_levels(image, aspect));
const struct anv_address resolve_flag_addr = const struct anv_address resolve_flag_addr =
get_fast_clear_state_address(cmd_buffer->device, image, level, get_fast_clear_state_address(cmd_buffer->device, image, aspect, level,
FAST_CLEAR_STATE_FIELD_NEEDS_RESOLVE); FAST_CLEAR_STATE_FIELD_NEEDS_RESOLVE);
/* Make the pending predicated resolve a no-op if one is not needed. /* Make the pending predicated resolve a no-op if one is not needed.
@ -487,11 +527,15 @@ genX(load_needs_resolve_predicate)(struct anv_cmd_buffer *cmd_buffer,
static void static void
init_fast_clear_state_entry(struct anv_cmd_buffer *cmd_buffer, init_fast_clear_state_entry(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image, const struct anv_image *image,
VkImageAspectFlagBits aspect,
unsigned level) unsigned level)
{ {
assert(cmd_buffer && image); assert(cmd_buffer && image);
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT); assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT);
assert(level < anv_image_aux_levels(image)); assert(level < anv_image_aux_levels(image, aspect));
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
enum isl_aux_usage aux_usage = image->planes[plane].aux_usage;
/* The resolve flag should be updated to signify that fast-clear/compression /* The resolve flag should be updated to signify that fast-clear/compression
* data needs to be removed when leaving the undefined layout. Such data * data needs to be removed when leaving the undefined layout. Such data
@ -499,8 +543,8 @@ init_fast_clear_state_entry(struct anv_cmd_buffer *cmd_buffer,
* to return incorrect data. The fast clear data in CCS_D buffers should * to return incorrect data. The fast clear data in CCS_D buffers should
* be removed because CCS_D isn't enabled all the time. * be removed because CCS_D isn't enabled all the time.
*/ */
genX(set_image_needs_resolve)(cmd_buffer, image, level, genX(set_image_needs_resolve)(cmd_buffer, image, aspect, level,
image->aux_usage == ISL_AUX_USAGE_NONE); aux_usage == ISL_AUX_USAGE_NONE);
/* The fast clear value dword(s) will be copied into a surface state object. /* The fast clear value dword(s) will be copied into a surface state object.
* Ensure that the restrictions of the fields in the dword(s) are followed. * Ensure that the restrictions of the fields in the dword(s) are followed.
@ -517,12 +561,12 @@ init_fast_clear_state_entry(struct anv_cmd_buffer *cmd_buffer,
for (; i < cmd_buffer->device->isl_dev.ss.clear_value_size; i += 4) { for (; i < cmd_buffer->device->isl_dev.ss.clear_value_size; i += 4) {
anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) { anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
sdi.Address = sdi.Address =
get_fast_clear_state_address(cmd_buffer->device, image, level, get_fast_clear_state_address(cmd_buffer->device, image, aspect, level,
FAST_CLEAR_STATE_FIELD_CLEAR_COLOR); FAST_CLEAR_STATE_FIELD_CLEAR_COLOR);
if (GEN_GEN >= 9) { if (GEN_GEN >= 9) {
/* MCS buffers on SKL+ can only have 1/0 clear colors. */ /* MCS buffers on SKL+ can only have 1/0 clear colors. */
assert(image->aux_usage == ISL_AUX_USAGE_MCS); assert(aux_usage == ISL_AUX_USAGE_MCS);
sdi.ImmediateData = 0; sdi.ImmediateData = 0;
} else if (GEN_VERSIONx10 >= 75) { } else if (GEN_VERSIONx10 >= 75) {
/* Pre-SKL, the dword containing the clear values also contains /* Pre-SKL, the dword containing the clear values also contains
@ -552,19 +596,20 @@ static void
genX(copy_fast_clear_dwords)(struct anv_cmd_buffer *cmd_buffer, genX(copy_fast_clear_dwords)(struct anv_cmd_buffer *cmd_buffer,
struct anv_state surface_state, struct anv_state surface_state,
const struct anv_image *image, const struct anv_image *image,
VkImageAspectFlagBits aspect,
unsigned level, unsigned level,
bool copy_from_surface_state) bool copy_from_surface_state)
{ {
assert(cmd_buffer && image); assert(cmd_buffer && image);
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT); assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT);
assert(level < anv_image_aux_levels(image)); assert(level < anv_image_aux_levels(image, aspect));
struct anv_bo *ss_bo = struct anv_bo *ss_bo =
&cmd_buffer->device->surface_state_pool.block_pool.bo; &cmd_buffer->device->surface_state_pool.block_pool.bo;
uint32_t ss_clear_offset = surface_state.offset + uint32_t ss_clear_offset = surface_state.offset +
cmd_buffer->device->isl_dev.ss.clear_value_offset; cmd_buffer->device->isl_dev.ss.clear_value_offset;
const struct anv_address entry_addr = const struct anv_address entry_addr =
get_fast_clear_state_address(cmd_buffer->device, image, level, get_fast_clear_state_address(cmd_buffer->device, image, aspect, level,
FAST_CLEAR_STATE_FIELD_CLEAR_COLOR); FAST_CLEAR_STATE_FIELD_CLEAR_COLOR);
unsigned copy_size = cmd_buffer->device->isl_dev.ss.clear_value_size; unsigned copy_size = cmd_buffer->device->isl_dev.ss.clear_value_size;
@ -605,6 +650,7 @@ genX(copy_fast_clear_dwords)(struct anv_cmd_buffer *cmd_buffer,
static void static void
transition_color_buffer(struct anv_cmd_buffer *cmd_buffer, transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image, const struct anv_image *image,
VkImageAspectFlagBits aspect,
const uint32_t base_level, uint32_t level_count, const uint32_t base_level, uint32_t level_count,
uint32_t base_layer, uint32_t layer_count, uint32_t base_layer, uint32_t layer_count,
VkImageLayout initial_layout, VkImageLayout initial_layout,
@ -612,7 +658,7 @@ transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
{ {
/* Validate the inputs. */ /* Validate the inputs. */
assert(cmd_buffer); assert(cmd_buffer);
assert(image && image->aspects == VK_IMAGE_ASPECT_COLOR_BIT); assert(image && image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT);
/* These values aren't supported for simplicity's sake. */ /* These values aren't supported for simplicity's sake. */
assert(level_count != VK_REMAINING_MIP_LEVELS && assert(level_count != VK_REMAINING_MIP_LEVELS &&
layer_count != VK_REMAINING_ARRAY_LAYERS); layer_count != VK_REMAINING_ARRAY_LAYERS);
@ -632,22 +678,25 @@ transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
if (initial_layout == final_layout) if (initial_layout == final_layout)
return; return;
if (image->shadow_surface.isl.size > 0 && uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
if (image->planes[plane].shadow_surface.isl.size > 0 &&
final_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { final_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
/* This surface is a linear compressed image with a tiled shadow surface /* This surface is a linear compressed image with a tiled shadow surface
* for texturing. The client is about to use it in READ_ONLY_OPTIMAL so * for texturing. The client is about to use it in READ_ONLY_OPTIMAL so
* we need to ensure the shadow copy is up-to-date. * we need to ensure the shadow copy is up-to-date.
*/ */
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT); assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
assert(image->color_surface.isl.tiling == ISL_TILING_LINEAR); assert(image->planes[plane].surface.isl.tiling == ISL_TILING_LINEAR);
assert(image->shadow_surface.isl.tiling != ISL_TILING_LINEAR); assert(image->planes[plane].shadow_surface.isl.tiling != ISL_TILING_LINEAR);
assert(isl_format_is_compressed(image->color_surface.isl.format)); assert(isl_format_is_compressed(image->planes[plane].surface.isl.format));
anv_image_copy_to_shadow(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT, assert(plane == 0);
anv_image_copy_to_shadow(cmd_buffer, image,
base_level, level_count, base_level, level_count,
base_layer, layer_count); base_layer, layer_count);
} }
if (base_layer >= anv_image_aux_layers(image, base_level)) if (base_layer >= anv_image_aux_layers(image, aspect, base_level))
return; return;
/* A transition of a 3D subresource works on all slices at a time. */ /* A transition of a 3D subresource works on all slices at a time. */
@ -657,9 +706,9 @@ transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
} }
/* We're interested in the subresource range subset that has aux data. */ /* We're interested in the subresource range subset that has aux data. */
level_count = MIN2(level_count, anv_image_aux_levels(image) - base_level); level_count = MIN2(level_count, anv_image_aux_levels(image, aspect) - base_level);
layer_count = MIN2(layer_count, layer_count = MIN2(layer_count,
anv_image_aux_layers(image, base_level) - base_layer); anv_image_aux_layers(image, aspect, base_level) - base_layer);
last_level_num = base_level + level_count; last_level_num = base_level + level_count;
/* Record whether or not the layout is undefined. Pre-initialized images /* Record whether or not the layout is undefined. Pre-initialized images
@ -680,7 +729,7 @@ transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
* Initialize the relevant clear buffer entries. * Initialize the relevant clear buffer entries.
*/ */
for (unsigned level = base_level; level < last_level_num; level++) for (unsigned level = base_level; level < last_level_num; level++)
init_fast_clear_state_entry(cmd_buffer, image, level); init_fast_clear_state_entry(cmd_buffer, image, aspect, level);
/* Initialize the aux buffers to enable correct rendering. This operation /* Initialize the aux buffers to enable correct rendering. This operation
* requires up to two steps: one to rid the aux buffer of data that may * requires up to two steps: one to rid the aux buffer of data that may
@ -704,7 +753,8 @@ transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
"define an MCS buffer."); "define an MCS buffer.");
} }
anv_image_fast_clear(cmd_buffer, image, base_level, level_count, anv_image_fast_clear(cmd_buffer, image, aspect,
base_level, level_count,
base_layer, layer_count); base_layer, layer_count);
} }
/* At this point, some elements of the CCS buffer may have the fast-clear /* At this point, some elements of the CCS buffer may have the fast-clear
@ -716,7 +766,8 @@ transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
* enabled. In this case, we must force the associated CCS buffers of the * enabled. In this case, we must force the associated CCS buffers of the
* specified range to enter the ambiguated state in advance. * specified range to enter the ambiguated state in advance.
*/ */
if (image->samples == 1 && image->aux_usage != ISL_AUX_USAGE_CCS_E && if (image->samples == 1 &&
image->planes[plane].aux_usage != ISL_AUX_USAGE_CCS_E &&
final_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { final_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
/* The CCS_D buffer may not be enabled in the final layout. Continue /* The CCS_D buffer may not be enabled in the final layout. Continue
* executing this function to perform a resolve. * executing this function to perform a resolve.
@ -768,13 +819,14 @@ transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
/* The number of layers changes at each 3D miplevel. */ /* The number of layers changes at each 3D miplevel. */
if (image->type == VK_IMAGE_TYPE_3D) { if (image->type == VK_IMAGE_TYPE_3D) {
layer_count = MIN2(layer_count, anv_image_aux_layers(image, level)); layer_count = MIN2(layer_count, anv_image_aux_layers(image, aspect, level));
} }
genX(load_needs_resolve_predicate)(cmd_buffer, image, level); genX(load_needs_resolve_predicate)(cmd_buffer, image, aspect, level);
enum isl_aux_usage aux_usage = image->aux_usage == ISL_AUX_USAGE_NONE ? enum isl_aux_usage aux_usage =
ISL_AUX_USAGE_CCS_D : image->aux_usage; image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE ?
ISL_AUX_USAGE_CCS_D : image->planes[plane].aux_usage;
/* Create a surface state with the right clear color and perform the /* Create a surface state with the right clear color and perform the
* resolve. * resolve.
@ -784,7 +836,7 @@ transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
anv_image_fill_surface_state(cmd_buffer->device, anv_image_fill_surface_state(cmd_buffer->device,
image, VK_IMAGE_ASPECT_COLOR_BIT, image, VK_IMAGE_ASPECT_COLOR_BIT,
&(struct isl_view) { &(struct isl_view) {
.format = image->color_surface.isl.format, .format = image->planes[plane].surface.isl.format,
.swizzle = ISL_SWIZZLE_IDENTITY, .swizzle = ISL_SWIZZLE_IDENTITY,
.base_level = level, .base_level = level,
.levels = 1, .levels = 1,
@ -794,16 +846,16 @@ transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
ISL_SURF_USAGE_RENDER_TARGET_BIT, ISL_SURF_USAGE_RENDER_TARGET_BIT,
aux_usage, NULL, 0, aux_usage, NULL, 0,
&surface_state, NULL); &surface_state, NULL);
add_image_relocs(cmd_buffer, image, surface_state); add_image_relocs(cmd_buffer, image, 0, surface_state);
genX(copy_fast_clear_dwords)(cmd_buffer, surface_state.state, image, genX(copy_fast_clear_dwords)(cmd_buffer, surface_state.state, image,
level, false /* copy to ss */); aspect, level, false /* copy to ss */);
anv_ccs_resolve(cmd_buffer, surface_state.state, image, anv_ccs_resolve(cmd_buffer, surface_state.state, image,
level, layer_count, aspect, level, layer_count,
image->aux_usage == ISL_AUX_USAGE_CCS_E ? image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_E ?
BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL : BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL :
BLORP_FAST_CLEAR_OP_RESOLVE_FULL); BLORP_FAST_CLEAR_OP_RESOLVE_FULL);
genX(set_image_needs_resolve)(cmd_buffer, image, level, false); genX(set_image_needs_resolve)(cmd_buffer, image, aspect, level, false);
} }
cmd_buffer->state.pending_pipe_bits |= cmd_buffer->state.pending_pipe_bits |=
@ -889,7 +941,7 @@ genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
VkImageAspectFlags att_aspects = vk_format_aspects(att->format); VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
VkImageAspectFlags clear_aspects = 0; VkImageAspectFlags clear_aspects = 0;
if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) { if (att_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT) {
/* color attachment */ /* color attachment */
if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT; clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
@ -913,9 +965,11 @@ genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
struct anv_image_view *iview = framebuffer->attachments[i]; struct anv_image_view *iview = framebuffer->attachments[i];
anv_assert(iview->vk_format == att->format); anv_assert(iview->vk_format == att->format);
anv_assert(iview->n_planes == 1);
union isl_color_value clear_color = { .u32 = { 0, } }; union isl_color_value clear_color = { .u32 = { 0, } };
if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) { if (att_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT) {
assert(att_aspects == VK_IMAGE_ASPECT_COLOR_BIT);
color_attachment_compute_aux_usage(cmd_buffer->device, color_attachment_compute_aux_usage(cmd_buffer->device,
state, i, begin->renderArea, state, i, begin->renderArea,
&clear_color); &clear_color);
@ -923,7 +977,7 @@ genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
anv_image_fill_surface_state(cmd_buffer->device, anv_image_fill_surface_state(cmd_buffer->device,
iview->image, iview->image,
VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_ASPECT_COLOR_BIT,
&iview->isl, &iview->planes[0].isl,
ISL_SURF_USAGE_RENDER_TARGET_BIT, ISL_SURF_USAGE_RENDER_TARGET_BIT,
state->attachments[i].aux_usage, state->attachments[i].aux_usage,
&clear_color, &clear_color,
@ -931,8 +985,8 @@ genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
&state->attachments[i].color, &state->attachments[i].color,
NULL); NULL);
add_image_relocs(cmd_buffer, iview->image, add_image_view_relocs(cmd_buffer, iview, 0,
state->attachments[i].color); state->attachments[i].color);
} else { } else {
/* This field will be initialized after the first subpass /* This field will be initialized after the first subpass
* transition. * transition.
@ -946,7 +1000,7 @@ genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
anv_image_fill_surface_state(cmd_buffer->device, anv_image_fill_surface_state(cmd_buffer->device,
iview->image, iview->image,
VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_ASPECT_COLOR_BIT,
&iview->isl, &iview->planes[0].isl,
ISL_SURF_USAGE_TEXTURE_BIT, ISL_SURF_USAGE_TEXTURE_BIT,
state->attachments[i].input_aux_usage, state->attachments[i].input_aux_usage,
&clear_color, &clear_color,
@ -954,8 +1008,8 @@ genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
&state->attachments[i].input, &state->attachments[i].input,
NULL); NULL);
add_image_relocs(cmd_buffer, iview->image, add_image_view_relocs(cmd_buffer, iview, 0,
state->attachments[i].input); state->attachments[i].input);
} }
} }
} }
@ -1380,14 +1434,20 @@ void genX(CmdPipelineBarrier)(
transition_depth_buffer(cmd_buffer, image, transition_depth_buffer(cmd_buffer, image,
pImageMemoryBarriers[i].oldLayout, pImageMemoryBarriers[i].oldLayout,
pImageMemoryBarriers[i].newLayout); pImageMemoryBarriers[i].newLayout);
} else if (range->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) { } else if (range->aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT) {
transition_color_buffer(cmd_buffer, image, VkImageAspectFlags color_aspects =
range->baseMipLevel, anv_image_expand_aspects(image, range->aspectMask);
anv_get_levelCount(image, range), uint32_t aspect_bit;
range->baseArrayLayer,
anv_get_layerCount(image, range), anv_foreach_image_aspect_bit(aspect_bit, image, color_aspects) {
pImageMemoryBarriers[i].oldLayout, transition_color_buffer(cmd_buffer, image, 1UL << aspect_bit,
pImageMemoryBarriers[i].newLayout); range->baseMipLevel,
anv_get_levelCount(image, range),
range->baseArrayLayer,
anv_get_layerCount(image, range),
pImageMemoryBarriers[i].oldLayout,
pImageMemoryBarriers[i].newLayout);
}
} }
} }
@ -1578,26 +1638,28 @@ emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: { case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
struct anv_surface_state sstate = struct anv_surface_state sstate =
(desc->layout == VK_IMAGE_LAYOUT_GENERAL) ? (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
desc->image_view->general_sampler_surface_state : desc->image_view->planes[binding->plane].general_sampler_surface_state :
desc->image_view->optimal_sampler_surface_state; desc->image_view->planes[binding->plane].optimal_sampler_surface_state;
surface_state = sstate.state; surface_state = sstate.state;
assert(surface_state.alloc_size); assert(surface_state.alloc_size);
add_image_relocs(cmd_buffer, desc->image_view->image, sstate); add_image_view_relocs(cmd_buffer, desc->image_view,
binding->plane, sstate);
break; break;
} }
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
assert(stage == MESA_SHADER_FRAGMENT); assert(stage == MESA_SHADER_FRAGMENT);
if (desc->image_view->aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT) { if ((desc->image_view->aspect_mask & VK_IMAGE_ASPECT_ANY_COLOR_BIT) == 0) {
/* For depth and stencil input attachments, we treat it like any /* For depth and stencil input attachments, we treat it like any
* old texture that a user may have bound. * old texture that a user may have bound.
*/ */
struct anv_surface_state sstate = struct anv_surface_state sstate =
(desc->layout == VK_IMAGE_LAYOUT_GENERAL) ? (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
desc->image_view->general_sampler_surface_state : desc->image_view->planes[binding->plane].general_sampler_surface_state :
desc->image_view->optimal_sampler_surface_state; desc->image_view->planes[binding->plane].optimal_sampler_surface_state;
surface_state = sstate.state; surface_state = sstate.state;
assert(surface_state.alloc_size); assert(surface_state.alloc_size);
add_image_relocs(cmd_buffer, desc->image_view->image, sstate); add_image_view_relocs(cmd_buffer, desc->image_view,
binding->plane, sstate);
} else { } else {
/* For color input attachments, we create the surface state at /* For color input attachments, we create the surface state at
* vkBeginRenderPass time so that we can include aux and clear * vkBeginRenderPass time so that we can include aux and clear
@ -1612,16 +1674,17 @@ emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
struct anv_surface_state sstate = (binding->write_only) struct anv_surface_state sstate = (binding->write_only)
? desc->image_view->writeonly_storage_surface_state ? desc->image_view->planes[binding->plane].writeonly_storage_surface_state
: desc->image_view->storage_surface_state; : desc->image_view->planes[binding->plane].storage_surface_state;
surface_state = sstate.state; surface_state = sstate.state;
assert(surface_state.alloc_size); assert(surface_state.alloc_size);
add_image_relocs(cmd_buffer, desc->image_view->image, sstate); add_image_view_relocs(cmd_buffer, desc->image_view,
binding->plane, sstate);
struct brw_image_param *image_param = struct brw_image_param *image_param =
&cmd_buffer->state.push_constants[stage]->images[image++]; &cmd_buffer->state.push_constants[stage]->images[image++];
*image_param = desc->image_view->storage_image_param; *image_param = desc->image_view->planes[binding->plane].storage_image_param;
image_param->surface_idx = bias + s; image_param->surface_idx = bias + s;
break; break;
} }
@ -2750,41 +2813,51 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
}; };
if (iview) if (iview)
info.view = &iview->isl; info.view = &iview->planes[0].isl;
if (image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) { if (image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
info.depth_surf = &image->depth_surface.isl; uint32_t depth_plane =
anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_DEPTH_BIT);
const struct anv_surface *surface = &image->planes[depth_plane].surface;
info.depth_surf = &surface->isl;
info.depth_address = info.depth_address =
anv_batch_emit_reloc(&cmd_buffer->batch, anv_batch_emit_reloc(&cmd_buffer->batch,
dw + device->isl_dev.ds.depth_offset / 4, dw + device->isl_dev.ds.depth_offset / 4,
image->bo, image->planes[depth_plane].bo,
image->offset + image->depth_surface.offset); image->planes[depth_plane].bo_offset +
surface->offset);
const uint32_t ds = const uint32_t ds =
cmd_buffer->state.subpass->depth_stencil_attachment.attachment; cmd_buffer->state.subpass->depth_stencil_attachment.attachment;
info.hiz_usage = cmd_buffer->state.attachments[ds].aux_usage; info.hiz_usage = cmd_buffer->state.attachments[ds].aux_usage;
if (info.hiz_usage == ISL_AUX_USAGE_HIZ) { if (info.hiz_usage == ISL_AUX_USAGE_HIZ) {
info.hiz_surf = &image->aux_surface.isl; info.hiz_surf = &image->planes[depth_plane].aux_surface.isl;
info.hiz_address = info.hiz_address =
anv_batch_emit_reloc(&cmd_buffer->batch, anv_batch_emit_reloc(&cmd_buffer->batch,
dw + device->isl_dev.ds.hiz_offset / 4, dw + device->isl_dev.ds.hiz_offset / 4,
image->bo, image->planes[depth_plane].bo,
image->offset + image->aux_surface.offset); image->planes[depth_plane].bo_offset +
image->planes[depth_plane].aux_surface.offset);
info.depth_clear_value = ANV_HZ_FC_VAL; info.depth_clear_value = ANV_HZ_FC_VAL;
} }
} }
if (image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) { if (image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) {
info.stencil_surf = &image->stencil_surface.isl; uint32_t stencil_plane =
anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_STENCIL_BIT);
const struct anv_surface *surface = &image->planes[stencil_plane].surface;
info.stencil_surf = &surface->isl;
info.stencil_address = info.stencil_address =
anv_batch_emit_reloc(&cmd_buffer->batch, anv_batch_emit_reloc(&cmd_buffer->batch,
dw + device->isl_dev.ds.stencil_offset / 4, dw + device->isl_dev.ds.stencil_offset / 4,
image->bo, image->planes[stencil_plane].bo,
image->offset + image->stencil_surface.offset); image->planes[stencil_plane].bo_offset + surface->offset);
} }
isl_emit_depth_stencil_hiz_s(&device->isl_dev, dw, &info); isl_emit_depth_stencil_hiz_s(&device->isl_dev, dw, &info);
@ -2874,7 +2947,7 @@ cmd_buffer_subpass_transition_layouts(struct anv_cmd_buffer * const cmd_buffer,
att_state->input_aux_usage != att_state->aux_usage; att_state->input_aux_usage != att_state->aux_usage;
if (subpass_end) { if (subpass_end) {
target_layout = att_desc->final_layout; target_layout = att_desc->final_layout;
} else if (iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT && } else if (iview->aspect_mask & VK_IMAGE_ASPECT_ANY_COLOR_BIT &&
!input_needs_resolve) { !input_needs_resolve) {
/* Layout transitions before the final only help to enable sampling as /* Layout transitions before the final only help to enable sampling as
* an input attachment. If the input attachment supports sampling * an input attachment. If the input attachment supports sampling
@ -2893,11 +2966,12 @@ cmd_buffer_subpass_transition_layouts(struct anv_cmd_buffer * const cmd_buffer,
att_state->aux_usage = att_state->aux_usage =
anv_layout_to_aux_usage(&cmd_buffer->device->info, image, anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
VK_IMAGE_ASPECT_DEPTH_BIT, target_layout); VK_IMAGE_ASPECT_DEPTH_BIT, target_layout);
} else if (image->aspects == VK_IMAGE_ASPECT_COLOR_BIT) { } else if (image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT) {
transition_color_buffer(cmd_buffer, image, assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
iview->isl.base_level, 1, transition_color_buffer(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT,
iview->isl.base_array_layer, iview->planes[0].isl.base_level, 1,
iview->isl.array_len, iview->planes[0].isl.base_array_layer,
iview->planes[0].isl.array_len,
att_state->current_layout, target_layout); att_state->current_layout, target_layout);
} }
@ -2942,11 +3016,13 @@ cmd_buffer_subpass_sync_fast_clear_values(struct anv_cmd_buffer *cmd_buffer)
if (att_state->pending_clear_aspects && att_state->fast_clear) { if (att_state->pending_clear_aspects && att_state->fast_clear) {
/* Update the fast clear state entry. */ /* Update the fast clear state entry. */
genX(copy_fast_clear_dwords)(cmd_buffer, att_state->color.state, genX(copy_fast_clear_dwords)(cmd_buffer, att_state->color.state,
iview->image, iview->isl.base_level, iview->image,
VK_IMAGE_ASPECT_COLOR_BIT,
iview->planes[0].isl.base_level,
true /* copy from ss */); true /* copy from ss */);
/* Fast-clears impact whether or not a resolve will be necessary. */ /* Fast-clears impact whether or not a resolve will be necessary. */
if (iview->image->aux_usage == ISL_AUX_USAGE_CCS_E && if (iview->image->planes[0].aux_usage == ISL_AUX_USAGE_CCS_E &&
att_state->clear_color_is_zero) { att_state->clear_color_is_zero) {
/* This image always has the auxiliary buffer enabled. We can mark /* This image always has the auxiliary buffer enabled. We can mark
* the subresource as not needing a resolve because the clear color * the subresource as not needing a resolve because the clear color
@ -2954,10 +3030,14 @@ cmd_buffer_subpass_sync_fast_clear_values(struct anv_cmd_buffer *cmd_buffer)
* being used for sampling. * being used for sampling.
*/ */
genX(set_image_needs_resolve)(cmd_buffer, iview->image, genX(set_image_needs_resolve)(cmd_buffer, iview->image,
iview->isl.base_level, false); VK_IMAGE_ASPECT_COLOR_BIT,
iview->planes[0].isl.base_level,
false);
} else { } else {
genX(set_image_needs_resolve)(cmd_buffer, iview->image, genX(set_image_needs_resolve)(cmd_buffer, iview->image,
iview->isl.base_level, true); VK_IMAGE_ASPECT_COLOR_BIT,
iview->planes[0].isl.base_level,
true);
} }
} else if (rp_att->load_op == VK_ATTACHMENT_LOAD_OP_LOAD) { } else if (rp_att->load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
/* The attachment may have been fast-cleared in a previous render /* The attachment may have been fast-cleared in a previous render
@ -2966,13 +3046,17 @@ cmd_buffer_subpass_sync_fast_clear_values(struct anv_cmd_buffer *cmd_buffer)
* TODO: Do this only once per render pass instead of every subpass. * TODO: Do this only once per render pass instead of every subpass.
*/ */
genX(copy_fast_clear_dwords)(cmd_buffer, att_state->color.state, genX(copy_fast_clear_dwords)(cmd_buffer, att_state->color.state,
iview->image, iview->isl.base_level, iview->image,
VK_IMAGE_ASPECT_COLOR_BIT,
iview->planes[0].isl.base_level,
false /* copy to ss */); false /* copy to ss */);
if (need_input_attachment_state(rp_att) && if (need_input_attachment_state(rp_att) &&
att_state->input_aux_usage != ISL_AUX_USAGE_NONE) { att_state->input_aux_usage != ISL_AUX_USAGE_NONE) {
genX(copy_fast_clear_dwords)(cmd_buffer, att_state->input.state, genX(copy_fast_clear_dwords)(cmd_buffer, att_state->input.state,
iview->image, iview->isl.base_level, iview->image,
VK_IMAGE_ASPECT_COLOR_BIT,
iview->planes[0].isl.base_level,
false /* copy to ss */); false /* copy to ss */);
} }
} }