2016-08-22 21:37:28 -07:00
|
|
|
/*
|
|
|
|
|
* Copyright © 2016 Intel Corporation
|
|
|
|
|
*
|
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
|
*
|
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
|
* Software.
|
|
|
|
|
*
|
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include "anv_private.h"
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
lookup_blorp_shader(struct blorp_context *blorp,
|
|
|
|
|
const void *key, uint32_t key_size,
|
|
|
|
|
uint32_t *kernel_out, void *prog_data_out)
|
|
|
|
|
{
|
|
|
|
|
struct anv_device *device = blorp->driver_ctx;
|
|
|
|
|
|
|
|
|
|
/* The blorp cache must be a real cache */
|
|
|
|
|
assert(device->blorp_shader_cache.cache);
|
|
|
|
|
|
|
|
|
|
struct anv_shader_bin *bin =
|
|
|
|
|
anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
|
|
|
|
|
if (!bin)
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
/* The cache already has a reference and it's not going anywhere so there
|
|
|
|
|
* is no need to hold a second reference.
|
|
|
|
|
*/
|
|
|
|
|
anv_shader_bin_unref(device, bin);
|
|
|
|
|
|
|
|
|
|
*kernel_out = bin->kernel.offset;
|
2016-11-01 16:03:12 -07:00
|
|
|
*(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
|
2016-08-22 21:37:28 -07:00
|
|
|
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-14 13:12:22 +01:00
|
|
|
static bool
|
2016-08-22 21:37:28 -07:00
|
|
|
upload_blorp_shader(struct blorp_context *blorp,
|
|
|
|
|
const void *key, uint32_t key_size,
|
|
|
|
|
const void *kernel, uint32_t kernel_size,
|
2016-11-01 14:16:34 -07:00
|
|
|
const struct brw_stage_prog_data *prog_data,
|
|
|
|
|
uint32_t prog_data_size,
|
2016-08-22 21:37:28 -07:00
|
|
|
uint32_t *kernel_out, void *prog_data_out)
|
|
|
|
|
{
|
|
|
|
|
struct anv_device *device = blorp->driver_ctx;
|
|
|
|
|
|
|
|
|
|
/* The blorp cache must be a real cache */
|
|
|
|
|
assert(device->blorp_shader_cache.cache);
|
|
|
|
|
|
|
|
|
|
struct anv_pipeline_bind_map bind_map = {
|
|
|
|
|
.surface_count = 0,
|
|
|
|
|
.sampler_count = 0,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
struct anv_shader_bin *bin =
|
|
|
|
|
anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
|
|
|
|
|
key, key_size, kernel, kernel_size,
|
|
|
|
|
prog_data, prog_data_size, &bind_map);
|
|
|
|
|
|
2017-03-03 10:58:23 +01:00
|
|
|
if (!bin)
|
|
|
|
|
return false;
|
|
|
|
|
|
2016-08-22 21:37:28 -07:00
|
|
|
/* The cache already has a reference and it's not going anywhere so there
|
|
|
|
|
* is no need to hold a second reference.
|
|
|
|
|
*/
|
|
|
|
|
anv_shader_bin_unref(device, bin);
|
|
|
|
|
|
|
|
|
|
*kernel_out = bin->kernel.offset;
|
2016-11-01 16:03:12 -07:00
|
|
|
*(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;
|
2017-03-14 13:12:22 +01:00
|
|
|
|
|
|
|
|
return true;
|
2016-08-22 21:37:28 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Initialize the device's blorp context: create the dedicated blorp shader
 * cache, hook up the lookup/upload callbacks above, and select the
 * generation-specific batch-emission entrypoint based on the hardware gen.
 */
void
anv_device_init_blorp(struct anv_device *device)
{
   /* 'true' makes this a real (storing) cache; lookup_blorp_shader asserts
    * on this.
    */
   anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
   blorp_init(&device->blorp, device, &device->isl_dev);
   device->blorp.compiler = device->instance->physicalDevice.compiler;
   device->blorp.lookup_shader = lookup_blorp_shader;
   device->blorp.upload_shader = upload_blorp_shader;
   switch (device->info.gen) {
   case 7:
      /* Gen7 has two variants: Haswell (gen75) and Ivy Bridge (gen7). */
      if (device->info.is_haswell) {
         device->blorp.exec = gen75_blorp_exec;
      } else {
         device->blorp.exec = gen7_blorp_exec;
      }
      break;
   case 8:
      device->blorp.exec = gen8_blorp_exec;
      break;
   case 9:
      device->blorp.exec = gen9_blorp_exec;
      break;
   case 10:
      device->blorp.exec = gen10_blorp_exec;
      break;
   case 11:
      device->blorp.exec = gen11_blorp_exec;
      break;
   default:
      unreachable("Unknown hardware generation");
   }
}
|
|
|
|
|
|
|
|
|
|
/* Tear down the blorp context and its shader cache.  Mirrors
 * anv_device_init_blorp() in reverse order.
 */
void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
   anv_pipeline_cache_finish(&device->blorp_shader_cache);
}
|
2016-08-22 22:33:06 -07:00
|
|
|
|
2016-08-23 20:19:57 -07:00
|
|
|
/* Build a blorp_surf that lets blorp treat a range of a VkBuffer as a
 * width x height linear 2D image of the given format.  The backing
 * isl_surf is created in caller-provided storage (*isl_surf) because
 * blorp_surf only holds a pointer to it.
 */
static void
get_blorp_surf_for_anv_buffer(struct anv_device *device,
                              struct anv_buffer *buffer, uint64_t offset,
                              uint32_t width, uint32_t height,
                              uint32_t row_pitch, enum isl_format format,
                              struct blorp_surf *blorp_surf,
                              struct isl_surf *isl_surf)
{
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(format);
   bool ok UNUSED;

   /* ASTC is the only format which doesn't support linear layouts.
    * Create an equivalently sized surface with ISL to get around this.
    */
   if (fmtl->txc == ISL_TXC_ASTC) {
      /* Use an equivalently sized format */
      format = ISL_FORMAT_R32G32B32A32_UINT;
      assert(fmtl->bpb == isl_format_get_layout(format)->bpb);

      /* Shrink the dimensions for the new format: one R32G32B32A32 texel
       * stands in for one ASTC block.
       */
      width = DIV_ROUND_UP(width, fmtl->bw);
      height = DIV_ROUND_UP(height, fmtl->bh);
   }

   *blorp_surf = (struct blorp_surf) {
      .surf = isl_surf,
      .addr = {
         .buffer = buffer->bo,
         .offset = buffer->offset + offset,
         .mocs = device->default_mocs,
      },
   };

   /* Linear, single-level, single-sample 2D surface over the buffer range;
    * row_pitch is supplied by the caller (e.g. from bufferRowLength).
    */
   ok = isl_surf_init(&device->isl_dev, isl_surf,
                      .dim = ISL_SURF_DIM_2D,
                      .format = format,
                      .width = width,
                      .height = height,
                      .depth = 1,
                      .levels = 1,
                      .array_len = 1,
                      .samples = 1,
                      .row_pitch = row_pitch,
                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                               ISL_SURF_USAGE_RENDER_TARGET_BIT,
                      .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(ok);
}
|
|
|
|
|
|
2018-02-02 14:51:56 -08:00
|
|
|
/* Pick something high enough that it won't be used in core and low enough it
|
|
|
|
|
* will never map to an extension.
|
|
|
|
|
*/
|
|
|
|
|
#define ANV_IMAGE_LAYOUT_EXPLICIT_AUX (VkImageLayout)10000000
|
2017-10-05 11:22:47 -07:00
|
|
|
|
2017-11-11 12:22:45 -08:00
|
|
|
static struct blorp_address
|
|
|
|
|
anv_to_blorp_address(struct anv_address addr)
|
|
|
|
|
{
|
|
|
|
|
return (struct blorp_address) {
|
|
|
|
|
.buffer = addr.bo,
|
|
|
|
|
.offset = addr.offset,
|
|
|
|
|
};
|
|
|
|
|
}
|
|
|
|
|
|
2016-08-22 22:33:06 -07:00
|
|
|
static void
|
2017-11-03 15:18:45 -07:00
|
|
|
get_blorp_surf_for_anv_image(const struct anv_device *device,
|
|
|
|
|
const struct anv_image *image,
|
2016-08-22 22:33:06 -07:00
|
|
|
VkImageAspectFlags aspect,
|
2018-02-02 14:51:56 -08:00
|
|
|
VkImageLayout layout,
|
2016-10-25 10:32:18 -07:00
|
|
|
enum isl_aux_usage aux_usage,
|
2016-08-22 22:33:06 -07:00
|
|
|
struct blorp_surf *blorp_surf)
|
|
|
|
|
{
|
2017-07-19 12:14:19 +01:00
|
|
|
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
|
|
|
|
|
|
2018-02-02 14:51:56 -08:00
|
|
|
if (layout != ANV_IMAGE_LAYOUT_EXPLICIT_AUX)
|
|
|
|
|
aux_usage = anv_layout_to_aux_usage(&device->info, image, aspect, layout);
|
2016-10-25 10:32:18 -07:00
|
|
|
|
2017-07-19 12:14:19 +01:00
|
|
|
const struct anv_surface *surface = &image->planes[plane].surface;
|
2016-08-22 22:33:06 -07:00
|
|
|
*blorp_surf = (struct blorp_surf) {
|
|
|
|
|
.surf = &surface->isl,
|
|
|
|
|
.addr = {
|
2017-07-19 12:14:19 +01:00
|
|
|
.buffer = image->planes[plane].bo,
|
|
|
|
|
.offset = image->planes[plane].bo_offset + surface->offset,
|
2017-11-03 15:20:08 -07:00
|
|
|
.mocs = device->default_mocs,
|
2016-08-22 22:33:06 -07:00
|
|
|
},
|
|
|
|
|
};
|
2016-10-25 10:32:18 -07:00
|
|
|
|
|
|
|
|
if (aux_usage != ISL_AUX_USAGE_NONE) {
|
2017-07-19 12:14:19 +01:00
|
|
|
const struct anv_surface *aux_surface = &image->planes[plane].aux_surface;
|
|
|
|
|
blorp_surf->aux_surf = &aux_surface->isl,
|
2016-10-25 10:32:18 -07:00
|
|
|
blorp_surf->aux_addr = (struct blorp_address) {
|
2017-07-19 12:14:19 +01:00
|
|
|
.buffer = image->planes[plane].bo,
|
|
|
|
|
.offset = image->planes[plane].bo_offset + aux_surface->offset,
|
2017-11-03 15:20:08 -07:00
|
|
|
.mocs = device->default_mocs,
|
2016-10-25 10:32:18 -07:00
|
|
|
};
|
|
|
|
|
blorp_surf->aux_usage = aux_usage;
|
|
|
|
|
}
|
2016-08-22 22:33:06 -07:00
|
|
|
}
|
|
|
|
|
|
2016-08-27 21:05:13 -07:00
|
|
|
/* vkCmdCopyImage: copy regionCount regions between two images using blorp,
 * one blorp_copy per array layer (or 3D slice) per aspect.
 */
void anv_CmdCopyImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageCopy*                          pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      /* Normalize offsets/extent for 1D/2D images (force unused dims to
       * 0/1) so the code below can treat everything uniformly.
       */
      VkOffset3D srcOffset =
         anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
      VkOffset3D dstOffset =
         anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
      VkExtent3D extent =
         anv_sanitize_image_extent(src_image->type, pRegions[r].extent);

      const uint32_t dst_level = pRegions[r].dstSubresource.mipLevel;
      unsigned dst_base_layer, layer_count;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         /* For 3D images, z-offset/depth play the role of base layer and
          * layer count.
          */
         dst_base_layer = pRegions[r].dstOffset.z;
         layer_count = pRegions[r].extent.depth;
      } else {
         dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
         layer_count =
            anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
      }

      const uint32_t src_level = pRegions[r].srcSubresource.mipLevel;
      unsigned src_base_layer;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         src_base_layer = pRegions[r].srcOffset.z;
      } else {
         src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
         /* Vulkan requires src and dst to cover the same number of layers. */
         assert(layer_count ==
                anv_get_layerCount(src_image, &pRegions[r].srcSubresource));
      }

      VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
         dst_mask = pRegions[r].dstSubresource.aspectMask;

      assert(anv_image_aspects_compatible(src_mask, dst_mask));

      if (_mesa_bitcount(src_mask) > 1) {
         /* Multiple aspects (e.g. multi-planar formats): copy each aspect's
          * plane separately.
          */
         uint32_t aspect_bit;
         anv_foreach_image_aspect_bit(aspect_bit, src_image, src_mask) {
            struct blorp_surf src_surf, dst_surf;
            get_blorp_surf_for_anv_image(cmd_buffer->device,
                                         src_image, 1UL << aspect_bit,
                                         srcImageLayout, ISL_AUX_USAGE_NONE,
                                         &src_surf);
            get_blorp_surf_for_anv_image(cmd_buffer->device,
                                         dst_image, 1UL << aspect_bit,
                                         dstImageLayout, ISL_AUX_USAGE_NONE,
                                         &dst_surf);
            /* Record that these dst layers were written with this aux
             * usage so later layout transitions resolve correctly.
             */
            anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
                                              1UL << aspect_bit,
                                              dst_surf.aux_usage, dst_level,
                                              dst_base_layer, layer_count);

            for (unsigned i = 0; i < layer_count; i++) {
               blorp_copy(&batch, &src_surf, src_level, src_base_layer + i,
                          &dst_surf, dst_level, dst_base_layer + i,
                          srcOffset.x, srcOffset.y,
                          dstOffset.x, dstOffset.y,
                          extent.width, extent.height);
            }
         }
      } else {
         /* Single-aspect case: one surface pair covers the whole region. */
         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(cmd_buffer->device, src_image, src_mask,
                                      srcImageLayout, ISL_AUX_USAGE_NONE,
                                      &src_surf);
         get_blorp_surf_for_anv_image(cmd_buffer->device, dst_image, dst_mask,
                                      dstImageLayout, ISL_AUX_USAGE_NONE,
                                      &dst_surf);
         anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image, dst_mask,
                                           dst_surf.aux_usage, dst_level,
                                           dst_base_layer, layer_count);

         for (unsigned i = 0; i < layer_count; i++) {
            blorp_copy(&batch, &src_surf, src_level, src_base_layer + i,
                       &dst_surf, dst_level, dst_base_layer + i,
                       srcOffset.x, srcOffset.y,
                       dstOffset.x, dstOffset.y,
                       extent.width, extent.height);
         }
      }
   }

   blorp_batch_finish(&batch);
}
|
|
|
|
|
|
2016-08-23 20:19:57 -07:00
|
|
|
/* Shared implementation for vkCmdCopyBufferToImage and
 * vkCmdCopyImageToBuffer.  The buffer side is wrapped in a linear 2D
 * blorp_surf per region and blorp_copy is issued once per slice/layer.
 * buffer_to_image selects the copy direction.
 */
static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     VkImageLayout image_layout,
                     uint32_t regionCount,
                     const VkBufferImageCopy* pRegions,
                     bool buffer_to_image)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* One record for each side of the copy; src/dst alias them according to
    * the direction so the loop body below is direction-agnostic.
    */
   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;

      get_blorp_surf_for_anv_image(cmd_buffer->device, anv_image, aspect,
                                   image_layout, ISL_AUX_USAGE_NONE,
                                   &image.surf);
      image.offset =
         anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
      image.level = pRegions[r].imageSubresource.mipLevel;

      VkExtent3D extent =
         anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
      if (anv_image->type != VK_IMAGE_TYPE_3D) {
         /* For non-3D images, iterate array layers via the z coordinate. */
         image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
         extent.depth =
            anv_get_layerCount(anv_image, &pRegions[r].imageSubresource);
      }

      /* Linear format matching the image's format for this aspect. */
      const enum isl_format buffer_format =
         anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
                            aspect, VK_IMAGE_TILING_LINEAR);

      /* Per Vulkan, zero bufferRowLength/bufferImageHeight means "tightly
       * packed" — fall back to the copy extent.
       */
      const VkExtent3D bufferImageExtent = {
         .width  = pRegions[r].bufferRowLength ?
                   pRegions[r].bufferRowLength : extent.width,
         .height = pRegions[r].bufferImageHeight ?
                   pRegions[r].bufferImageHeight : extent.height,
      };

      const struct isl_format_layout *buffer_fmtl =
         isl_format_get_layout(buffer_format);

      /* Pitches in bytes, rounded up to whole compression blocks. */
      const uint32_t buffer_row_pitch =
         DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
         (buffer_fmtl->bpb / 8);

      const uint32_t buffer_layer_stride =
         DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
         buffer_row_pitch;

      struct isl_surf buffer_isl_surf;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    anv_buffer, pRegions[r].bufferOffset,
                                    extent.width, extent.height,
                                    buffer_row_pitch, buffer_format,
                                    &buffer.surf, &buffer_isl_surf);

      /* Only track writes when the image is the destination. */
      if (&image == dst) {
         anv_cmd_buffer_mark_image_written(cmd_buffer, anv_image,
                                           aspect, dst->surf.aux_usage,
                                           dst->level,
                                           dst->offset.z, extent.depth);
      }

      for (unsigned z = 0; z < extent.depth; z++) {
         blorp_copy(&batch, &src->surf, src->level, src->offset.z,
                    &dst->surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
                    extent.width, extent.height);

         /* Advance to the next slice: bump the image z and slide the
          * buffer surface's base address by one layer.
          */
         image.offset.z++;
         buffer.surf.addr.offset += buffer_layer_stride;
      }
   }

   blorp_batch_finish(&batch);
}
|
|
|
|
|
|
2016-08-27 12:57:01 -07:00
|
|
|
void anv_CmdCopyBufferToImage(
|
|
|
|
|
VkCommandBuffer commandBuffer,
|
|
|
|
|
VkBuffer srcBuffer,
|
|
|
|
|
VkImage dstImage,
|
|
|
|
|
VkImageLayout dstImageLayout,
|
|
|
|
|
uint32_t regionCount,
|
|
|
|
|
const VkBufferImageCopy* pRegions)
|
|
|
|
|
{
|
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
|
ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
|
|
|
|
|
ANV_FROM_HANDLE(anv_image, dst_image, dstImage);
|
|
|
|
|
|
2018-02-02 14:51:56 -08:00
|
|
|
copy_buffer_to_image(cmd_buffer, src_buffer, dst_image, dstImageLayout,
|
2016-08-27 12:57:01 -07:00
|
|
|
regionCount, pRegions, true);
|
|
|
|
|
}
|
|
|
|
|
|
2016-08-23 20:19:57 -07:00
|
|
|
void anv_CmdCopyImageToBuffer(
|
|
|
|
|
VkCommandBuffer commandBuffer,
|
|
|
|
|
VkImage srcImage,
|
|
|
|
|
VkImageLayout srcImageLayout,
|
|
|
|
|
VkBuffer dstBuffer,
|
|
|
|
|
uint32_t regionCount,
|
|
|
|
|
const VkBufferImageCopy* pRegions)
|
|
|
|
|
{
|
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
|
ANV_FROM_HANDLE(anv_image, src_image, srcImage);
|
|
|
|
|
ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
|
|
|
|
|
|
2018-02-02 14:51:56 -08:00
|
|
|
copy_buffer_to_image(cmd_buffer, dst_buffer, src_image, srcImageLayout,
|
2016-08-23 20:19:57 -07:00
|
|
|
regionCount, pRegions, false);
|
|
|
|
|
}
|
|
|
|
|
|
2016-08-22 22:33:06 -07:00
|
|
|
/* Sort the src range and the dst range so each is ascending.  Returns true
 * iff exactly one of the two ranges was reversed, i.e. the blit has to
 * mirror along this axis.
 */
static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool mirrored = false;

   if (*src0 > *src1) {
      unsigned swap = *src0;
      *src0 = *src1;
      *src1 = swap;
      mirrored = !mirrored;
   }

   if (*dst0 > *dst1) {
      unsigned swap = *dst0;
      *dst0 = *dst1;
      *dst1 = swap;
      mirrored = !mirrored;
   }

   return mirrored;
}
|
|
|
|
|
|
|
|
|
|
/* vkCmdBlitImage: scaled/filtered copy between images via blorp_blit.
 * Handles axis mirroring (src/dst ranges given in either order) and
 * z-scaling for 3D blits.
 */
void anv_CmdBlitImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageBlit*                          pRegions,
    VkFilter                                    filter)

{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_surf src, dst;

   /* blorp takes GL filter enums, so translate the VkFilter. */
   uint32_t gl_filter;
   switch (filter) {
   case VK_FILTER_NEAREST:
      gl_filter = 0x2600; /* GL_NEAREST */
      break;
   case VK_FILTER_LINEAR:
      gl_filter = 0x2601; /* GL_LINEAR */
      break;
   default:
      unreachable("Invalid filter");
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
      const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;

      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   src_image, src_res->aspectMask,
                                   srcImageLayout, ISL_AUX_USAGE_NONE, &src);
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   dst_image, dst_res->aspectMask,
                                   dstImageLayout, ISL_AUX_USAGE_NONE, &dst);

      struct anv_format_plane src_format =
         anv_get_format_plane(&cmd_buffer->device->info, src_image->vk_format,
                              src_res->aspectMask, src_image->tiling);
      struct anv_format_plane dst_format =
         anv_get_format_plane(&cmd_buffer->device->info, dst_image->vk_format,
                              dst_res->aspectMask, dst_image->tiling);

      /* For 3D images the z range of dstOffsets acts as the layer range;
       * otherwise use the subresource's array layers.
       */
      unsigned dst_start, dst_end;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         assert(dst_res->baseArrayLayer == 0);
         dst_start = pRegions[r].dstOffsets[0].z;
         dst_end = pRegions[r].dstOffsets[1].z;
      } else {
         dst_start = dst_res->baseArrayLayer;
         dst_end = dst_start + anv_get_layerCount(dst_image, dst_res);
      }

      unsigned src_start, src_end;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         assert(src_res->baseArrayLayer == 0);
         src_start = pRegions[r].srcOffsets[0].z;
         src_end = pRegions[r].srcOffsets[1].z;
      } else {
         src_start = src_res->baseArrayLayer;
         src_end = src_start + anv_get_layerCount(src_image, src_res);
      }

      /* Normalize both z ranges to ascending order; flip_z records whether
       * the blit mirrors in z.  src_z_step scales src z per dst slice.
       */
      bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
      float src_z_step = (float)(src_end + 1 - src_start) /
                         (float)(dst_end + 1 - dst_start);

      if (flip_z) {
         /* Walk the source range backwards. */
         src_start = src_end;
         src_z_step *= -1;
      }

      unsigned src_x0 = pRegions[r].srcOffsets[0].x;
      unsigned src_x1 = pRegions[r].srcOffsets[1].x;
      unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
      unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
      bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);

      unsigned src_y0 = pRegions[r].srcOffsets[0].y;
      unsigned src_y1 = pRegions[r].srcOffsets[1].y;
      unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
      unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
      bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);

      const unsigned num_layers = dst_end - dst_start;
      /* Track the write so later layout transitions resolve correctly. */
      anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
                                        dst_res->aspectMask,
                                        dst.aux_usage,
                                        dst_res->mipLevel,
                                        dst_start, num_layers);

      for (unsigned i = 0; i < num_layers; i++) {
         unsigned dst_z = dst_start + i;
         unsigned src_z = src_start + i * src_z_step;

         blorp_blit(&batch, &src, src_res->mipLevel, src_z,
                    src_format.isl_format, src_format.swizzle,
                    &dst, dst_res->mipLevel, dst_z,
                    dst_format.isl_format,
                    anv_swizzle_for_render(dst_format.swizzle),
                    src_x0, src_y0, src_x1, src_y1,
                    dst_x0, dst_y0, dst_x1, dst_y1,
                    gl_filter, flip_x, flip_y);
      }

   }

   blorp_batch_finish(&batch);
}
|
2016-08-30 15:43:46 -07:00
|
|
|
|
2016-09-25 08:44:40 -07:00
|
|
|
static enum isl_format
|
|
|
|
|
isl_format_for_size(unsigned size_B)
|
|
|
|
|
{
|
|
|
|
|
switch (size_B) {
|
2017-10-02 19:58:50 +02:00
|
|
|
case 4: return ISL_FORMAT_R32_UINT;
|
|
|
|
|
case 8: return ISL_FORMAT_R32G32_UINT;
|
2016-09-25 08:44:40 -07:00
|
|
|
case 16: return ISL_FORMAT_R32G32B32A32_UINT;
|
|
|
|
|
default:
|
|
|
|
|
unreachable("Not a power-of-two format size");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-09-09 19:21:18 -07:00
|
|
|
/**
|
|
|
|
|
* Returns the greatest common divisor of a and b that is a power of two.
|
|
|
|
|
*/
|
2017-07-06 21:18:03 -07:00
|
|
|
/**
 * Returns the greatest common divisor of a and b that is a power of two.
 *
 * At least one of a and b must be non-zero (asserted).
 */
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   /* The largest power of two dividing both a and b is the lowest set bit
    * of (a | b): OR-ing keeps the smaller of the two lowest set bits, and
    * if one operand is 0 it simply contributes no bits, so the other
    * operand's lowest set bit wins (matching the old ffsll/MIN2 logic).
    *
    * Computing it as a 64-bit bit trick also fixes a latent bug in the
    * previous implementation, which did "1 << log2" with a 32-bit int and
    * would overflow (undefined behavior) for divisors >= 2^31.
    */
   uint64_t bits = a | b;
   return bits & (~bits + 1);
}
|
|
|
|
|
|
2016-08-30 15:43:46 -07:00
|
|
|
/* This is maximum possible width/height our HW can handle */
|
|
|
|
|
#define MAX_SURFACE_DIM (1ull << 14)
|
|
|
|
|
|
|
|
|
|
void anv_CmdCopyBuffer(
|
|
|
|
|
VkCommandBuffer commandBuffer,
|
|
|
|
|
VkBuffer srcBuffer,
|
|
|
|
|
VkBuffer dstBuffer,
|
|
|
|
|
uint32_t regionCount,
|
|
|
|
|
const VkBufferCopy* pRegions)
|
|
|
|
|
{
|
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
|
ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
|
|
|
|
|
ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
|
|
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
2016-10-07 17:20:00 -07:00
|
|
|
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
|
2016-08-30 15:43:46 -07:00
|
|
|
|
|
|
|
|
for (unsigned r = 0; r < regionCount; r++) {
|
2017-08-28 15:57:20 -07:00
|
|
|
struct blorp_address src = {
|
|
|
|
|
.buffer = src_buffer->bo,
|
|
|
|
|
.offset = src_buffer->offset + pRegions[r].srcOffset,
|
2017-11-03 15:20:08 -07:00
|
|
|
.mocs = cmd_buffer->device->default_mocs,
|
2017-08-28 15:57:20 -07:00
|
|
|
};
|
|
|
|
|
struct blorp_address dst = {
|
|
|
|
|
.buffer = dst_buffer->bo,
|
|
|
|
|
.offset = dst_buffer->offset + pRegions[r].dstOffset,
|
2017-11-03 15:20:08 -07:00
|
|
|
.mocs = cmd_buffer->device->default_mocs,
|
2017-08-28 15:57:20 -07:00
|
|
|
};
|
2016-08-30 15:43:46 -07:00
|
|
|
|
2017-08-28 15:57:20 -07:00
|
|
|
blorp_buffer_copy(&batch, src, dst, pRegions[r].size);
|
2016-08-30 15:43:46 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
blorp_batch_finish(&batch);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* vkCmdUpdateBuffer: stage the CPU data into the command buffer's dynamic
 * state stream in chunks, then GPU-copy each chunk into the destination
 * buffer with blorp.
 */
void anv_CmdUpdateBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                dataSize,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* We can't quite grab a full block because the state stream needs a
    * little data at the top to build its linked list.
    */
   const uint32_t max_update_size =
      cmd_buffer->device->dynamic_state_pool.block_size - 64;

   assert(max_update_size < MAX_SURFACE_DIM * 4);

   /* We're about to read data that was written from the CPU.  Flush the
    * texture cache so we don't get anything stale.
    */
   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;

   while (dataSize) {
      const uint32_t copy_size = MIN2(dataSize, max_update_size);

      /* Scratch allocation in the dynamic state stream to hold this chunk. */
      struct anv_state tmp_data =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);

      memcpy(tmp_data.map, pData, copy_size);

      /* Make the CPU write visible to the GPU before the copy executes. */
      anv_state_flush(cmd_buffer->device, tmp_data);

      struct blorp_address src = {
         .buffer = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
         .offset = tmp_data.offset,
         .mocs = cmd_buffer->device->default_mocs,
      };
      struct blorp_address dst = {
         .buffer = dst_buffer->bo,
         .offset = dst_buffer->offset + dstOffset,
         .mocs = cmd_buffer->device->default_mocs,
      };

      blorp_buffer_copy(&batch, src, dst, copy_size);

      /* Advance to the next chunk. */
      dataSize -= copy_size;
      dstOffset += copy_size;
      pData = (void *)pData + copy_size;
   }

   blorp_batch_finish(&batch);
}
|
2016-08-30 16:56:25 -07:00
|
|
|
|
2016-09-25 08:44:40 -07:00
|
|
|
/* vkCmdFillBuffer: fill a buffer range with a repeated 32-bit word by
 * reinterpreting the range as one or more 2D color surfaces and clearing
 * them with blorp.  The range is split into (1) full MAX_SURFACE_DIM x
 * MAX_SURFACE_DIM chunks, (2) one full-width rectangle of leftover rows,
 * and (3) one final partial row.
 */
void anv_CmdFillBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                fillSize,
    uint32_t                                    data)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
   struct blorp_surf surf;
   struct isl_surf isl_surf;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* Resolves VK_WHOLE_SIZE and clamps the range to the buffer's extent. */
   fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);

   /* From the Vulkan spec:
    *
    *    "size is the number of bytes to fill, and must be either a multiple
    *    of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
    *    the buffer. If VK_WHOLE_SIZE is used and the remaining size of the
    *    buffer is not a multiple of 4, then the nearest smaller multiple is
    *    used."
    */
   fillSize &= ~3ull;

   /* First, we compute the biggest format that can be used with the
    * given offsets and size.  Start from 16 bytes per pixel and shrink to
    * the largest power of two that divides both the offset and the size.
    */
   int bs = 16;
   bs = gcd_pow2_u64(bs, dstOffset);
   bs = gcd_pow2_u64(bs, fillSize);
   enum isl_format isl_format = isl_format_for_size(bs);

   /* Replicate the fill word into all four channels; only the first
    * bs / 4 channels are actually written for narrower formats.
    */
   union isl_color_value color = {
      .u32 = { data, data, data, data },
   };

   /* Step 1: clear as many maximum-size 2D surfaces as fit in the range. */
   const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
   while (fillSize >= max_fill_size) {
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                  color, NULL);
      fillSize -= max_fill_size;
      dstOffset += max_fill_size;
   }

   /* Step 2: clear the remaining full-width rows as one rectangle. */
   uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
   assert(height < MAX_SURFACE_DIM); /* the while loop consumed anything larger */
   if (height != 0) {
      const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, height,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
                  color, NULL);
      fillSize -= rect_fill_size;
      dstOffset += rect_fill_size;
   }

   /* Step 3: clear the final partial row (fillSize is now < one row). */
   if (fillSize != 0) {
      const uint32_t width = fillSize / bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    width, 1,
                                    width * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, width, 1,
                  color, NULL);
   }

   blorp_batch_finish(&batch);
}
|
|
|
|
|
|
2016-08-30 16:56:25 -07:00
|
|
|
/* vkCmdClearColorImage: clear each requested subresource range of a color
 * image with blorp, one mip level at a time.
 */
void anv_CmdClearColorImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     _image,
    VkImageLayout                               imageLayout,
    const VkClearColorValue*                    pColor,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, _image);

   /* All-false mask: write every color channel. */
   static const bool color_write_disable[4] = { false, false, false, false };

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      assert(pRanges[r].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);

      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, pRanges[r].aspectMask,
                                   imageLayout, ISL_AUX_USAGE_NONE, &surf);

      struct anv_format_plane src_format =
         anv_get_format_plane(&cmd_buffer->device->info, image->vk_format,
                              VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         /* For 3D images, array-layer parameters are ignored and the whole
          * (minified) depth of this level is cleared instead.
          */
         if (image->type == VK_IMAGE_TYPE_3D) {
            base_layer = 0;
            layer_count = anv_minify(image->extent.depth, level);
         }

         /* Track the write for later aux/fast-clear resolve bookkeeping. */
         anv_cmd_buffer_mark_image_written(cmd_buffer, image,
                                           pRanges[r].aspectMask,
                                           surf.aux_usage, level,
                                           base_layer, layer_count);

         blorp_clear(&batch, &surf,
                     src_format.isl_format, src_format.swizzle,
                     level, base_layer, layer_count,
                     0, 0, level_width, level_height,
                     vk_to_isl_color(*pColor), color_write_disable);
      }
   }

   blorp_batch_finish(&batch);
}
|
2016-08-30 17:49:56 -07:00
|
|
|
|
2016-10-06 23:35:22 -07:00
|
|
|
/* vkCmdClearDepthStencilImage: clear the requested depth and/or stencil
 * subresource ranges with blorp.  Surfaces for both aspects are set up once
 * (zeroed if the image lacks the aspect); per-range flags select which of
 * the two actually gets cleared.
 */
void anv_CmdClearDepthStencilImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     image_h,
    VkImageLayout                               imageLayout,
    const VkClearDepthStencilValue*             pDepthStencil,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, image_h);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth, stencil;
   if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   imageLayout, ISL_AUX_USAGE_NONE, &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   imageLayout, ISL_AUX_USAGE_NONE, &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
      bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         /* 3D images clear the full (minified) depth of this level. */
         if (image->type == VK_IMAGE_TYPE_3D)
            layer_count = anv_minify(image->extent.depth, level);

         /* Stencil write mask: 0xff writes all bits, 0 leaves stencil
          * untouched when the range does not include the stencil aspect.
          */
         blorp_clear_depth_stencil(&batch, &depth, &stencil,
                                   level, base_layer, layer_count,
                                   0, 0, level_width, level_height,
                                   clear_depth, pDepthStencil->depth,
                                   clear_stencil ? 0xff : 0,
                                   pDepthStencil->stencil);
      }
   }

   blorp_batch_finish(&batch);
}
|
|
|
|
|
|
2017-03-09 11:49:01 +01:00
|
|
|
VkResult
|
2016-10-21 17:01:17 -07:00
|
|
|
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
|
uint32_t num_entries,
|
2017-03-09 11:49:01 +01:00
|
|
|
uint32_t *state_offset,
|
|
|
|
|
struct anv_state *bt_state)
|
2016-10-21 17:01:17 -07:00
|
|
|
{
|
2017-03-09 11:49:01 +01:00
|
|
|
*bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
|
|
|
|
|
state_offset);
|
|
|
|
|
if (bt_state->map == NULL) {
|
2016-10-21 17:01:17 -07:00
|
|
|
/* We ran out of space. Grab a new binding table block. */
|
2017-03-09 11:49:01 +01:00
|
|
|
VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
|
|
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
|
return result;
|
2016-10-21 17:01:17 -07:00
|
|
|
|
|
|
|
|
/* Re-emit state base addresses so we get the new surface state base
|
|
|
|
|
* address before we start emitting binding tables etc.
|
|
|
|
|
*/
|
|
|
|
|
anv_cmd_buffer_emit_state_base_address(cmd_buffer);
|
|
|
|
|
|
2017-03-09 11:49:01 +01:00
|
|
|
*bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
|
|
|
|
|
state_offset);
|
|
|
|
|
assert(bt_state->map != NULL);
|
2016-10-21 17:01:17 -07:00
|
|
|
}
|
|
|
|
|
|
2017-03-09 11:49:01 +01:00
|
|
|
return VK_SUCCESS;
|
2016-10-21 17:01:17 -07:00
|
|
|
}
|
|
|
|
|
|
2017-03-09 11:49:01 +01:00
|
|
|
static VkResult
|
2016-10-21 17:13:51 -07:00
|
|
|
binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
|
2017-03-09 11:49:01 +01:00
|
|
|
struct anv_state surface_state,
|
|
|
|
|
uint32_t *bt_offset)
|
2016-10-21 17:13:51 -07:00
|
|
|
{
|
|
|
|
|
uint32_t state_offset;
|
2017-03-09 11:49:01 +01:00
|
|
|
struct anv_state bt_state;
|
|
|
|
|
|
|
|
|
|
VkResult result =
|
|
|
|
|
anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset,
|
|
|
|
|
&bt_state);
|
|
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
|
return result;
|
2016-10-21 17:13:51 -07:00
|
|
|
|
|
|
|
|
uint32_t *bt_map = bt_state.map;
|
|
|
|
|
bt_map[0] = surface_state.offset + state_offset;
|
|
|
|
|
|
2017-03-09 11:49:01 +01:00
|
|
|
*bt_offset = bt_state.offset;
|
|
|
|
|
return VK_SUCCESS;
|
2016-10-21 17:13:51 -07:00
|
|
|
}
|
|
|
|
|
|
2016-10-07 14:43:21 -07:00
|
|
|
/* Clear rects of the subpass color attachment named by the VkClearAttachment,
 * using blorp's attachment-clear path (renders through a one-entry binding
 * table).  Silently returns if the attachment is unused or binding-table
 * allocation fails.
 */
static void
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       const VkClearAttachment *attachment,
                       uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t color_att = attachment->colorAttachment;
   const uint32_t att_idx = subpass->color_attachments[color_att].attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att_idx];

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer, att_state->color.state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   union isl_color_value clear_color =
      vk_to_isl_color(attachment->clearValue.color);

   /* If multiview is enabled we ignore baseArrayLayer and layerCount and
    * instead clear layer 'view_idx' for each view in the subpass mask.
    */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            blorp_clear_attachments(batch, binding_table,
                                    ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    true, clear_color, false, 0.0f, 0, 0);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      /* VK_REMAINING_ARRAY_LAYERS is not valid in a VkClearRect. */
      assert(pRects[r].layerCount != VK_REMAINING_ARRAY_LAYERS);
      blorp_clear_attachments(batch, binding_table,
                              ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              true, clear_color, false, 0.0f, 0, 0);
   }
}
|
|
|
|
|
|
|
|
|
|
/* Clear rects of the subpass depth/stencil attachment using blorp's
 * attachment-clear path.  A null surface is bound (no color writes); which
 * of depth/stencil is cleared follows the VkClearAttachment aspect mask.
 */
static void
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
                               struct blorp_batch *batch,
                               const VkClearAttachment *attachment,
                               uint32_t rectCount, const VkClearRect *pRects)
{
   /* Dummy color value: color clearing is disabled for this path. */
   static const union isl_color_value color_value = { .u32 = { 0, } };
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t att_idx = subpass->depth_stencil_attachment.attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];

   bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
   bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

   /* blorp needs the depth format to emit the depth clear. */
   enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
   if (clear_depth) {
      depth_format = anv_get_isl_format(&cmd_buffer->device->info,
                                        pass_att->format,
                                        VK_IMAGE_ASPECT_DEPTH_BIT,
                                        VK_IMAGE_TILING_OPTIMAL);
   }

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer,
                                      cmd_buffer->state.null_surface_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   /* If multiview is enabled we ignore baseArrayLayer and layerCount and
    * instead clear layer 'view_idx' for each view in the subpass mask.
    */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
            blorp_clear_attachments(batch, binding_table,
                                    depth_format, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    false, color_value,
                                    clear_depth, value.depth,
                                    clear_stencil ? 0xff : 0, value.stencil);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
      /* VK_REMAINING_ARRAY_LAYERS is not valid in a VkClearRect. */
      assert(pRects[r].layerCount != VK_REMAINING_ARRAY_LAYERS);
      blorp_clear_attachments(batch, binding_table,
                              depth_format, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              false, color_value,
                              clear_depth, value.depth,
                              clear_stencil ? 0xff : 0, value.stencil);
   }
}
|
|
|
|
|
|
|
|
|
|
void anv_CmdClearAttachments(
|
|
|
|
|
VkCommandBuffer commandBuffer,
|
|
|
|
|
uint32_t attachmentCount,
|
|
|
|
|
const VkClearAttachment* pAttachments,
|
|
|
|
|
uint32_t rectCount,
|
|
|
|
|
const VkClearRect* pRects)
|
|
|
|
|
{
|
|
|
|
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
|
|
|
|
|
|
|
|
|
|
/* Because this gets called within a render pass, we tell blorp not to
|
|
|
|
|
* trash our depth and stencil buffers.
|
|
|
|
|
*/
|
|
|
|
|
struct blorp_batch batch;
|
|
|
|
|
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
|
|
|
|
|
BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);
|
|
|
|
|
|
|
|
|
|
for (uint32_t a = 0; a < attachmentCount; ++a) {
|
2017-11-02 16:05:45 -07:00
|
|
|
if (pAttachments[a].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
|
2017-07-19 12:14:19 +01:00
|
|
|
assert(pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
|
2016-10-07 14:43:21 -07:00
|
|
|
clear_color_attachment(cmd_buffer, &batch,
|
|
|
|
|
&pAttachments[a],
|
|
|
|
|
rectCount, pRects);
|
|
|
|
|
} else {
|
|
|
|
|
clear_depth_stencil_attachment(cmd_buffer, &batch,
|
|
|
|
|
&pAttachments[a],
|
|
|
|
|
rectCount, pRects);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
blorp_batch_finish(&batch);
|
|
|
|
|
}
|
|
|
|
|
|
2016-11-15 23:11:55 -08:00
|
|
|
/* Stages of a subpass at which an operation may be emitted.
 * NOTE(review): no reference to this enum appears in this part of the
 * file — presumably consumed elsewhere; confirm before removing.
 */
enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};
|
|
|
|
|
|
2017-07-19 12:14:19 +01:00
|
|
|
static void
|
|
|
|
|
resolve_surface(struct blorp_batch *batch,
|
|
|
|
|
struct blorp_surf *src_surf,
|
|
|
|
|
uint32_t src_level, uint32_t src_layer,
|
|
|
|
|
struct blorp_surf *dst_surf,
|
|
|
|
|
uint32_t dst_level, uint32_t dst_layer,
|
|
|
|
|
uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
|
|
|
|
|
uint32_t width, uint32_t height)
|
|
|
|
|
{
|
|
|
|
|
blorp_blit(batch,
|
|
|
|
|
src_surf, src_level, src_layer,
|
|
|
|
|
ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
|
|
|
|
|
dst_surf, dst_level, dst_layer,
|
|
|
|
|
ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
|
|
|
|
|
src_x, src_y, src_x + width, src_y + height,
|
|
|
|
|
dst_x, dst_y, dst_x + width, dst_y + height,
|
|
|
|
|
0x2600 /* GL_NEAREST */, false, false);
|
|
|
|
|
}
|
|
|
|
|
|
2016-08-30 17:49:56 -07:00
|
|
|
/* Resolve one region of a multisampled 2D image into a single-sampled 2D
 * image, iterating over every aspect in aspect_mask (one blit per plane).
 */
static void
resolve_image(struct anv_device *device,
              struct blorp_batch *batch,
              const struct anv_image *src_image,
              VkImageLayout src_image_layout,
              uint32_t src_level, uint32_t src_layer,
              const struct anv_image *dst_image,
              VkImageLayout dst_image_layout,
              uint32_t dst_level, uint32_t dst_layer,
              VkImageAspectFlags aspect_mask,
              uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
              uint32_t width, uint32_t height)
{
   struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;

   /* vkCmdResolveImage only supports MSAA 2D -> single-sample 2D. */
   assert(src_image->type == VK_IMAGE_TYPE_2D);
   assert(src_image->samples > 1);
   assert(dst_image->type == VK_IMAGE_TYPE_2D);
   assert(dst_image->samples == 1);
   assert(src_image->n_planes == dst_image->n_planes);

   uint32_t aspect_bit;

   anv_foreach_image_aspect_bit(aspect_bit, src_image, aspect_mask) {
      struct blorp_surf src_surf, dst_surf;
      get_blorp_surf_for_anv_image(device, src_image, 1UL << aspect_bit,
                                   src_image_layout, ISL_AUX_USAGE_NONE,
                                   &src_surf);
      get_blorp_surf_for_anv_image(device, dst_image, 1UL << aspect_bit,
                                   dst_image_layout, ISL_AUX_USAGE_NONE,
                                   &dst_surf);
      /* Track the write for later aux/fast-clear resolve bookkeeping. */
      anv_cmd_buffer_mark_image_written(cmd_buffer, dst_image,
                                        1UL << aspect_bit,
                                        dst_surf.aux_usage,
                                        dst_level, dst_layer, 1);

      /* YCbCr formats are multi-plane and cannot take this path. */
      assert(!src_image->format->can_ycbcr);
      assert(!dst_image->format->can_ycbcr);

      resolve_surface(batch,
                      &src_surf, src_level, src_layer,
                      &dst_surf, dst_level, dst_layer,
                      src_x, src_y, dst_x, dst_y, width, height);
   }
}
|
|
|
|
|
|
|
|
|
|
/* vkCmdResolveImage: resolve each requested region, one array layer at a
 * time, via resolve_image().
 */
void anv_CmdResolveImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageResolve*                       pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t r = 0; r < regionCount; r++) {
      /* The spec requires matching aspects and layer counts per region. */
      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);
      assert(anv_get_layerCount(src_image, &pRegions[r].srcSubresource) ==
             anv_get_layerCount(dst_image, &pRegions[r].dstSubresource));

      const uint32_t layer_count =
         anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);

      VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
                         dst_mask = pRegions[r].dstSubresource.aspectMask;

      assert(anv_image_aspects_compatible(src_mask, dst_mask));

      for (uint32_t layer = 0; layer < layer_count; layer++) {
         resolve_image(cmd_buffer->device, &batch,
                       src_image, srcImageLayout,
                       pRegions[r].srcSubresource.mipLevel,
                       pRegions[r].srcSubresource.baseArrayLayer + layer,
                       dst_image, dstImageLayout,
                       pRegions[r].dstSubresource.mipLevel,
                       pRegions[r].dstSubresource.baseArrayLayer + layer,
                       pRegions[r].dstSubresource.aspectMask,
                       pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
                       pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
                       pRegions[r].extent.width, pRegions[r].extent.height);
      }
   }

   blorp_batch_finish(&batch);
}
|
|
|
|
|
|
2017-07-19 12:14:19 +01:00
|
|
|
static enum isl_aux_usage
|
|
|
|
|
fast_clear_aux_usage(const struct anv_image *image,
|
|
|
|
|
VkImageAspectFlagBits aspect)
|
|
|
|
|
{
|
|
|
|
|
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
|
|
|
|
|
if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
|
|
|
|
|
return ISL_AUX_USAGE_CCS_D;
|
|
|
|
|
else
|
|
|
|
|
return image->planes[plane].aux_usage;
|
|
|
|
|
}
|
|
|
|
|
|
2016-08-30 17:49:56 -07:00
|
|
|
void
|
|
|
|
|
anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
|
|
|
|
|
{
|
|
|
|
|
struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
|
|
|
|
|
struct anv_subpass *subpass = cmd_buffer->state.subpass;
|
|
|
|
|
|
2016-10-24 22:03:45 -07:00
|
|
|
if (subpass->has_resolve) {
|
2017-03-17 22:36:05 -07:00
|
|
|
struct blorp_batch batch;
|
|
|
|
|
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
|
|
|
|
|
|
2017-03-10 17:50:01 -08:00
|
|
|
/* We are about to do some MSAA resolves. We need to flush so that the
|
|
|
|
|
* result of writes to the MSAA color attachments show up in the sampler
|
|
|
|
|
* when we blit to the single-sampled resolve target.
|
|
|
|
|
*/
|
|
|
|
|
cmd_buffer->state.pending_pipe_bits |=
|
|
|
|
|
ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
|
|
|
|
|
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
|
|
|
|
|
|
2016-10-24 22:03:45 -07:00
|
|
|
for (uint32_t i = 0; i < subpass->color_count; ++i) {
|
2017-01-31 16:12:50 -08:00
|
|
|
uint32_t src_att = subpass->color_attachments[i].attachment;
|
|
|
|
|
uint32_t dst_att = subpass->resolve_attachments[i].attachment;
|
2016-10-24 22:03:45 -07:00
|
|
|
|
|
|
|
|
if (dst_att == VK_ATTACHMENT_UNUSED)
|
|
|
|
|
continue;
|
|
|
|
|
|
2017-04-06 14:15:55 -07:00
|
|
|
assert(src_att < cmd_buffer->state.pass->attachment_count);
|
|
|
|
|
assert(dst_att < cmd_buffer->state.pass->attachment_count);
|
|
|
|
|
|
2016-10-24 22:03:45 -07:00
|
|
|
if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
|
|
|
|
|
/* From the Vulkan 1.0 spec:
|
|
|
|
|
*
|
|
|
|
|
* If the first use of an attachment in a render pass is as a
|
|
|
|
|
* resolve attachment, then the loadOp is effectively ignored
|
|
|
|
|
* as the resolve is guaranteed to overwrite all pixels in the
|
|
|
|
|
* render area.
|
|
|
|
|
*/
|
|
|
|
|
cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
|
|
|
|
|
}
|
2016-08-30 17:49:56 -07:00
|
|
|
|
2016-10-24 22:03:45 -07:00
|
|
|
struct anv_image_view *src_iview = fb->attachments[src_att];
|
|
|
|
|
struct anv_image_view *dst_iview = fb->attachments[dst_att];
|
2016-10-27 22:42:02 -07:00
|
|
|
|
2017-03-31 16:05:34 -07:00
|
|
|
enum isl_aux_usage src_aux_usage =
|
|
|
|
|
cmd_buffer->state.attachments[src_att].aux_usage;
|
|
|
|
|
enum isl_aux_usage dst_aux_usage =
|
|
|
|
|
cmd_buffer->state.attachments[dst_att].aux_usage;
|
|
|
|
|
|
2016-10-24 22:03:45 -07:00
|
|
|
const VkRect2D render_area = cmd_buffer->state.render_area;
|
2016-08-30 17:49:56 -07:00
|
|
|
|
2017-07-19 12:14:19 +01:00
|
|
|
assert(src_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT &&
|
|
|
|
|
dst_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT);
|
|
|
|
|
|
|
|
|
|
struct blorp_surf src_surf, dst_surf;
|
2017-11-03 15:18:45 -07:00
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device, src_iview->image,
|
2017-07-19 12:14:19 +01:00
|
|
|
VK_IMAGE_ASPECT_COLOR_BIT,
|
2018-02-02 14:51:56 -08:00
|
|
|
ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
|
2017-07-19 12:14:19 +01:00
|
|
|
src_aux_usage, &src_surf);
|
2017-11-03 15:18:45 -07:00
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device, dst_iview->image,
|
2017-07-19 12:14:19 +01:00
|
|
|
VK_IMAGE_ASPECT_COLOR_BIT,
|
2018-02-02 14:51:56 -08:00
|
|
|
ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
|
2017-07-19 12:14:19 +01:00
|
|
|
dst_aux_usage, &dst_surf);
|
2018-02-14 11:48:05 +01:00
|
|
|
|
|
|
|
|
uint32_t base_src_layer = src_iview->planes[0].isl.base_array_layer;
|
|
|
|
|
uint32_t base_dst_layer = dst_iview->planes[0].isl.base_array_layer;
|
|
|
|
|
|
|
|
|
|
assert(src_iview->planes[0].isl.array_len >= fb->layers);
|
|
|
|
|
assert(dst_iview->planes[0].isl.array_len >= fb->layers);
|
|
|
|
|
|
2017-11-27 08:35:12 -08:00
|
|
|
anv_cmd_buffer_mark_image_written(cmd_buffer, dst_iview->image,
|
|
|
|
|
VK_IMAGE_ASPECT_COLOR_BIT,
|
|
|
|
|
dst_surf.aux_usage,
|
|
|
|
|
dst_iview->planes[0].isl.base_level,
|
2018-02-14 11:48:05 +01:00
|
|
|
base_dst_layer, fb->layers);
|
2017-07-19 12:14:19 +01:00
|
|
|
|
|
|
|
|
assert(!src_iview->image->format->can_ycbcr);
|
|
|
|
|
assert(!dst_iview->image->format->can_ycbcr);
|
|
|
|
|
|
2018-02-14 11:48:05 +01:00
|
|
|
for (uint32_t i = 0; i < fb->layers; i++) {
|
|
|
|
|
resolve_surface(&batch,
|
|
|
|
|
&src_surf,
|
|
|
|
|
src_iview->planes[0].isl.base_level,
|
|
|
|
|
base_src_layer + i,
|
|
|
|
|
&dst_surf,
|
|
|
|
|
dst_iview->planes[0].isl.base_level,
|
|
|
|
|
base_dst_layer + i,
|
|
|
|
|
render_area.offset.x, render_area.offset.y,
|
|
|
|
|
render_area.offset.x, render_area.offset.y,
|
|
|
|
|
render_area.extent.width, render_area.extent.height);
|
|
|
|
|
}
|
2016-10-24 22:03:45 -07:00
|
|
|
}
|
2016-08-30 17:49:56 -07:00
|
|
|
|
2017-03-17 22:36:05 -07:00
|
|
|
blorp_batch_finish(&batch);
|
|
|
|
|
}
|
2016-08-30 17:49:56 -07:00
|
|
|
}
|
2017-01-05 23:32:07 -08:00
|
|
|
|
anv/image: Support creating uncompressed views of compressed images
In order to get support everywhere, this gets a bit complicated. On Sky
Lake and later, everything is fine because HALIGN/VALIGN are specified
in surface elements and are required to be at least 4 so any offsetting
we may need to do falls neatly within the heavy restrictions placed on
the X/Y Offset parameter of RENDER_SURFACE_STATE. On Broadwell and
earlier, HALIGN/VALIGN are specified in pixels and are hard-coded to
align to exactly the block size of the compressed texture. This means
that, when reinterpreted as a non-compressed texture, the tile offsets
may be anything and we can't rely on X/Y Offset.
In order to work around this issue, we fall back to linear where we can
trivially offset to whatever element we so choose. However, since
linear texturing performance is terrible, we create a tiled shadow copy
of the image to use for texturing. Whenever the user does a layout
transition from anything to SHADER_READ_ONLY_OPTIMAL, we use blorp to
copy the contents of the texture from the linear copy to the tiled
shadow copy. This assumes that the client will use the image far more
for texturing than as a storage image or render target.
Even though we don't need the shadow copy on Sky Lake, we implement it
this way first to make testing easier. Due to the hardware restriction
that ASTC must not be linear, ASTC does not work yet.
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
2017-07-11 13:17:06 -07:00
|
|
|
void
|
|
|
|
|
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
|
const struct anv_image *image,
|
|
|
|
|
uint32_t base_level, uint32_t level_count,
|
|
|
|
|
uint32_t base_layer, uint32_t layer_count)
|
|
|
|
|
{
|
|
|
|
|
struct blorp_batch batch;
|
|
|
|
|
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
|
|
|
|
|
|
2017-07-19 12:14:19 +01:00
|
|
|
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT && image->n_planes == 1);
|
|
|
|
|
|
anv/image: Support creating uncompressed views of compressed images
In order to get support everywhere, this gets a bit complicated. On Sky
Lake and later, everything is fine because HALIGN/VALIGN are specified
in surface elements and are required to be at least 4 so any offsetting
we may need to do falls neatly within the heavy restrictions placed on
the X/Y Offset parameter of RENDER_SURFACE_STATE. On Broadwell and
earlier, HALIGN/VALIGN are specified in pixels and are hard-coded to
align to exactly the block size of the compressed texture. This means
that, when reinterpreted as a non-compressed texture, the tile offsets
may be anything and we can't rely on X/Y Offset.
In order to work around this issue, we fall back to linear where we can
trivially offset to whatever element we so choose. However, since
linear texturing performance is terrible, we create a tiled shadow copy
of the image to use for texturing. Whenever the user does a layout
transition from anything to SHADER_READ_ONLY_OPTIMAL, we use blorp to
copy the contents of the texture from the linear copy to the tiled
shadow copy. This assumes that the client will use the image far more
for texturing than as a storage image or render target.
Even though we don't need the shadow copy on Sky Lake, we implement it
this way first to make testing easier. Due to the hardware restriction
that ASTC must not be linear, ASTC does not work yet.
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
2017-07-11 13:17:06 -07:00
|
|
|
struct blorp_surf surf;
|
2017-11-03 15:18:45 -07:00
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device,
|
|
|
|
|
image, VK_IMAGE_ASPECT_COLOR_BIT,
|
2018-02-02 14:51:56 -08:00
|
|
|
VK_IMAGE_LAYOUT_GENERAL,
|
anv/image: Support creating uncompressed views of compressed images
In order to get support everywhere, this gets a bit complicated. On Sky
Lake and later, everything is fine because HALIGN/VALIGN are specified
in surface elements and are required to be at least 4 so any offsetting
we may need to do falls neatly within the heavy restrictions placed on
the X/Y Offset parameter of RENDER_SURFACE_STATE. On Broadwell and
earlier, HALIGN/VALIGN are specified in pixels and are hard-coded to
align to exactly the block size of the compressed texture. This means
that, when reinterpreted as a non-compressed texture, the tile offsets
may be anything and we can't rely on X/Y Offset.
In order to work around this issue, we fall back to linear where we can
trivially offset to whatever element we so choose. However, since
linear texturing performance is terrible, we create a tiled shadow copy
of the image to use for texturing. Whenever the user does a layout
transition from anything to SHADER_READ_ONLY_OPTIMAL, we use blorp to
copy the contents of the texture from the linear copy to the tiled
shadow copy. This assumes that the client will use the image far more
for texturing than as a storage image or render target.
Even though we don't need the shadow copy on Sky Lake, we implement it
this way first to make testing easier. Due to the hardware restriction
that ASTC must not be linear, ASTC does not work yet.
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
2017-07-11 13:17:06 -07:00
|
|
|
ISL_AUX_USAGE_NONE, &surf);
|
2018-02-02 14:51:56 -08:00
|
|
|
assert(surf.aux_usage == ISL_AUX_USAGE_NONE);
|
anv/image: Support creating uncompressed views of compressed images
In order to get support everywhere, this gets a bit complicated. On Sky
Lake and later, everything is fine because HALIGN/VALIGN are specified
in surface elements and are required to be at least 4 so any offsetting
we may need to do falls neatly within the heavy restrictions placed on
the X/Y Offset parameter of RENDER_SURFACE_STATE. On Broadwell and
earlier, HALIGN/VALIGN are specified in pixels and are hard-coded to
align to exactly the block size of the compressed texture. This means
that, when reinterpreted as a non-compressed texture, the tile offsets
may be anything and we can't rely on X/Y Offset.
In order to work around this issue, we fall back to linear where we can
trivially offset to whatever element we so choose. However, since
linear texturing performance is terrible, we create a tiled shadow copy
of the image to use for texturing. Whenever the user does a layout
transition from anything to SHADER_READ_ONLY_OPTIMAL, we use blorp to
copy the contents of the texture from the linear copy to the tiled
shadow copy. This assumes that the client will use the image far more
for texturing than as a storage image or render target.
Even though we don't need the shadow copy on Sky Lake, we implement it
this way first to make testing easier. Due to the hardware restriction
that ASTC must not be linear, ASTC does not work yet.
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
2017-07-11 13:17:06 -07:00
|
|
|
|
|
|
|
|
struct blorp_surf shadow_surf = {
|
2017-07-19 12:14:19 +01:00
|
|
|
.surf = &image->planes[0].shadow_surface.isl,
|
anv/image: Support creating uncompressed views of compressed images
In order to get support everywhere, this gets a bit complicated. On Sky
Lake and later, everything is fine because HALIGN/VALIGN are specified
in surface elements and are required to be at least 4 so any offsetting
we may need to do falls neatly within the heavy restrictions placed on
the X/Y Offset parameter of RENDER_SURFACE_STATE. On Broadwell and
earlier, HALIGN/VALIGN are specified in pixels and are hard-coded to
align to exactly the block size of the compressed texture. This means
that, when reinterpreted as a non-compressed texture, the tile offsets
may be anything and we can't rely on X/Y Offset.
In order to work around this issue, we fall back to linear where we can
trivially offset to whatever element we so choose. However, since
linear texturing performance is terrible, we create a tiled shadow copy
of the image to use for texturing. Whenever the user does a layout
transition from anything to SHADER_READ_ONLY_OPTIMAL, we use blorp to
copy the contents of the texture from the linear copy to the tiled
shadow copy. This assumes that the client will use the image far more
for texturing than as a storage image or render target.
Even though we don't need the shadow copy on Sky Lake, we implement it
this way first to make testing easier. Due to the hardware restriction
that ASTC must not be linear, ASTC does not work yet.
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
2017-07-11 13:17:06 -07:00
|
|
|
.addr = {
|
2017-07-19 12:14:19 +01:00
|
|
|
.buffer = image->planes[0].bo,
|
|
|
|
|
.offset = image->planes[0].bo_offset +
|
|
|
|
|
image->planes[0].shadow_surface.offset,
|
2017-11-03 15:20:08 -07:00
|
|
|
.mocs = cmd_buffer->device->default_mocs,
|
anv/image: Support creating uncompressed views of compressed images
In order to get support everywhere, this gets a bit complicated. On Sky
Lake and later, everything is fine because HALIGN/VALIGN are specified
in surface elements and are required to be at least 4 so any offsetting
we may need to do falls neatly within the heavy restrictions placed on
the X/Y Offset parameter of RENDER_SURFACE_STATE. On Broadwell and
earlier, HALIGN/VALIGN are specified in pixels and are hard-coded to
align to exactly the block size of the compressed texture. This means
that, when reinterpreted as a non-compressed texture, the tile offsets
may be anything and we can't rely on X/Y Offset.
In order to work around this issue, we fall back to linear where we can
trivially offset to whatever element we so choose. However, since
linear texturing performance is terrible, we create a tiled shadow copy
of the image to use for texturing. Whenever the user does a layout
transition from anything to SHADER_READ_ONLY_OPTIMAL, we use blorp to
copy the contents of the texture from the linear copy to the tiled
shadow copy. This assumes that the client will use the image far more
for texturing than as a storage image or render target.
Even though we don't need the shadow copy on Sky Lake, we implement it
this way first to make testing easier. Due to the hardware restriction
that ASTC must not be linear, ASTC does not work yet.
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
2017-07-11 13:17:06 -07:00
|
|
|
},
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
for (uint32_t l = 0; l < level_count; l++) {
|
|
|
|
|
const uint32_t level = base_level + l;
|
|
|
|
|
|
|
|
|
|
const VkExtent3D extent = {
|
|
|
|
|
.width = anv_minify(image->extent.width, level),
|
|
|
|
|
.height = anv_minify(image->extent.height, level),
|
|
|
|
|
.depth = anv_minify(image->extent.depth, level),
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
if (image->type == VK_IMAGE_TYPE_3D)
|
|
|
|
|
layer_count = extent.depth;
|
|
|
|
|
|
|
|
|
|
for (uint32_t a = 0; a < layer_count; a++) {
|
|
|
|
|
const uint32_t layer = base_layer + a;
|
|
|
|
|
|
|
|
|
|
blorp_copy(&batch, &surf, level, layer,
|
|
|
|
|
&shadow_surf, level, layer,
|
|
|
|
|
0, 0, 0, 0, extent.width, extent.height);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
blorp_batch_finish(&batch);
|
|
|
|
|
}
|
|
|
|
|
|
2017-11-21 13:30:49 -08:00
|
|
|
void
|
|
|
|
|
anv_image_clear_color(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
|
const struct anv_image *image,
|
|
|
|
|
VkImageAspectFlagBits aspect,
|
|
|
|
|
enum isl_aux_usage aux_usage,
|
|
|
|
|
enum isl_format format, struct isl_swizzle swizzle,
|
|
|
|
|
uint32_t level, uint32_t base_layer, uint32_t layer_count,
|
|
|
|
|
VkRect2D area, union isl_color_value clear_color)
|
|
|
|
|
{
|
|
|
|
|
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
|
|
|
|
|
|
|
|
|
|
/* We don't support planar images with multisampling yet */
|
|
|
|
|
assert(image->n_planes == 1);
|
|
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
|
|
|
|
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
|
|
|
|
|
|
|
|
|
|
struct blorp_surf surf;
|
|
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
|
2018-02-02 14:51:56 -08:00
|
|
|
ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
|
2017-11-21 13:30:49 -08:00
|
|
|
aux_usage, &surf);
|
|
|
|
|
anv_cmd_buffer_mark_image_written(cmd_buffer, image, aspect, aux_usage,
|
|
|
|
|
level, base_layer, layer_count);
|
|
|
|
|
|
|
|
|
|
blorp_clear(&batch, &surf, format, anv_swizzle_for_render(swizzle),
|
|
|
|
|
level, base_layer, layer_count,
|
|
|
|
|
area.offset.x, area.offset.y,
|
|
|
|
|
area.offset.x + area.extent.width,
|
|
|
|
|
area.offset.y + area.extent.height,
|
|
|
|
|
clear_color, NULL);
|
|
|
|
|
|
|
|
|
|
blorp_batch_finish(&batch);
|
|
|
|
|
}
|
|
|
|
|
|
2017-11-21 14:46:25 -08:00
|
|
|
void
|
|
|
|
|
anv_image_clear_depth_stencil(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
|
const struct anv_image *image,
|
|
|
|
|
VkImageAspectFlags aspects,
|
|
|
|
|
enum isl_aux_usage depth_aux_usage,
|
|
|
|
|
uint32_t level,
|
|
|
|
|
uint32_t base_layer, uint32_t layer_count,
|
|
|
|
|
VkRect2D area,
|
|
|
|
|
float depth_value, uint8_t stencil_value)
|
|
|
|
|
{
|
|
|
|
|
assert(image->aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
|
|
|
|
|
VK_IMAGE_ASPECT_STENCIL_BIT));
|
|
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
|
|
|
|
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
|
|
|
|
|
|
|
|
|
|
struct blorp_surf depth = {};
|
|
|
|
|
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
|
|
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device,
|
|
|
|
|
image, VK_IMAGE_ASPECT_DEPTH_BIT,
|
2018-02-02 14:51:56 -08:00
|
|
|
ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
|
2017-11-21 14:46:25 -08:00
|
|
|
depth_aux_usage, &depth);
|
|
|
|
|
depth.clear_color.f32[0] = ANV_HZ_FC_VAL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct blorp_surf stencil = {};
|
|
|
|
|
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
|
|
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device,
|
|
|
|
|
image, VK_IMAGE_ASPECT_STENCIL_BIT,
|
2018-02-02 14:51:56 -08:00
|
|
|
ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
|
2017-11-21 14:46:25 -08:00
|
|
|
ISL_AUX_USAGE_NONE, &stencil);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
blorp_clear_depth_stencil(&batch, &depth, &stencil,
|
|
|
|
|
level, base_layer, layer_count,
|
|
|
|
|
area.offset.x, area.offset.y,
|
|
|
|
|
area.offset.x + area.extent.width,
|
|
|
|
|
area.offset.y + area.extent.height,
|
|
|
|
|
aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
|
|
|
|
|
depth_value,
|
|
|
|
|
(aspects & VK_IMAGE_ASPECT_STENCIL_BIT) ? 0xff : 0,
|
|
|
|
|
stencil_value);
|
|
|
|
|
|
|
|
|
|
blorp_batch_finish(&batch);
|
|
|
|
|
}
|
|
|
|
|
|
2017-11-21 10:20:57 -08:00
|
|
|
void
|
|
|
|
|
anv_image_hiz_op(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
|
const struct anv_image *image,
|
|
|
|
|
VkImageAspectFlagBits aspect, uint32_t level,
|
|
|
|
|
uint32_t base_layer, uint32_t layer_count,
|
|
|
|
|
enum isl_aux_op hiz_op)
|
|
|
|
|
{
|
|
|
|
|
assert(aspect == VK_IMAGE_ASPECT_DEPTH_BIT);
|
|
|
|
|
assert(base_layer + layer_count <= anv_image_aux_layers(image, aspect, level));
|
2017-07-19 12:14:19 +01:00
|
|
|
assert(anv_image_aspect_to_plane(image->aspects,
|
|
|
|
|
VK_IMAGE_ASPECT_DEPTH_BIT) == 0);
|
|
|
|
|
|
2017-01-05 23:32:07 -08:00
|
|
|
struct blorp_batch batch;
|
|
|
|
|
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
|
|
|
|
|
|
|
|
|
|
struct blorp_surf surf;
|
2017-11-03 15:18:45 -07:00
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device,
|
|
|
|
|
image, VK_IMAGE_ASPECT_DEPTH_BIT,
|
2018-02-02 14:51:56 -08:00
|
|
|
ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
|
2017-11-21 12:10:30 -08:00
|
|
|
ISL_AUX_USAGE_HIZ, &surf);
|
2017-05-20 15:00:42 -07:00
|
|
|
surf.clear_color.f32[0] = ANV_HZ_FC_VAL;
|
2017-01-05 23:32:07 -08:00
|
|
|
|
2018-01-19 15:14:37 -08:00
|
|
|
blorp_hiz_op(&batch, &surf, level, base_layer, layer_count, hiz_op);
|
2017-11-21 10:20:57 -08:00
|
|
|
|
2017-01-05 23:32:07 -08:00
|
|
|
blorp_batch_finish(&batch);
|
|
|
|
|
}
|
2017-03-10 16:31:16 -08:00
|
|
|
|
2017-11-21 14:46:25 -08:00
|
|
|
void
|
|
|
|
|
anv_image_hiz_clear(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
|
const struct anv_image *image,
|
|
|
|
|
VkImageAspectFlags aspects,
|
|
|
|
|
uint32_t level,
|
|
|
|
|
uint32_t base_layer, uint32_t layer_count,
|
|
|
|
|
VkRect2D area, uint8_t stencil_value)
|
|
|
|
|
{
|
|
|
|
|
assert(image->aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
|
|
|
|
|
VK_IMAGE_ASPECT_STENCIL_BIT));
|
|
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
|
|
|
|
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);
|
|
|
|
|
|
|
|
|
|
struct blorp_surf depth = {};
|
|
|
|
|
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
|
|
|
|
|
assert(base_layer + layer_count <=
|
|
|
|
|
anv_image_aux_layers(image, VK_IMAGE_ASPECT_DEPTH_BIT, level));
|
|
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device,
|
|
|
|
|
image, VK_IMAGE_ASPECT_DEPTH_BIT,
|
2018-02-02 14:51:56 -08:00
|
|
|
ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
|
2017-11-21 14:46:25 -08:00
|
|
|
ISL_AUX_USAGE_HIZ, &depth);
|
|
|
|
|
depth.clear_color.f32[0] = ANV_HZ_FC_VAL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct blorp_surf stencil = {};
|
|
|
|
|
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
|
|
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device,
|
|
|
|
|
image, VK_IMAGE_ASPECT_STENCIL_BIT,
|
2018-02-02 14:51:56 -08:00
|
|
|
ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
|
2017-11-21 14:46:25 -08:00
|
|
|
ISL_AUX_USAGE_NONE, &stencil);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
blorp_hiz_clear_depth_stencil(&batch, &depth, &stencil,
|
|
|
|
|
level, base_layer, layer_count,
|
|
|
|
|
area.offset.x, area.offset.y,
|
|
|
|
|
area.offset.x + area.extent.width,
|
|
|
|
|
area.offset.y + area.extent.height,
|
|
|
|
|
aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
|
|
|
|
|
ANV_HZ_FC_VAL,
|
|
|
|
|
aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
|
|
|
|
|
stencil_value);
|
|
|
|
|
|
|
|
|
|
blorp_batch_finish(&batch);
|
|
|
|
|
|
|
|
|
|
/* From the SKL PRM, Depth Buffer Clear:
|
|
|
|
|
*
|
|
|
|
|
* Depth Buffer Clear Workaround
|
|
|
|
|
* Depth buffer clear pass using any of the methods (WM_STATE, 3DSTATE_WM
|
|
|
|
|
* or 3DSTATE_WM_HZ_OP) must be followed by a PIPE_CONTROL command with
|
|
|
|
|
* DEPTH_STALL bit and Depth FLUSH bits “set” before starting to render.
|
|
|
|
|
* DepthStall and DepthFlush are not needed between consecutive depth clear
|
|
|
|
|
* passes nor is it required if the depth-clear pass was done with
|
|
|
|
|
* “full_surf_clear” bit set in the 3DSTATE_WM_HZ_OP.
|
|
|
|
|
*/
|
|
|
|
|
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
|
|
|
|
|
cmd_buffer->state.pending_pipe_bits |=
|
|
|
|
|
ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | ANV_PIPE_DEPTH_STALL_BIT;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-10 16:31:16 -08:00
|
|
|
void
|
2017-11-21 09:56:41 -08:00
|
|
|
anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
|
const struct anv_image *image,
|
|
|
|
|
VkImageAspectFlagBits aspect,
|
|
|
|
|
uint32_t base_layer, uint32_t layer_count,
|
|
|
|
|
enum isl_aux_op mcs_op, bool predicate)
|
2017-03-10 16:31:16 -08:00
|
|
|
{
|
2017-11-21 09:56:41 -08:00
|
|
|
assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
|
|
|
|
|
assert(image->samples > 1);
|
|
|
|
|
assert(base_layer + layer_count <= anv_image_aux_layers(image, aspect, 0));
|
2017-03-10 16:31:16 -08:00
|
|
|
|
2017-11-21 09:56:41 -08:00
|
|
|
/* Multisampling with multi-planar formats is not supported */
|
|
|
|
|
assert(image->n_planes == 1);
|
|
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
|
|
|
|
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
|
|
|
|
|
predicate ? BLORP_BATCH_PREDICATE_ENABLE : 0);
|
|
|
|
|
|
|
|
|
|
struct blorp_surf surf;
|
|
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
|
2018-02-02 14:51:56 -08:00
|
|
|
ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
|
|
|
|
|
ISL_AUX_USAGE_MCS, &surf);
|
2017-11-21 09:56:41 -08:00
|
|
|
|
2017-11-11 14:32:21 -08:00
|
|
|
if (mcs_op == ISL_AUX_OP_PARTIAL_RESOLVE) {
|
|
|
|
|
/* If we're doing a partial resolve, then we need the indirect clear
|
|
|
|
|
* color. The clear operation just stomps the CCS to a particular value
|
|
|
|
|
* and don't care about format or clear value.
|
|
|
|
|
*/
|
|
|
|
|
const struct anv_address clear_color_addr =
|
|
|
|
|
anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect);
|
|
|
|
|
surf.clear_color_addr = anv_to_blorp_address(clear_color_addr);
|
|
|
|
|
}
|
|
|
|
|
|
2017-11-21 09:56:41 -08:00
|
|
|
/* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
|
|
|
|
|
*
|
|
|
|
|
* "After Render target fast clear, pipe-control with color cache
|
|
|
|
|
* write-flush must be issued before sending any DRAW commands on
|
|
|
|
|
* that render target."
|
|
|
|
|
*
|
|
|
|
|
* This comment is a bit cryptic and doesn't really tell you what's going
|
|
|
|
|
* or what's really needed. It appears that fast clear ops are not
|
|
|
|
|
* properly synchronized with other drawing. This means that we cannot
|
|
|
|
|
* have a fast clear operation in the pipe at the same time as other
|
|
|
|
|
* regular drawing operations. We need to use a PIPE_CONTROL to ensure
|
|
|
|
|
* that the contents of the previous draw hit the render target before we
|
|
|
|
|
* resolve and then use a second PIPE_CONTROL after the resolve to ensure
|
|
|
|
|
* that it is completed before any additional drawing occurs.
|
|
|
|
|
*/
|
|
|
|
|
cmd_buffer->state.pending_pipe_bits |=
|
|
|
|
|
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
|
2017-07-19 12:14:19 +01:00
|
|
|
|
2017-11-21 09:56:41 -08:00
|
|
|
switch (mcs_op) {
|
|
|
|
|
case ISL_AUX_OP_FAST_CLEAR:
|
|
|
|
|
blorp_fast_clear(&batch, &surf, surf.surf->format,
|
|
|
|
|
0, base_layer, layer_count,
|
|
|
|
|
0, 0, image->extent.width, image->extent.height);
|
|
|
|
|
break;
|
|
|
|
|
case ISL_AUX_OP_PARTIAL_RESOLVE:
|
2017-11-11 14:32:21 -08:00
|
|
|
blorp_mcs_partial_resolve(&batch, &surf, surf.surf->format,
|
|
|
|
|
base_layer, layer_count);
|
|
|
|
|
break;
|
|
|
|
|
case ISL_AUX_OP_FULL_RESOLVE:
|
2017-11-21 09:56:41 -08:00
|
|
|
case ISL_AUX_OP_AMBIGUATE:
|
|
|
|
|
default:
|
|
|
|
|
unreachable("Unsupported MCS operation");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cmd_buffer->state.pending_pipe_bits |=
|
|
|
|
|
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
|
|
|
|
|
|
|
|
|
|
blorp_batch_finish(&batch);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
|
|
|
|
|
const struct anv_image *image,
|
|
|
|
|
VkImageAspectFlagBits aspect, uint32_t level,
|
|
|
|
|
uint32_t base_layer, uint32_t layer_count,
|
|
|
|
|
enum isl_aux_op ccs_op, bool predicate)
|
|
|
|
|
{
|
|
|
|
|
assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
|
|
|
|
|
assert(image->samples == 1);
|
2017-07-19 12:14:19 +01:00
|
|
|
assert(level < anv_image_aux_levels(image, aspect));
|
2017-11-21 09:56:41 -08:00
|
|
|
/* Multi-LOD YcBcR is not allowed */
|
|
|
|
|
assert(image->n_planes == 1 || level == 0);
|
|
|
|
|
assert(base_layer + layer_count <=
|
2017-11-11 12:22:45 -08:00
|
|
|
anv_image_aux_layers(image, aspect, level));
|
2017-11-21 09:56:41 -08:00
|
|
|
|
|
|
|
|
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
|
|
|
|
|
uint32_t width_div = image->format->planes[plane].denominator_scales[0];
|
|
|
|
|
uint32_t height_div = image->format->planes[plane].denominator_scales[1];
|
|
|
|
|
uint32_t level_width = anv_minify(image->extent.width, level) / width_div;
|
|
|
|
|
uint32_t level_height = anv_minify(image->extent.height, level) / height_div;
|
2017-03-10 16:31:16 -08:00
|
|
|
|
|
|
|
|
struct blorp_batch batch;
|
2017-04-18 11:03:42 -07:00
|
|
|
blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
|
2017-11-21 09:56:41 -08:00
|
|
|
predicate ? BLORP_BATCH_PREDICATE_ENABLE : 0);
|
2017-03-10 16:31:16 -08:00
|
|
|
|
|
|
|
|
struct blorp_surf surf;
|
2017-11-03 15:18:45 -07:00
|
|
|
get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
|
2018-02-02 14:51:56 -08:00
|
|
|
ANV_IMAGE_LAYOUT_EXPLICIT_AUX,
|
2017-07-19 12:14:19 +01:00
|
|
|
fast_clear_aux_usage(image, aspect),
|
2017-03-10 16:31:16 -08:00
|
|
|
&surf);
|
|
|
|
|
|
2017-11-21 09:56:41 -08:00
|
|
|
if (ccs_op == ISL_AUX_OP_FULL_RESOLVE ||
|
|
|
|
|
ccs_op == ISL_AUX_OP_PARTIAL_RESOLVE) {
|
|
|
|
|
/* If we're doing a resolve operation, then we need the indirect clear
|
|
|
|
|
* color. The clear and ambiguate operations just stomp the CCS to a
|
|
|
|
|
* particular value and don't care about format or clear value.
|
|
|
|
|
*/
|
|
|
|
|
const struct anv_address clear_color_addr =
|
2017-11-21 08:46:25 -08:00
|
|
|
anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect);
|
2017-11-21 09:56:41 -08:00
|
|
|
surf.clear_color_addr = anv_to_blorp_address(clear_color_addr);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
|
|
|
|
|
*
|
|
|
|
|
* "After Render target fast clear, pipe-control with color cache
|
|
|
|
|
* write-flush must be issued before sending any DRAW commands on
|
|
|
|
|
* that render target."
|
|
|
|
|
*
|
|
|
|
|
* This comment is a bit cryptic and doesn't really tell you what's going
|
|
|
|
|
* or what's really needed. It appears that fast clear ops are not
|
|
|
|
|
* properly synchronized with other drawing. This means that we cannot
|
|
|
|
|
* have a fast clear operation in the pipe at the same time as other
|
|
|
|
|
* regular drawing operations. We need to use a PIPE_CONTROL to ensure
|
|
|
|
|
* that the contents of the previous draw hit the render target before we
|
|
|
|
|
* resolve and then use a second PIPE_CONTROL after the resolve to ensure
|
|
|
|
|
* that it is completed before any additional drawing occurs.
|
|
|
|
|
*/
|
|
|
|
|
cmd_buffer->state.pending_pipe_bits |=
|
|
|
|
|
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
|
|
|
|
|
|
|
|
|
|
switch (ccs_op) {
|
|
|
|
|
case ISL_AUX_OP_FAST_CLEAR:
|
|
|
|
|
blorp_fast_clear(&batch, &surf, surf.surf->format,
|
|
|
|
|
level, base_layer, layer_count,
|
|
|
|
|
0, 0, level_width, level_height);
|
|
|
|
|
break;
|
|
|
|
|
case ISL_AUX_OP_FULL_RESOLVE:
|
|
|
|
|
case ISL_AUX_OP_PARTIAL_RESOLVE:
|
|
|
|
|
blorp_ccs_resolve(&batch, &surf, level, base_layer, layer_count,
|
2018-01-19 15:02:07 -08:00
|
|
|
surf.surf->format, ccs_op);
|
2017-11-21 09:56:41 -08:00
|
|
|
break;
|
|
|
|
|
case ISL_AUX_OP_AMBIGUATE:
|
2017-11-27 18:09:48 -08:00
|
|
|
for (uint32_t a = 0; a < layer_count; a++) {
|
|
|
|
|
const uint32_t layer = base_layer + a;
|
|
|
|
|
blorp_ccs_ambiguate(&batch, &surf, level, layer);
|
|
|
|
|
}
|
|
|
|
|
break;
|
2017-11-21 09:56:41 -08:00
|
|
|
default:
|
|
|
|
|
unreachable("Unsupported CCS operation");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cmd_buffer->state.pending_pipe_bits |=
|
|
|
|
|
ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
|
2017-03-10 16:31:16 -08:00
|
|
|
|
|
|
|
|
blorp_batch_finish(&batch);
|
|
|
|
|
}
|