radv: Use common ycbcr conversion lowering

Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20731>
This commit is contained in:
Konstantin Seurer 2023-01-16 20:49:32 +01:00 committed by Marge Bot
parent 9104dafb6f
commit 569517d7ad
9 changed files with 42 additions and 404 deletions

View file

@ -93,7 +93,6 @@ libradv_files = files(
'radv_nir_apply_pipeline_layout.c',
'radv_nir_lower_abi.c',
'radv_nir_lower_ray_queries.c',
'radv_nir_lower_ycbcr_textures.c',
'radv_perfcounter.c',
'radv_pipeline.c',
'radv_pipeline_cache.c',

View file

@ -170,8 +170,8 @@ radv_CreateDescriptorSetLayout(VkDevice _device, const VkDescriptorSetLayoutCrea
/* Store block of offsets first, followed by the conversion descriptors (padded to the struct
* alignment) */
size += num_bindings * sizeof(uint32_t);
size = ALIGN(size, alignof(struct radv_sampler_ycbcr_conversion_state));
size += ycbcr_sampler_count * sizeof(struct radv_sampler_ycbcr_conversion_state);
size = ALIGN(size, alignof(struct vk_ycbcr_conversion_state));
size += ycbcr_sampler_count * sizeof(struct vk_ycbcr_conversion_state);
}
/* We need to allocate descriptor set layouts off the device allocator with DEVICE scope because
@ -187,7 +187,7 @@ radv_CreateDescriptorSetLayout(VkDevice _device, const VkDescriptorSetLayoutCrea
/* We just allocate all the samplers at the end of the struct */
uint32_t *samplers = (uint32_t *)&set_layout->binding[num_bindings];
struct radv_sampler_ycbcr_conversion_state *ycbcr_samplers = NULL;
struct vk_ycbcr_conversion_state *ycbcr_samplers = NULL;
uint32_t *ycbcr_sampler_offsets = NULL;
if (ycbcr_sampler_count > 0) {
@ -197,8 +197,8 @@ radv_CreateDescriptorSetLayout(VkDevice _device, const VkDescriptorSetLayoutCrea
uintptr_t first_ycbcr_sampler_offset =
(uintptr_t)ycbcr_sampler_offsets + sizeof(uint32_t) * num_bindings;
first_ycbcr_sampler_offset =
ALIGN(first_ycbcr_sampler_offset, alignof(struct radv_sampler_ycbcr_conversion_state));
ycbcr_samplers = (struct radv_sampler_ycbcr_conversion_state *)first_ycbcr_sampler_offset;
ALIGN(first_ycbcr_sampler_offset, alignof(struct vk_ycbcr_conversion_state));
ycbcr_samplers = (struct vk_ycbcr_conversion_state *)first_ycbcr_sampler_offset;
} else
set_layout->ycbcr_sampler_offsets_offset = 0;
@ -250,7 +250,7 @@ radv_CreateDescriptorSetLayout(VkDevice _device, const VkDescriptorSetLayoutCrea
if (binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
binding->pImmutableSamplers) {
for (unsigned i = 0; i < binding->descriptorCount; ++i) {
struct radv_sampler_ycbcr_conversion *conversion =
struct vk_ycbcr_conversion *conversion =
radv_sampler_from_handle(binding->pImmutableSamplers[i])->ycbcr_sampler;
if (conversion) {
@ -1763,49 +1763,6 @@ radv_GetDescriptorSetHostMappingVALVE(VkDevice _device, VkDescriptorSet descript
*ppData = set->header.mapped_ptr;
}
/* vkCreateSamplerYcbcrConversion: allocate a conversion object and snapshot
 * the VkSamplerYcbcrConversionCreateInfo fields that the ycbcr shader
 * lowering later consumes.
 *
 * Returns VK_ERROR_OUT_OF_HOST_MEMORY if the allocation fails, VK_SUCCESS
 * otherwise. Ownership of the object passes to the caller via
 * *pYcbcrConversion; it is released by radv_DestroySamplerYcbcrConversion. */
VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateSamplerYcbcrConversion(VkDevice _device,
                                  const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
                                  const VkAllocationCallbacks *pAllocator,
                                  VkSamplerYcbcrConversion *pYcbcrConversion)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_sampler_ycbcr_conversion *conversion = NULL;

   /* vk_zalloc2 zero-initializes, so any field not copied below stays 0. */
   conversion = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*conversion), 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (conversion == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &conversion->base, VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION);

   /* Copy the create-info into the plain-data state struct; this state is
    * what gets hashed into descriptor set layouts and read by the NIR pass. */
   conversion->state.format = pCreateInfo->format;
   conversion->state.ycbcr_model = pCreateInfo->ycbcrModel;
   conversion->state.ycbcr_range = pCreateInfo->ycbcrRange;
   conversion->state.components = pCreateInfo->components;
   conversion->state.chroma_offsets[0] = pCreateInfo->xChromaOffset;
   conversion->state.chroma_offsets[1] = pCreateInfo->yChromaOffset;
   conversion->state.chroma_filter = pCreateInfo->chromaFilter;

   *pYcbcrConversion = radv_sampler_ycbcr_conversion_to_handle(conversion);
   return VK_SUCCESS;
}
/* vkDestroySamplerYcbcrConversion: tear down and free a conversion object.
 * A VK_NULL_HANDLE conversion is a no-op, as the Vulkan spec requires. */
VKAPI_ATTR void VKAPI_CALL
radv_DestroySamplerYcbcrConversion(VkDevice _device, VkSamplerYcbcrConversion ycbcrConversion,
                                   const VkAllocationCallbacks *pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_sampler_ycbcr_conversion, ycbcr_conversion, ycbcrConversion);

   if (!ycbcr_conversion)
      return;

   vk_object_base_finish(&ycbcr_conversion->base);
   vk_free2(&device->vk.alloc, pAllocator, ycbcr_conversion);
}
/* VK_EXT_descriptor_buffer */
VKAPI_ATTR void VKAPI_CALL
radv_GetDescriptorSetLayoutSizeEXT(VkDevice device, VkDescriptorSetLayout layout,

View file

@ -120,7 +120,7 @@ radv_combined_image_descriptor_sampler_offset(
return binding->size - ((!binding->immutable_samplers_equal) ? 16 : 0);
}
static inline const struct radv_sampler_ycbcr_conversion_state *
static inline const struct vk_ycbcr_conversion_state *
radv_immutable_ycbcr_samplers(const struct radv_descriptor_set_layout *set, unsigned binding_index)
{
if (!set->ycbcr_sampler_offsets_offset)
@ -131,8 +131,7 @@ radv_immutable_ycbcr_samplers(const struct radv_descriptor_set_layout *set, unsi
if (offsets[binding_index] == 0)
return NULL;
return (const struct radv_sampler_ycbcr_conversion_state *)((const char *)set +
offsets[binding_index]);
return (const struct vk_ycbcr_conversion_state *)((const char *)set + offsets[binding_index]);
}
struct radv_device;

View file

@ -7722,8 +7722,7 @@ radv_CreateSampler(VkDevice _device, const VkSamplerCreateInfo *pCreateInfo,
radv_init_sampler(device, sampler, pCreateInfo);
sampler->ycbcr_sampler =
ycbcr_conversion ? radv_sampler_ycbcr_conversion_from_handle(ycbcr_conversion->conversion)
: NULL;
ycbcr_conversion ? vk_ycbcr_conversion_from_handle(ycbcr_conversion->conversion) : NULL;
*pSampler = radv_sampler_to_handle(sampler);
return VK_SUCCESS;

View file

@ -697,13 +697,15 @@ radv_physical_device_get_format_properties(struct radv_physical_device *physical
if (multiplanar || desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) {
uint64_t tiling = VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT |
VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT |
VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT |
VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT |
VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT;
VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT;
/* The subsampled formats have no support for linear filters. */
if (desc->layout != UTIL_FORMAT_LAYOUT_SUBSAMPLED) {
tiling |= VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT;
if (vk_format_get_ycbcr_info(format)) {
tiling |= VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT |
VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT;
/* The subsampled formats have no support for linear filters. */
if (desc->layout != UTIL_FORMAT_LAYOUT_SUBSAMPLED)
tiling |= VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT;
}
if (multiplanar)

View file

@ -1,321 +0,0 @@
/*
* Copyright © 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "nir/nir_vulkan.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "vk_format.h"
/* Transient state threaded through the ycbcr lowering helpers while one
 * texture instruction is rewritten into per-plane fetches. */
struct ycbcr_state {
   nir_builder *builder;
   /* Lazily-created txs result (see implicit_downsampled_coords); stays NULL
    * until a normalized cosited-even coordinate actually needs it. */
   nir_ssa_def *image_size;
   /* The original tex instruction being lowered. */
   nir_tex_instr *origin_tex;
   /* Texture deref source of origin_tex. */
   nir_deref_instr *tex_deref;
   /* Immutable conversion state captured from the sampler. */
   const struct radv_sampler_ycbcr_conversion_state *conversion;
   /* True when the sampler forces unnormalized coordinates
    * (S_008F30_FORCE_UNNORMALIZED in the packed sampler dwords). */
   bool unnormalized_coordinates;
};
/* Emit a txs (texture-size) query on the given texture deref and return the
 * result converted to float, for use in coordinate arithmetic. Also flags
 * uses_resource_info_query so the backend knows to lower resinfo later. */
static nir_ssa_def *
get_texture_size(struct ycbcr_state *state, nir_deref_instr *texture)
{
   nir_builder *b = state->builder;
   const struct glsl_type *type = texture->type;
   nir_tex_instr *tex = nir_tex_instr_create(b->shader, 1);

   tex->op = nir_texop_txs;
   tex->sampler_dim = glsl_get_sampler_dim(type);
   tex->is_array = glsl_sampler_type_is_array(type);
   tex->is_shadow = glsl_sampler_type_is_shadow(type);
   tex->dest_type = nir_type_int32;

   /* Single source: the texture deref itself. */
   tex->src[0].src_type = nir_tex_src_texture_deref;
   tex->src[0].src = nir_src_for_ssa(&texture->dest.ssa);

   nir_ssa_dest_init(&tex->instr, &tex->dest, nir_tex_instr_dest_size(tex), 32, NULL);
   nir_builder_instr_insert(b, &tex->instr);

   /* Tells radv_postprocess_nir to run ac_nir_lower_resinfo. */
   state->builder->shader->info.uses_resource_info_query = true;

   return nir_i2f32(b, &tex->dest.ssa);
}
/* Shift a normalized coordinate for cosited-even chroma reconstruction:
 * value + 1 / (div_scale * max_value), where max_value is the (float)
 * texture size along this axis. */
static nir_ssa_def *
implicit_downsampled_coord(nir_builder *b, nir_ssa_def *value, nir_ssa_def *max_value,
                           int div_scale)
{
   return nir_fadd(
      b, value,
      nir_fdiv(b, nir_imm_float(b, 1.0f), nir_fmul(b, nir_imm_float(b, div_scale), max_value)));
}
/* Apply the cosited-even chroma shift to an unnormalized coordinate. With
 * unnormalized coordinates no texture-size division is involved, so the
 * offset reduces to the constant 1 / div_scale. */
static nir_ssa_def *
implicit_downsampled_coord_unnormalized(nir_builder *b, nir_ssa_def *coord, int div_scale)
{
   nir_ssa_def *offset = nir_imm_float(b, 1.0f / (float)div_scale);
   return nir_fadd(b, coord, offset);
}
/* Rewrite the texture coordinates for a chroma plane: divide unnormalized
 * coordinates by the subsampling factor and, for COSITED_EVEN chroma
 * locations, add the half-texel shift so the implicit filtering samples the
 * chroma plane at the correct position. */
static nir_ssa_def *
implicit_downsampled_coords(struct ycbcr_state *state, nir_ssa_def *old_coords)
{
   nir_builder *b = state->builder;
   const struct radv_sampler_ycbcr_conversion_state *conversion = state->conversion;
   nir_ssa_def *image_size = NULL;
   nir_ssa_def *comp[4] = {
      NULL,
   };
   enum pipe_video_chroma_format chroma_format =
      pipe_format_to_chroma_format(vk_format_to_pipe_format(state->conversion->format));
   /* Per-axis subsampling divisors: x is halved for formats at or below
    * 4:2:2 density, y is halved at or below 4:2:0. */
   const unsigned divisors[2] = {chroma_format <= PIPE_VIDEO_CHROMA_FORMAT_422 ? 2 : 1,
                                 chroma_format <= PIPE_VIDEO_CHROMA_FORMAT_420 ? 2 : 1};

   for (int c = 0; c < old_coords->num_components; c++) {
      comp[c] = nir_channel(b, old_coords, c);
      /* Only the first two (x, y) components are ever subsampled. */
      if (c < ARRAY_SIZE(divisors) && divisors[c] > 1) {
         if (state->unnormalized_coordinates)
            comp[c] = nir_fdiv(b, comp[c], nir_imm_float(b, divisors[c]));

         if (conversion->chroma_offsets[c] == VK_CHROMA_LOCATION_COSITED_EVEN) {
            if (state->unnormalized_coordinates) {
               comp[c] = implicit_downsampled_coord_unnormalized(b, comp[c], divisors[c]);
            } else {
               /* The txs query is emitted at most once per instruction. */
               if (!image_size)
                  image_size = get_texture_size(state, state->tex_deref);

               comp[c] = implicit_downsampled_coord(b, comp[c], nir_channel(b, image_size, c), divisors[c]);
            }
         }
      }
   }

   return nir_vec(b, comp, old_coords->num_components);
}
/* Clone the original tex instruction for one plane: copy every source
 * (adjusting coordinates for chroma planes), then append a
 * nir_tex_src_plane source selecting the plane. Returns the new fetch's
 * SSA result. */
static nir_ssa_def *
create_plane_tex_instr_implicit(struct ycbcr_state *state, uint32_t plane)
{
   nir_builder *b = state->builder;
   nir_tex_instr *old_tex = state->origin_tex;
   /* One extra source slot for the plane index added below. */
   nir_tex_instr *tex = nir_tex_instr_create(b->shader, old_tex->num_srcs + 1);
   for (uint32_t i = 0; i < old_tex->num_srcs; i++) {
      tex->src[i].src_type = old_tex->src[i].src_type;

      switch (old_tex->src[i].src_type) {
      case nir_tex_src_coord:
         /* Plane 0 (luma) keeps the original coordinates; chroma planes get
          * the subsampling-adjusted ones. */
         if (plane && true /*state->conversion->chroma_reconstruction*/) {
            assert(old_tex->src[i].src.is_ssa);
            tex->src[i].src =
               nir_src_for_ssa(implicit_downsampled_coords(state, old_tex->src[i].src.ssa));
            break;
         }
         FALLTHROUGH;
      default:
         nir_src_copy(&tex->src[i].src, &old_tex->src[i].src, &tex->instr);
         break;
      }
   }
   tex->src[tex->num_srcs - 1].src = nir_src_for_ssa(nir_imm_int(b, plane));
   tex->src[tex->num_srcs - 1].src_type = nir_tex_src_plane;

   /* Mirror the remaining fields of the original instruction verbatim. */
   tex->sampler_dim = old_tex->sampler_dim;
   tex->dest_type = old_tex->dest_type;
   tex->is_array = old_tex->is_array;
   tex->op = old_tex->op;

   tex->coord_components = old_tex->coord_components;
   tex->is_new_style_shadow = old_tex->is_new_style_shadow;
   tex->component = old_tex->component;

   tex->texture_index = old_tex->texture_index;
   tex->sampler_index = old_tex->sampler_index;

   nir_ssa_dest_init(&tex->instr, &tex->dest, old_tex->dest.ssa.num_components,
                     nir_dest_bit_size(old_tex->dest), NULL);

   nir_builder_instr_insert(b, &tex->instr);

   return &tex->dest.ssa;
}
/* For each output component, which plane it is fetched from (plane[]) and
 * which channel of that plane's fetch result to read (swizzle[]). */
struct swizzle_info {
   unsigned plane[4];
   unsigned swizzle[4];
};

/* Return the plane/channel mapping for a ycbcr format, keyed purely on its
 * plane count. */
static struct swizzle_info
get_plane_swizzles(VkFormat format)
{
   switch (vk_format_get_plane_count(format)) {
   case 3:
      return (struct swizzle_info){{2, 0, 1, 0}, {0, 0, 0, 3}};
   case 2:
      return (struct swizzle_info){{1, 0, 1, 0}, {1, 0, 0, 3}};
   case 1:
      return (struct swizzle_info){{0, 0, 0, 0}, {0, 1, 2, 3}};
   default:
      unreachable("unhandled plane count for ycbcr swizzling");
   }
}
/* Combine the per-plane fetch results into the final vec4, applying both the
 * format's plane/channel mapping and the application-provided
 * VkComponentMapping swizzle. */
static nir_ssa_def *
build_swizzled_components(nir_builder *builder, VkFormat format, VkComponentMapping mapping,
                          nir_ssa_def **plane_values)
{
   struct swizzle_info plane_swizzle = get_plane_swizzles(format);
   enum pipe_swizzle swizzles[4];
   nir_ssa_def *values[4];

   /* Resolve IDENTITY entries of the mapping against the 0,1,2,3 base. */
   vk_format_compose_swizzles(&mapping, (const unsigned char[4]){0, 1, 2, 3}, swizzles);

   nir_ssa_def *zero = nir_imm_float(builder, 0.0f);
   nir_ssa_def *one = nir_imm_float(builder, 1.0f);

   for (unsigned i = 0; i < 4; ++i) {
      switch (swizzles[i]) {
      case PIPE_SWIZZLE_X:
      case PIPE_SWIZZLE_Y:
      case PIPE_SWIZZLE_Z:
      case PIPE_SWIZZLE_W: {
         /* Route the requested channel through the per-plane mapping. */
         unsigned channel = swizzles[i] - PIPE_SWIZZLE_X;
         values[i] = nir_channel(builder, plane_values[plane_swizzle.plane[channel]],
                                 plane_swizzle.swizzle[channel]);
         break;
      }
      case PIPE_SWIZZLE_0:
         values[i] = zero;
         break;
      case PIPE_SWIZZLE_1:
         values[i] = one;
         break;
      default:
         unreachable("unhandled swizzle");
      }
   }
   return nir_vec(builder, values, 4);
}
/* If this tex instruction samples through a binding with an immutable ycbcr
 * conversion, replace it with per-plane fetches, a component swizzle, and
 * (for non-identity models) a ycbcr->rgb conversion. Returns true when the
 * instruction was rewritten and removed. */
static bool
try_lower_tex_ycbcr(const struct radv_pipeline_layout *layout, nir_builder *builder,
                    nir_tex_instr *tex)
{
   int deref_src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
   assert(deref_src_idx >= 0);
   nir_deref_instr *deref = nir_src_as_deref(tex->src[deref_src_idx].src);

   nir_variable *var = nir_deref_instr_get_variable(deref);
   const struct radv_descriptor_set_layout *set_layout =
      layout->set[var->data.descriptor_set].layout;
   const struct radv_descriptor_set_binding_layout *binding =
      &set_layout->binding[var->data.binding];
   const struct radv_sampler_ycbcr_conversion_state *ycbcr_samplers =
      radv_immutable_ycbcr_samplers(set_layout, var->data.binding);

   /* No ycbcr samplers on this binding: nothing to lower. */
   if (!ycbcr_samplers)
      return false;

   assert(binding->immutable_samplers_offset);
   const uint32_t *immutable_samplers =
      radv_immutable_samplers(set_layout, binding);

   /* For the following instructions, we don't apply any change and let the
    * instruction apply to the first plane.
    */
   if (tex->op == nir_texop_txs || tex->op == nir_texop_query_levels || tex->op == nir_texop_lod)
      return false;

   assert(tex->texture_index == 0);
   unsigned array_index = 0;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);
      /* Dynamically indexed arrays of ycbcr samplers are left untouched. */
      if (!nir_src_is_const(deref->arr.index))
         return false;
      array_index = nir_src_as_uint(deref->arr.index);
      array_index = MIN2(array_index, binding->array_size - 1);
   }
   const struct radv_sampler_ycbcr_conversion_state *ycbcr_sampler = ycbcr_samplers + array_index;

   /* VK_FORMAT_UNDEFINED marks an array slot without a conversion. */
   if (ycbcr_sampler->format == VK_FORMAT_UNDEFINED)
      return false;

   /* Read the force-unnormalized bit out of the packed sampler dwords
    * (4 dwords per sampler). */
   bool unnormalized_coordinates = immutable_samplers[4 * array_index + 0] & S_008F30_FORCE_UNNORMALIZED(1);

   struct ycbcr_state state = {
      .builder = builder,
      .origin_tex = tex,
      .tex_deref = deref,
      .conversion = ycbcr_sampler,
      .unnormalized_coordinates = unnormalized_coordinates,
   };

   builder->cursor = nir_before_instr(&tex->instr);

   VkFormat format = state.conversion->format;
   const int plane_count = vk_format_get_plane_count(format);
   nir_ssa_def *plane_values[3];

   /* Emit one fetch per plane. */
   for (int p = 0; p < plane_count; ++p) {
      plane_values[p] = create_plane_tex_instr_implicit(&state, p);
   }

   nir_ssa_def *result =
      build_swizzled_components(builder, format, ycbcr_sampler->components, plane_values);
   if (state.conversion->ycbcr_model != VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY) {
      /* Bit depth is taken from the first channel of plane 0. */
      VkFormat first_format = vk_format_get_plane_format(format, 0);
      uint32_t bits =
         vk_format_get_component_bits(first_format, UTIL_FORMAT_COLORSPACE_RGB, PIPE_SWIZZLE_X);
      /* TODO: swizzle and bpcs */
      uint32_t bpcs[3] = {bits, bits, bits};
      result = nir_convert_ycbcr_to_rgb(builder, state.conversion->ycbcr_model,
                                        state.conversion->ycbcr_range, result, bpcs);
   }

   nir_ssa_def_rewrite_uses(&tex->dest.ssa, result);
   nir_instr_remove(&tex->instr);

   return true;
}
/* nir_shader_instructions_pass callback: attempt the ycbcr lowering on each
 * texture instruction; all other instruction types are left alone. */
static bool
radv_nir_lower_ycbcr_textures_instr(nir_builder *b, nir_instr *instr, void *layout)
{
   if (instr->type == nir_instr_type_tex)
      return try_lower_tex_ycbcr(layout, b, nir_instr_as_tex(instr));

   return false;
}
/* Lower sampling through immutable ycbcr samplers across the whole shader.
 * Returns true if any instruction was rewritten. */
bool
radv_nir_lower_ycbcr_textures(nir_shader *shader, const struct radv_pipeline_layout *layout)
{
   const nir_metadata preserved = nir_metadata_block_index | nir_metadata_dominance;

   return nir_shader_instructions_pass(shader, radv_nir_lower_ycbcr_textures_instr, preserved,
                                       (void *)layout);
}

View file

@ -27,6 +27,7 @@
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "nir/nir_vulkan.h"
#include "spirv/nir_spirv.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
@ -3192,6 +3193,21 @@ radv_pipeline_load_retained_shaders(struct radv_graphics_pipeline *pipeline,
}
}
/* Callback for nir_vk_lower_ycbcr_tex: resolve the immutable ycbcr
 * conversion state for a (set, binding, array_index) triple from the radv
 * pipeline layout, or NULL when the binding has no ycbcr samplers. */
static const struct vk_ycbcr_conversion_state *
ycbcr_conversion_lookup(const void *data, uint32_t set, uint32_t binding, uint32_t array_index)
{
   const struct radv_pipeline_layout *layout = data;
   const struct vk_ycbcr_conversion_state *samplers =
      radv_immutable_ycbcr_samplers(layout->set[set].layout, binding);

   return samplers ? samplers + array_index : NULL;
}
static void
radv_postprocess_nir(struct radv_pipeline *pipeline,
const struct radv_pipeline_layout *pipeline_layout,
@ -3201,6 +3217,7 @@ radv_postprocess_nir(struct radv_pipeline *pipeline,
{
struct radv_device *device = pipeline->device;
enum amd_gfx_level gfx_level = device->physical_device->rad_info.gfx_level;
bool progress;
/* Wave and workgroup size should already be filled. */
assert(stage->info.wave_size && stage->info.workgroup_size);
@ -3254,7 +3271,7 @@ radv_postprocess_nir(struct radv_pipeline *pipeline,
}
if (!pipeline_key->optimisations_disabled) {
bool progress = false;
progress = false;
NIR_PASS(progress, stage->nir, nir_opt_load_store_vectorize, &vectorize_opts);
if (progress) {
NIR_PASS(_, stage->nir, nir_copy_prop);
@ -3266,7 +3283,11 @@ radv_postprocess_nir(struct radv_pipeline *pipeline,
}
}
NIR_PASS(_, stage->nir, radv_nir_lower_ycbcr_textures, pipeline_layout);
progress = false;
NIR_PASS(progress, stage->nir, nir_vk_lower_ycbcr_tex, ycbcr_conversion_lookup, pipeline_layout);
/* Gather info in the case that nir_vk_lower_ycbcr_tex might have emitted resinfo instructions. */
if (progress)
nir_shader_gather_info(stage->nir, nir_shader_get_entrypoint(stage->nir));
if (stage->nir->info.uses_resource_info_query)
NIR_PASS(_, stage->nir, ac_nir_lower_resinfo, gfx_level);

View file

@ -67,6 +67,7 @@
#include "vk_queue.h"
#include "vk_util.h"
#include "vk_image.h"
#include "vk_ycbcr_conversion.h"
#include "rmv/vk_rmv_common.h"
#include "rmv/vk_rmv_tokens.h"
@ -2736,21 +2737,6 @@ void radv_image_view_finish(struct radv_image_view *iview);
VkFormat radv_get_aspect_format(struct radv_image *image, VkImageAspectFlags mask);
/* Plain-data snapshot of a VkSamplerYcbcrConversionCreateInfo; consumed by
 * the ycbcr texture lowering and hashed into descriptor set layouts. */
struct radv_sampler_ycbcr_conversion_state {
   VkFormat format;
   VkSamplerYcbcrModelConversion ycbcr_model;
   VkSamplerYcbcrRange ycbcr_range;
   VkComponentMapping components;
   /* [0] = xChromaOffset, [1] = yChromaOffset. */
   VkChromaLocation chroma_offsets[2];
   VkFilter chroma_filter;
};

/* VkSamplerYcbcrConversion object: vk base object plus the hashed state. */
struct radv_sampler_ycbcr_conversion {
   struct vk_object_base base;
   /* The state is hashed for the descriptor set layout. */
   struct radv_sampler_ycbcr_conversion_state state;
};
struct radv_buffer_view {
struct vk_object_base base;
struct radeon_winsys_bo *bo;
@ -2773,7 +2759,7 @@ radv_image_extent_compare(const struct radv_image *image, const VkExtent3D *exte
struct radv_sampler {
struct vk_object_base base;
uint32_t state[4];
struct radv_sampler_ycbcr_conversion *ycbcr_sampler;
struct vk_ycbcr_conversion *ycbcr_sampler;
uint32_t border_color_slot;
};
@ -3511,9 +3497,6 @@ VK_DEFINE_NONDISP_HANDLE_CASTS(radv_query_pool, base, VkQueryPool,
VK_OBJECT_TYPE_QUERY_POOL)
VK_DEFINE_NONDISP_HANDLE_CASTS(radv_sampler, base, VkSampler,
VK_OBJECT_TYPE_SAMPLER)
VK_DEFINE_NONDISP_HANDLE_CASTS(radv_sampler_ycbcr_conversion, base,
VkSamplerYcbcrConversion,
VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION)
#ifdef __cplusplus
}

View file

@ -532,7 +532,6 @@ struct radv_pipeline_layout;
void radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively, bool allow_copies);
void radv_optimize_nir_algebraic(nir_shader *shader, bool opt_offsets);
bool radv_nir_lower_ycbcr_textures(nir_shader *shader, const struct radv_pipeline_layout *layout);
bool radv_nir_lower_ray_queries(nir_shader *shader, struct radv_device *device);