pvr: Store enum pvr_stage_allocation instead of VkShaderStageFlags

This commit changes the pipeline layout, desc. set layout,
and desc. set layout binding to keep track of shader stage usage
with a mask of enum pvr_stage_allocation instead of
VkShaderStageFlags.

This commit also renames the relevant fields to
'shader_stage_mask' to make the naming uniform across structs.

Signed-off-by: Karmjit Mahil <Karmjit.Mahil@imgtec.com>
Reviewed-by: Frank Binns <frank.binns@imgtec.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21331>
Authored by Karmjit Mahil on 2022-12-06 15:34:40 +00:00; committed by Marge Bot
parent 15f1478eb9
commit b7f8a120bf
2 changed files with 27 additions and 38 deletions
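
In short, the diff replaces per-stage VkShaderStageFlags comparisons with a compact bitmask indexed by enum pvr_stage_allocation. The following standalone sketch only illustrates that idea: the enum values and BITFIELD_BIT() mirror the driver code in the diff below, the VkShaderStageFlagBits constants are the standard vulkan_core.h values, and the remaining scaffolding (uint32_t in place of VkShaderStageFlags, the mock macros, main()) is purely illustrative.

/* Illustrative sketch only -- not driver code. */
#include <stdint.h>
#include <stdio.h>

/* Mirrors the stages named in the diff; the real enum lives in the driver. */
enum pvr_stage_allocation {
   PVR_STAGE_ALLOCATION_VERTEX_GEOMETRY,
   PVR_STAGE_ALLOCATION_FRAGMENT,
   PVR_STAGE_ALLOCATION_COMPUTE,
   PVR_STAGE_ALLOCATION_COUNT,
};

/* Stand-in for Mesa's util macro of the same name. */
#define BITFIELD_BIT(b) (1U << (b))

/* Standard VkShaderStageFlagBits values from vulkan_core.h. */
#define VK_SHADER_STAGE_VERTEX_BIT 0x00000001u
#define VK_SHADER_STAGE_GEOMETRY_BIT 0x00000008u
#define VK_SHADER_STAGE_FRAGMENT_BIT 0x00000010u
#define VK_SHADER_STAGE_COMPUTE_BIT 0x00000020u

/* Folds a Vulkan stage mask into one bit per pvr allocation stage
 * (vertex and geometry map to the same allocation stage, as in the diff).
 */
static uint8_t vk_to_pvr_shader_stage_flags(uint32_t vk_flags)
{
   uint8_t flags = 0;

   if (vk_flags & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT))
      flags |= BITFIELD_BIT(PVR_STAGE_ALLOCATION_VERTEX_GEOMETRY);
   if (vk_flags & VK_SHADER_STAGE_FRAGMENT_BIT)
      flags |= BITFIELD_BIT(PVR_STAGE_ALLOCATION_FRAGMENT);
   if (vk_flags & VK_SHADER_STAGE_COMPUTE_BIT)
      flags |= BITFIELD_BIT(PVR_STAGE_ALLOCATION_COMPUTE);

   return flags;
}

int main(void)
{
   /* A binding visible to vertex and fragment shaders collapses to two bits. */
   const uint8_t mask =
      vk_to_pvr_shader_stage_flags(VK_SHADER_STAGE_VERTEX_BIT |
                                   VK_SHADER_STAGE_FRAGMENT_BIT);

   /* Stage checks become a single AND against the per-stage bit, as in the
    * updated pvr_CreateDescriptorSetLayout() loops.
    */
   for (uint32_t stage = 0; stage < PVR_STAGE_ALLOCATION_COUNT; stage++) {
      if (mask & BITFIELD_BIT(stage))
         printf("allocation stage %u uses this binding\n", stage);
   }

   return 0;
}

A uint8_t mask is sufficient because there are at most eight allocation stages, which the new driver code guards with the static_assert(PVR_STAGE_ALLOCATION_COUNT <= 8, ...) visible in the first hunk.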

@@ -171,27 +171,22 @@ static void pvr_descriptor_size_info_init(
    }
 }
 
-static bool pvr_stage_matches_vk_flags(enum pvr_stage_allocation pvr_stage,
-                                        VkShaderStageFlags flags)
+static uint8_t vk_to_pvr_shader_stage_flags(VkShaderStageFlags vk_flags)
 {
-   VkShaderStageFlags flags_per_stage;
+   uint8_t flags = 0;
 
-   switch (pvr_stage) {
-   case PVR_STAGE_ALLOCATION_VERTEX_GEOMETRY:
-      flags_per_stage = VK_SHADER_STAGE_VERTEX_BIT |
-                        VK_SHADER_STAGE_GEOMETRY_BIT;
-      break;
-   case PVR_STAGE_ALLOCATION_FRAGMENT:
-      flags_per_stage = VK_SHADER_STAGE_FRAGMENT_BIT;
-      break;
-   case PVR_STAGE_ALLOCATION_COMPUTE:
-      flags_per_stage = VK_SHADER_STAGE_COMPUTE_BIT;
-      break;
-   default:
-      unreachable("Unrecognized allocation stage.");
-   }
+   static_assert(PVR_STAGE_ALLOCATION_COUNT <= 8, "Not enough bits for flags.");
 
-   return !!(flags_per_stage & flags);
+   if (vk_flags & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT))
+      flags |= BITFIELD_BIT(PVR_STAGE_ALLOCATION_VERTEX_GEOMETRY);
+
+   if (vk_flags & VK_SHADER_STAGE_FRAGMENT_BIT)
+      flags |= BITFIELD_BIT(PVR_STAGE_ALLOCATION_FRAGMENT);
+
+   if (vk_flags & VK_SHADER_STAGE_COMPUTE_BIT)
+      flags |= BITFIELD_BIT(PVR_STAGE_ALLOCATION_COMPUTE);
+
+   return flags;
 }
 
 /* If allocator == NULL, the internal one will be used. */
@@ -506,7 +501,7 @@ VkResult pvr_CreateDescriptorSetLayout(
       const VkDescriptorSetLayoutBinding *const binding = &bindings[bind_num];
       struct pvr_descriptor_set_layout_binding *const internal_binding =
          &layout->bindings[bind_num];
-      VkShaderStageFlags shader_stages = 0;
+      uint8_t shader_stages = 0;
 
       internal_binding->type = binding->descriptorType;
       /* The binding_numbers can be non-contiguous so we ignore the user
@@ -522,7 +517,7 @@ VkResult pvr_CreateDescriptorSetLayout(
        * So do not use bindings->stageFlags, use shader_stages instead.
        */
       if (binding->descriptorCount) {
-         shader_stages = binding->stageFlags;
+         shader_stages = vk_to_pvr_shader_stage_flags(binding->stageFlags);
 
          internal_binding->descriptor_count = binding->descriptorCount;
          internal_binding->descriptor_index = layout->descriptor_count;
@@ -572,19 +567,17 @@ VkResult pvr_CreateDescriptorSetLayout(
       if (!shader_stages)
          continue;
 
-      internal_binding->shader_stages = shader_stages;
-      layout->shader_stages |= shader_stages;
+      internal_binding->shader_stage_mask = shader_stages;
+      layout->shader_stage_mask |= shader_stages;
 
       for (uint32_t stage = 0;
            stage < ARRAY_SIZE(layout->bindings[0].per_stage_offset_in_dwords);
            stage++) {
          const VkDescriptorType descriptor_type = binding->descriptorType;
 
-         if (!pvr_stage_matches_vk_flags(stage, shader_stages))
+         if (!(shader_stages & BITFIELD_BIT(stage)))
             continue;
 
-         internal_binding->shader_stage_mask |= (1U << stage);
-
          /* We allocate dynamics primary and secondaries separately so that we
           * can do a partial update of USC shared registers by just DMAing the
           * dynamic section and not having to re-DMA everything again.
@@ -635,10 +628,8 @@ VkResult pvr_CreateDescriptorSetLayout(
            stage < ARRAY_SIZE(layout->bindings[0].per_stage_offset_in_dwords);
            stage++) {
          struct pvr_descriptor_size_info size_info;
-         const VkShaderStageFlags shader_stages =
-            internal_binding->shader_stages;
 
-         if (!pvr_stage_matches_vk_flags(stage, shader_stages))
+         if (!(internal_binding->shader_stage_mask & BITFIELD_BIT(stage)))
             continue;
 
          pvr_descriptor_size_info_init(device, descriptor_type, &size_info);
@@ -879,7 +870,7 @@ VkResult pvr_CreatePipelineLayout(VkDevice _device,
       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
 
    layout->set_count = pCreateInfo->setLayoutCount;
-   layout->shader_stages = 0;
+   layout->shader_stage_mask = 0;
    for (uint32_t stage = 0; stage < PVR_STAGE_ALLOCATION_COUNT; stage++) {
       uint32_t descriptor_counts
          [PVR_PIPELINE_LAYOUT_SUPPORTED_DESCRIPTOR_TYPE_COUNT] = { 0 };
@@ -898,7 +889,7 @@ VkResult pvr_CreatePipelineLayout(VkDevice _device,
          pCreateInfo->pSetLayouts[set_num]);
 
       layout->set_layout[set_num] = set_layout;
-      layout->shader_stages |= set_layout->shader_stages;
+      layout->shader_stage_mask |= set_layout->shader_stage_mask;
    }
 
    const struct pvr_descriptor_set_layout_mem_layout *const mem_layout =

@@ -512,12 +512,8 @@ struct pvr_descriptor_set_layout_binding {
    /* Index into the flattened descriptor set */
    uint16_t descriptor_index;
 
-   VkShaderStageFlags shader_stages;
-   /* Mask composed by shifted PVR_STAGE_ALLOCATION_...
-    * Makes it easier to check active shader stages by just shifting and
-    * ANDing instead of using VkShaderStageFlags and match the PVR_STAGE_...
-    */
-   uint32_t shader_stage_mask;
+   /* Mask of enum pvr_stage_allocation. */
+   uint8_t shader_stage_mask;
 
    struct {
       uint32_t primary;
@@ -559,7 +555,8 @@ struct pvr_descriptor_set_layout {
    const struct pvr_sampler **immutable_samplers;
 
    /* Shader stages requiring access to descriptors in this set. */
-   VkShaderStageFlags shader_stages;
+   /* Mask of enum pvr_stage_allocation. */
+   uint8_t shader_stage_mask;
 
    /* Count of each VkDescriptorType per shader stage. Dynamically allocated
    * arrays per stage as to not hard code the max descriptor type here.
@@ -1037,7 +1034,8 @@ struct pvr_pipeline_layout {
    VkShaderStageFlags push_constants_shader_stages;
 
-   VkShaderStageFlags shader_stages;
+   /* Mask of enum pvr_stage_allocation. */
+   uint8_t shader_stage_mask;
 
    /* Per stage masks indicating which set in the layout contains any
    * descriptor of the appropriate types: VK..._{SAMPLER, SAMPLED_IMAGE,