v3dv/cmd_buffer: move variant checking to CmdDraw

In order to properly check (and possibly compile) shader variants we
need a pipeline and a compatible descriptor set. So far we were trying
to do that check as early as possible, so we were trying to do it at
CmdBindPipeline or CmdBindDescriptorSets, and a combination of dirty
flags. This proved not to cover all the corner cases, and made the
code complex, as it needed to handle cases where the descriptors were
not yet available and required returning early. The latter also meant
that we were running several checks that failed in the middle.

This commit moves the variant check to CmdDraw, when we should have a
pipeline and compatible descriptor sets, and simplifies the existing
code while making it stricter.

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6766>
This commit is contained in:
Alejandro Piñeiro 2020-05-21 15:18:44 +02:00 committed by Marge Bot
parent 57a254c48d
commit 03f5fae88f
3 changed files with 56 additions and 113 deletions

View file

@ -2110,15 +2110,8 @@ job_update_ez_state(struct v3dv_job *job, struct v3dv_pipeline *pipeline)
* the v3d_fs_key. Here we just fill-up cmd_buffer specific info. All info
* coming from the pipeline create info was already filled up when the pipeline
* was created
*
* It also returns if it was able to populate the info based on the descriptor
* info. Note that returning false is a possible valid outcome, that could
* happens if the descriptors are being bound with more than one
* CmdBindDescriptorSet call (and this is needed in some cases, like if you
* are skipping descriptor sets). If that is the case we just stop trying
* getting the variant.
*/
static bool
static void
cmd_buffer_populate_v3d_key(struct v3d_key *key,
struct v3dv_cmd_buffer *cmd_buffer)
{
@ -2144,9 +2137,7 @@ cmd_buffer_populate_v3d_key(struct v3d_key *key,
cmd_buffer->state.pipeline->layout,
texture_idx);
if (image_view == NULL)
return false;
assert(image_view);
const struct v3dv_sampler *sampler = NULL;
if (sampler_idx != V3DV_NO_SAMPLER_IDX) {
@ -2155,8 +2146,7 @@ cmd_buffer_populate_v3d_key(struct v3d_key *key,
sampler_map,
cmd_buffer->state.pipeline->layout,
sampler_idx);
if (sampler == NULL)
return false;
assert(sampler);
}
key->tex[combined_idx].return_size =
@ -2176,11 +2166,9 @@ cmd_buffer_populate_v3d_key(struct v3d_key *key,
*/
}
}
return true;
}
static bool
static void
update_fs_variant(struct v3dv_cmd_buffer *cmd_buffer)
{
struct v3dv_shader_variant *variant;
@ -2190,78 +2178,65 @@ update_fs_variant(struct v3dv_cmd_buffer *cmd_buffer)
/* We start with a copy of the original pipeline key */
memcpy(&local_key, &p_stage->key.fs, sizeof(struct v3d_fs_key));
if (cmd_buffer_populate_v3d_key(&local_key.base, cmd_buffer)) {
VkResult vk_result;
variant = v3dv_get_shader_variant(p_stage, &local_key.base,
sizeof(struct v3d_fs_key),
&cmd_buffer->device->alloc,
&vk_result);
/* At this point we are not creating a vulkan object to return to the
* API user, so we can't really return back a OOM error
*/
assert(variant);
cmd_buffer_populate_v3d_key(&local_key.base, cmd_buffer);
if (p_stage->current_variant != variant) {
p_stage->current_variant = variant;
}
} else {
return false;
}
VkResult vk_result;
variant = v3dv_get_shader_variant(p_stage, &local_key.base,
sizeof(struct v3d_fs_key),
&cmd_buffer->device->alloc,
&vk_result);
/* At this point we are not creating a vulkan object to return to the
* API user, so we can't really return back a OOM error
*/
assert(variant);
assert(vk_result == VK_SUCCESS);
return true;
p_stage->current_variant = variant;
}
static bool
static void
update_vs_variant(struct v3dv_cmd_buffer *cmd_buffer)
{
struct v3dv_shader_variant *variant;
struct v3dv_pipeline_stage *p_stage = cmd_buffer->state.pipeline->vs;
struct v3dv_pipeline_stage *p_stage;
struct v3d_vs_key local_key;
VkResult vk_result;
/* We start with a copy of the original pipeline key */
p_stage = cmd_buffer->state.pipeline->vs;
memcpy(&local_key, &p_stage->key.vs, sizeof(struct v3d_vs_key));
if (cmd_buffer_populate_v3d_key(&local_key.base, cmd_buffer)) {
VkResult vk_result;
variant = v3dv_get_shader_variant(p_stage, &local_key.base,
sizeof(struct v3d_vs_key),
&cmd_buffer->device->alloc,
&vk_result);
/* At this point we are not creating a vulkan object to return to the
* API user, so we can't really return back a OOM error
*/
assert(variant);
cmd_buffer_populate_v3d_key(&local_key.base, cmd_buffer);
if (p_stage->current_variant != variant) {
p_stage->current_variant = variant;
}
} else {
return false;
}
variant = v3dv_get_shader_variant(p_stage, &local_key.base,
sizeof(struct v3d_vs_key),
&cmd_buffer->device->alloc,
&vk_result);
/* At this point we are not creating a vulkan object to return to the
* API user, so we can't really return back a OOM error
*/
assert(variant);
assert(vk_result == VK_SUCCESS);
p_stage->current_variant = variant;
/* Now the vs_bin */
p_stage = cmd_buffer->state.pipeline->vs_bin;
memcpy(&local_key, &p_stage->key.vs, sizeof(struct v3d_vs_key));
if (cmd_buffer_populate_v3d_key(&local_key.base, cmd_buffer)) {
VkResult vk_result;
variant = v3dv_get_shader_variant(p_stage, &local_key.base,
sizeof(struct v3d_vs_key),
&cmd_buffer->device->alloc,
&vk_result);
cmd_buffer_populate_v3d_key(&local_key.base, cmd_buffer);
variant = v3dv_get_shader_variant(p_stage, &local_key.base,
sizeof(struct v3d_vs_key),
&cmd_buffer->device->alloc,
&vk_result);
/* At this point we are not creating a vulkan object to return to the
* API user, so we can't really return back a OOM error
*/
assert(variant);
/* At this point we are not creating a vulkan object to return to the
* API user, so we can't really return back a OOM error
*/
assert(variant);
assert(vk_result == VK_SUCCESS);
if (p_stage->current_variant != variant) {
p_stage->current_variant = variant;
}
} else {
return false;
}
return true;
p_stage->current_variant = variant;
}
/*
@ -2271,18 +2246,14 @@ update_vs_variant(struct v3dv_cmd_buffer *cmd_buffer)
* re-create the v3d_keys and update the variant. Note that internally the
* pipeline has a variant cache (hash table) to avoid unneeded compilations
*
* Returns if it was able to go to the end of the update variants
* process. Note that this is not the same that getting a new variant or
* not. If at this moment we don't have all the descriptors bound, we can't
* check for a new variant, and the SHADER_VARIANTS flag needs to keep dirty.
*/
static bool
static void
update_pipeline_variants(struct v3dv_cmd_buffer *cmd_buffer)
{
assert(cmd_buffer->state.pipeline);
return (update_fs_variant(cmd_buffer) ||
update_vs_variant(cmd_buffer));
update_fs_variant(cmd_buffer);
update_vs_variant(cmd_buffer);
}
static void
@ -2316,11 +2287,6 @@ bind_graphics_pipeline(struct v3dv_cmd_buffer *cmd_buffer,
cmd_buffer_bind_pipeline_static_state(cmd_buffer, &pipeline->dynamic_state);
if (cmd_buffer->state.dirty & V3DV_CMD_DIRTY_SHADER_VARIANTS) {
if (update_pipeline_variants(cmd_buffer))
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_SHADER_VARIANTS;
}
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_PIPELINE;
}
@ -3211,6 +3177,7 @@ cmd_buffer_emit_pre_draw(struct v3dv_cmd_buffer *cmd_buffer)
V3DV_CMD_DIRTY_VERTEX_BUFFER |
V3DV_CMD_DIRTY_DESCRIPTOR_SETS |
V3DV_CMD_DIRTY_PUSH_CONSTANTS)) {
update_pipeline_variants(cmd_buffer);
emit_gl_shader_state(cmd_buffer);
}
@ -3571,12 +3538,6 @@ v3dv_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
}
}
if (cmd_buffer->state.pipeline) {
update_pipeline_variants(cmd_buffer);
} else {
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_SHADER_VARIANTS;
}
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_DESCRIPTOR_SETS;
}

View file

@ -41,12 +41,6 @@ descriptor_type_is_dynamic(VkDescriptorType type)
/*
* Tries to get a real descriptor using a descriptor map index from the
* descriptor_state + pipeline_layout.
*
* Note that it is possible to get a NULL. This could happens if not all the
* needed descriptors are bound yet (this can happens while checking for
* variants). Caller should decide if getting a NULL descriptor is a valid
* outcome at the context or not.
*
*/
struct v3dv_descriptor *
v3dv_descriptor_map_get_descriptor(struct v3dv_descriptor_state *descriptor_state,
@ -58,16 +52,11 @@ v3dv_descriptor_map_get_descriptor(struct v3dv_descriptor_state *descriptor_stat
assert(index >= 0 && index < map->num_desc);
uint32_t set_number = map->set[index];
if (!(descriptor_state->valid & 1 << set_number)) {
return NULL;
}
assert((descriptor_state->valid & 1 << set_number));
struct v3dv_descriptor_set *set =
descriptor_state->descriptor_sets[set_number];
if (set == NULL)
return NULL;
assert(set);
uint32_t binding_number = map->binding[index];
assert(binding_number < set->layout->binding_count);
@ -104,15 +93,11 @@ v3dv_descriptor_map_get_sampler(struct v3dv_descriptor_state *descriptor_state,
assert(index >= 0 && index < map->num_desc);
uint32_t set_number = map->set[index];
if (!(descriptor_state->valid & 1 << set_number)) {
return NULL;
}
assert(descriptor_state->valid & 1 << set_number);
struct v3dv_descriptor_set *set =
descriptor_state->descriptor_sets[set_number];
if (set == NULL)
return NULL;
assert(set);
uint32_t binding_number = map->binding[index];
assert(binding_number < set->layout->binding_count);
@ -160,9 +145,7 @@ v3dv_descriptor_map_get_image_view(struct v3dv_descriptor_state *descriptor_stat
pipeline_layout,
index, NULL);
if (image_descriptor == NULL)
return NULL;
assert(image_descriptor);
assert(image_descriptor->type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
image_descriptor->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
assert(image_descriptor->image_view);

View file

@ -595,10 +595,9 @@ enum v3dv_cmd_dirty_bits {
V3DV_CMD_DIRTY_DESCRIPTOR_SETS = 1 << 7,
V3DV_CMD_DIRTY_PUSH_CONSTANTS = 1 << 8,
V3DV_CMD_DIRTY_BLEND_CONSTANTS = 1 << 9,
V3DV_CMD_DIRTY_SHADER_VARIANTS = 1 << 10,
V3DV_CMD_DIRTY_OCCLUSION_QUERY = 1 << 11,
V3DV_CMD_DIRTY_DEPTH_BIAS = 1 << 12,
V3DV_CMD_DIRTY_LINE_WIDTH = 1 << 13,
V3DV_CMD_DIRTY_OCCLUSION_QUERY = 1 << 10,
V3DV_CMD_DIRTY_DEPTH_BIAS = 1 << 11,
V3DV_CMD_DIRTY_LINE_WIDTH = 1 << 12,
};
struct v3dv_dynamic_state {