radv: fix skipping on-disk shaders cache when not useful
This was just broken because individual shaders were still stored on-disk in many situations:
- for shader object, all compute/graphics shaders were stored
- for fast-GPL, graphics shaders were stored
- for pipeline binaries, when the create flag was used
- for rt capture/replay and ray history

This should stop storing unused binaries on-disk and save space.

Found this by inspection.

Cc: mesa-stable
Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/32227>
parent 9c1de5c6b3
commit 08c9dca8db
6 changed files with 39 additions and 31 deletions
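For readers skimming the diff below: the pattern being applied is that the pipeline-level caller decides once whether the on-disk shader cache is actually useful and plumbs that decision (skip_shaders_cache) down into the helpers that create individual shaders, instead of each helper unconditionally storing its binary. The following sketch illustrates only that idea; it is a minimal, hypothetical example in plain C, and names such as disk_cache, cache_insert() and create_shader() are illustrative stand-ins, not the real RADV API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for a compiled binary, a shader and a disk cache. */
struct binary { const void *data; size_t size; };
struct shader { struct binary bin; };
struct disk_cache { const char *path; };

/* Pretend to write the compiled binary to the on-disk cache. */
static void cache_insert(struct disk_cache *cache, const struct binary *bin)
{
   printf("storing %zu bytes in %s\n", bin->size, cache->path);
}

/* Analogue of the pattern in this commit: the creation helper takes a
 * skip_cache flag so callers that know the cached copy would never be
 * reused (shader objects, fast-linked GPL, capture/replay, ...) can avoid
 * leaving unused binaries on disk. */
static struct shader create_shader(struct disk_cache *cache, struct binary bin, bool skip_cache)
{
   if (cache && !skip_cache)
      cache_insert(cache, &bin);
   return (struct shader){ .bin = bin };
}

int main(void)
{
   struct disk_cache cache = { .path = "/tmp/shader-cache" };
   struct binary bin = { .data = "asm", .size = 3 };

   create_shader(&cache, bin, false); /* normal pipeline: cache the binary  */
   create_shader(&cache, bin, true);  /* e.g. shader object: skip the cache */
   return 0;
}

In the diff itself, the equivalent decision is the last argument of radv_shader_create(), which changes from keep_executable_info || dump_shader to skip_shaders_cache || dump_shader at each call site.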
@@ -89,7 +89,7 @@ radv_compute_pipeline_init(struct radv_compute_pipeline *pipeline, const struct
 
 struct radv_shader *
 radv_compile_cs(struct radv_device *device, struct vk_pipeline_cache *cache, struct radv_shader_stage *cs_stage,
-                bool keep_executable_info, bool keep_statistic_info, bool is_internal,
+                bool keep_executable_info, bool keep_statistic_info, bool is_internal, bool skip_shaders_cache,
                 struct radv_shader_binary **cs_binary)
 {
    struct radv_physical_device *pdev = radv_device_physical(device);
@@ -137,7 +137,7 @@ radv_compile_cs(struct radv_device *device, struct vk_pipeline_cache *cache, str
    *cs_binary =
       radv_shader_nir_to_asm(device, cs_stage, &cs_stage->nir, 1, NULL, keep_executable_info, keep_statistic_info);
 
-   cs_shader = radv_shader_create(device, cache, *cs_binary, keep_executable_info || dump_shader);
+   cs_shader = radv_shader_create(device, cache, *cs_binary, skip_shaders_cache || dump_shader);
 
    cs_shader->nir_string = nir_string;
 
@@ -223,8 +223,9 @@ radv_compute_pipeline_compile(const VkComputePipelineCreateInfo *pCreateInfo, st
 
    radv_pipeline_stage_init(pipeline->base.create_flags, pStage, pipeline_layout, &stage_key, &cs_stage);
 
-   pipeline->base.shaders[MESA_SHADER_COMPUTE] = radv_compile_cs(
-      device, cache, &cs_stage, keep_executable_info, keep_statistic_info, pipeline->base.is_internal, &cs_binary);
+   pipeline->base.shaders[MESA_SHADER_COMPUTE] =
+      radv_compile_cs(device, cache, &cs_stage, keep_executable_info, keep_statistic_info, pipeline->base.is_internal,
+                      skip_shaders_cache, &cs_binary);
 
    cs_stage.feedback.duration += os_time_get_nano() - stage_start;
 
@@ -41,7 +41,8 @@ void radv_compute_pipeline_init(struct radv_compute_pipeline *pipeline, const st
 
 struct radv_shader *radv_compile_cs(struct radv_device *device, struct vk_pipeline_cache *cache,
                                     struct radv_shader_stage *cs_stage, bool keep_executable_info,
-                                    bool keep_statistic_info, bool is_internal, struct radv_shader_binary **cs_binary);
+                                    bool keep_statistic_info, bool is_internal, bool skip_shaders_cache,
+                                    struct radv_shader_binary **cs_binary);
 
 VkResult radv_compute_pipeline_create(VkDevice _device, VkPipelineCache _cache,
                                       const VkComputePipelineCreateInfo *pCreateInfo,
@@ -2224,7 +2224,7 @@ radv_declare_pipeline_args(struct radv_device *device, struct radv_shader_stage
 static struct radv_shader *
 radv_create_gs_copy_shader(struct radv_device *device, struct vk_pipeline_cache *cache,
                            struct radv_shader_stage *gs_stage, const struct radv_graphics_state_key *gfx_state,
-                           bool keep_executable_info, bool keep_statistic_info,
+                           bool keep_executable_info, bool keep_statistic_info, bool skip_shaders_cache,
                            struct radv_shader_binary **gs_copy_binary)
 {
    const struct radv_physical_device *pdev = radv_device_physical(device);
@@ -2284,7 +2284,7 @@ radv_create_gs_copy_shader(struct radv_device *device, struct vk_pipeline_cache
    *gs_copy_binary = radv_shader_nir_to_asm(device, &gs_copy_stage, &nir, 1, &key.gfx_state, keep_executable_info,
                                             keep_statistic_info);
    struct radv_shader *copy_shader =
-      radv_shader_create(device, cache, *gs_copy_binary, keep_executable_info || dump_shader);
+      radv_shader_create(device, cache, *gs_copy_binary, skip_shaders_cache || dump_shader);
 
    if (copy_shader) {
       copy_shader->nir_string = nir_string;
@@ -2301,7 +2301,7 @@ radv_create_gs_copy_shader(struct radv_device *device, struct vk_pipeline_cache
 static void
 radv_graphics_shaders_nir_to_asm(struct radv_device *device, struct vk_pipeline_cache *cache,
                                  struct radv_shader_stage *stages, const struct radv_graphics_state_key *gfx_state,
-                                 bool keep_executable_info, bool keep_statistic_info,
+                                 bool keep_executable_info, bool keep_statistic_info, bool skip_shaders_cache,
                                  VkShaderStageFlagBits active_nir_stages, struct radv_shader **shaders,
                                  struct radv_shader_binary **binaries, struct radv_shader **gs_copy_shader,
                                  struct radv_shader_binary **gs_copy_binary)
@@ -2357,7 +2357,7 @@ radv_graphics_shaders_nir_to_asm(struct radv_device *device, struct vk_pipeline_
 
       binaries[s] = radv_shader_nir_to_asm(device, &stages[s], nir_shaders, shader_count, gfx_state,
                                            keep_executable_info, keep_statistic_info);
-      shaders[s] = radv_shader_create(device, cache, binaries[s], keep_executable_info || dump_shader);
+      shaders[s] = radv_shader_create(device, cache, binaries[s], skip_shaders_cache || dump_shader);
 
       shaders[s]->nir_string = nir_string;
 
@@ -2368,8 +2368,9 @@ radv_graphics_shaders_nir_to_asm(struct radv_device *device, struct vk_pipeline_
       simple_mtx_unlock(&instance->shader_dump_mtx);
 
       if (s == MESA_SHADER_GEOMETRY && !stages[s].info.is_ngg) {
-         *gs_copy_shader = radv_create_gs_copy_shader(device, cache, &stages[MESA_SHADER_GEOMETRY], gfx_state,
-                                                      keep_executable_info, keep_statistic_info, gs_copy_binary);
+         *gs_copy_shader =
+            radv_create_gs_copy_shader(device, cache, &stages[MESA_SHADER_GEOMETRY], gfx_state, keep_executable_info,
+                                       keep_statistic_info, skip_shaders_cache, gs_copy_binary);
       }
 
       stages[s].feedback.duration += os_time_get_nano() - stage_start;
@@ -2578,7 +2579,7 @@ void
 radv_graphics_shaders_compile(struct radv_device *device, struct vk_pipeline_cache *cache,
                               struct radv_shader_stage *stages, const struct radv_graphics_state_key *gfx_state,
                               bool keep_executable_info, bool keep_statistic_info, bool is_internal,
-                              struct radv_retained_shaders *retained_shaders, bool noop_fs,
+                              bool skip_shaders_cache, struct radv_retained_shaders *retained_shaders, bool noop_fs,
                               struct radv_shader **shaders, struct radv_shader_binary **binaries,
                               struct radv_shader **gs_copy_shader, struct radv_shader_binary **gs_copy_binary)
 {
@@ -2749,7 +2750,8 @@ radv_graphics_shaders_compile(struct radv_device *device, struct vk_pipeline_cac
 
    /* Compile NIR shaders to AMD assembly. */
    radv_graphics_shaders_nir_to_asm(device, cache, stages, gfx_state, keep_executable_info, keep_statistic_info,
-                                    active_nir_stages, shaders, binaries, gs_copy_shader, gs_copy_binary);
+                                    skip_shaders_cache, active_nir_stages, shaders, binaries, gs_copy_shader,
+                                    gs_copy_binary);
 
    if (keep_executable_info) {
       for (int i = 0; i < MESA_VULKAN_SHADER_STAGES; ++i) {
@@ -2993,8 +2995,9 @@ radv_graphics_pipeline_compile(struct radv_graphics_pipeline *pipeline, const Vk
    const bool noop_fs = radv_pipeline_needs_noop_fs(pipeline, &gfx_state->key.gfx_state);
 
    radv_graphics_shaders_compile(device, cache, stages, &gfx_state->key.gfx_state, keep_executable_info,
-                                 keep_statistic_info, pipeline->base.is_internal, retained_shaders, noop_fs,
-                                 pipeline->base.shaders, binaries, &pipeline->base.gs_copy_shader, &gs_copy_binary);
+                                 keep_statistic_info, pipeline->base.is_internal, skip_shaders_cache, retained_shaders,
+                                 noop_fs, pipeline->base.shaders, binaries, &pipeline->base.gs_copy_shader,
+                                 &gs_copy_binary);
 
    if (!skip_shaders_cache) {
       radv_pipeline_cache_insert(device, cache, &pipeline->base);
@@ -593,8 +593,8 @@ struct radv_ps_epilog_key radv_generate_ps_epilog_key(const struct radv_device *
 void radv_graphics_shaders_compile(struct radv_device *device, struct vk_pipeline_cache *cache,
                                    struct radv_shader_stage *stages, const struct radv_graphics_state_key *gfx_state,
                                    bool keep_executable_info, bool keep_statistic_info, bool is_internal,
-                                   struct radv_retained_shaders *retained_shaders, bool noop_fs,
-                                   struct radv_shader **shaders, struct radv_shader_binary **binaries,
+                                   bool skip_shaders_cache, struct radv_retained_shaders *retained_shaders,
+                                   bool noop_fs, struct radv_shader **shaders, struct radv_shader_binary **binaries,
                                    struct radv_shader **gs_copy_shader, struct radv_shader_binary **gs_copy_binary);
 
 struct radv_vgt_shader_key {
@@ -360,7 +360,8 @@ radv_rt_nir_to_asm(struct radv_device *device, struct vk_pipeline_cache *cache,
                    bool monolithic, struct radv_shader_stage *stage, uint32_t *stack_size,
                    struct radv_ray_tracing_stage_info *stage_info,
                    const struct radv_ray_tracing_stage_info *traversal_stage_info,
-                   struct radv_serialized_shader_arena_block *replay_block, struct radv_shader **out_shader)
+                   struct radv_serialized_shader_arena_block *replay_block, bool skip_shaders_cache,
+                   struct radv_shader **out_shader)
 {
    struct radv_physical_device *pdev = radv_device_physical(device);
    struct radv_instance *instance = radv_physical_device_instance(pdev);
@@ -466,7 +467,7 @@ radv_rt_nir_to_asm(struct radv_device *device, struct vk_pipeline_cache *cache,
          return result;
       }
    } else
-      shader = radv_shader_create(device, cache, binary, keep_executable_info || dump_shader);
+      shader = radv_shader_create(device, cache, binary, skip_shaders_cache || dump_shader);
 
    if (shader) {
       shader->nir_string = nir_string;
@@ -583,7 +584,7 @@ radv_rt_compile_shaders(struct radv_device *device, struct vk_pipeline_cache *ca
                         const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                         const VkPipelineCreationFeedbackCreateInfo *creation_feedback,
                         const struct radv_shader_stage_key *stage_keys, struct radv_ray_tracing_pipeline *pipeline,
-                        struct radv_serialized_shader_arena_block *capture_replay_handles)
+                        struct radv_serialized_shader_arena_block *capture_replay_handles, bool skip_shaders_cache)
 {
    VK_FROM_HANDLE(radv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
 
@@ -681,8 +682,9 @@ radv_rt_compile_shaders(struct radv_device *device, struct vk_pipeline_cache *ca
 
       bool monolithic_raygen = monolithic && stage->stage == MESA_SHADER_RAYGEN;
 
-      result = radv_rt_nir_to_asm(device, cache, pCreateInfo, pipeline, monolithic_raygen, stage, &stack_size,
-                                  &rt_stages[idx].info, NULL, replay_block, &rt_stages[idx].shader);
+      result =
+         radv_rt_nir_to_asm(device, cache, pCreateInfo, pipeline, monolithic_raygen, stage, &stack_size,
+                            &rt_stages[idx].info, NULL, replay_block, skip_shaders_cache, &rt_stages[idx].shader);
       if (result != VK_SUCCESS)
         goto cleanup;
 
@@ -739,8 +741,9 @@ radv_rt_compile_shaders(struct radv_device *device, struct vk_pipeline_cache *ca
       .key = stage_keys[MESA_SHADER_INTERSECTION],
    };
    radv_shader_layout_init(pipeline_layout, MESA_SHADER_INTERSECTION, &traversal_stage.layout);
-   result = radv_rt_nir_to_asm(device, cache, pCreateInfo, pipeline, false, &traversal_stage, NULL, NULL,
-                               &traversal_info, NULL, &pipeline->base.base.shaders[MESA_SHADER_INTERSECTION]);
+   result =
+      radv_rt_nir_to_asm(device, cache, pCreateInfo, pipeline, false, &traversal_stage, NULL, NULL, &traversal_info,
+                         NULL, skip_shaders_cache, &pipeline->base.base.shaders[MESA_SHADER_INTERSECTION]);
    ralloc_free(traversal_nir);
 
 cleanup:
@@ -942,7 +945,7 @@ radv_rt_pipeline_compile(struct radv_device *device, const VkRayTracingPipelineC
    }
 
    result = radv_rt_compile_shaders(device, cache, pCreateInfo, creation_feedback, rt_state->stage_keys, pipeline,
-                                    capture_replay_blocks);
+                                    capture_replay_blocks, skip_shaders_cache);
 
    if (result != VK_SUCCESS)
       return result;
@@ -186,7 +186,7 @@ radv_shader_object_init_graphics(struct radv_shader_object *shader_obj, struct r
    struct radv_shader *shaders[MESA_VULKAN_SHADER_STAGES] = {NULL};
    struct radv_shader_binary *binaries[MESA_VULKAN_SHADER_STAGES] = {NULL};
 
-   radv_graphics_shaders_compile(device, NULL, stages, &gfx_state, false, false, false, NULL, false, shaders,
+   radv_graphics_shaders_compile(device, NULL, stages, &gfx_state, false, false, false, true, NULL, false, shaders,
                                  binaries, &shader_obj->gs.copy_shader, &shader_obj->gs.copy_binary);
 
    shader = shaders[stage];
@@ -205,8 +205,8 @@ radv_shader_object_init_graphics(struct radv_shader_object *shader_obj, struct r
       radv_shader_stage_init(pCreateInfo, &stages[stage]);
       stages[stage].next_stage = next_stage;
 
-      radv_graphics_shaders_compile(device, NULL, stages, &gfx_state, false, false, false, NULL, false, shaders,
-                                    binaries, &shader_obj->gs.copy_shader, &shader_obj->gs.copy_binary);
+      radv_graphics_shaders_compile(device, NULL, stages, &gfx_state, false, false, false, true, NULL, false,
+                                    shaders, binaries, &shader_obj->gs.copy_shader, &shader_obj->gs.copy_binary);
 
       shader = shaders[stage];
       binary = binaries[stage];
@@ -251,7 +251,7 @@ radv_shader_object_init_compute(struct radv_shader_object *shader_obj, struct ra
 
    radv_shader_stage_init(pCreateInfo, &stage);
 
-   struct radv_shader *cs_shader = radv_compile_cs(device, NULL, &stage, false, false, false, &cs_binary);
+   struct radv_shader *cs_shader = radv_compile_cs(device, NULL, &stage, false, false, false, true, &cs_binary);
 
    ralloc_free(stage.nir);
 
@@ -512,8 +512,8 @@ radv_shader_object_create_linked(VkDevice _device, uint32_t createInfoCount, con
    struct radv_shader *gs_copy_shader = NULL;
    struct radv_shader_binary *gs_copy_binary = NULL;
 
-   radv_graphics_shaders_compile(device, NULL, stages, &gfx_state, false, false, false, NULL, false, shaders, binaries,
-                                 &gs_copy_shader, &gs_copy_binary);
+   radv_graphics_shaders_compile(device, NULL, stages, &gfx_state, false, false, false, true, NULL, false, shaders,
+                                 binaries, &gs_copy_shader, &gs_copy_binary);
 
    for (unsigned i = 0; i < createInfoCount; i++) {
       const VkShaderCreateInfoEXT *pCreateInfo = &pCreateInfos[i];