nvk: allocatable nvk_shaders

This will be needed later when the shader life cycle will be managed
by the pipeline cache

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/25550>
This commit is contained in:
Thomas H.P. Andersen 2023-10-17 02:08:33 +02:00 committed by Marge Bot
parent 3e4411a1d3
commit ad802ae2f9
9 changed files with 55 additions and 26 deletions

View file

@@ -537,8 +537,10 @@ nvk_CmdBindPipeline(VkCommandBuffer commandBuffer,
struct nvk_device *dev = nvk_cmd_buffer_device(cmd);
for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
if (pipeline->shaders[s].info.slm_size)
nvk_device_ensure_slm(dev, pipeline->shaders[s].info.slm_size);
if(!pipeline->shaders[s])
continue;
if (pipeline->shaders[s]->info.slm_size)
nvk_device_ensure_slm(dev, pipeline->shaders[s]->info.slm_size);
}
switch (pipelineBindPoint) {

View file

@@ -148,7 +148,7 @@ nvk_compute_local_size(struct nvk_cmd_buffer *cmd)
{
const struct nvk_compute_pipeline *pipeline = cmd->state.cs.pipeline;
const struct nvk_shader *shader =
&pipeline->base.shaders[MESA_SHADER_COMPUTE];
pipeline->base.shaders[MESA_SHADER_COMPUTE];
return shader->info.cs.local_size[0] *
shader->info.cs.local_size[1] *
@@ -211,7 +211,7 @@ nvk_flush_compute_state(struct nvk_cmd_buffer *cmd,
}
const struct nvk_shader *shader =
&pipeline->base.shaders[MESA_SHADER_COMPUTE];
pipeline->base.shaders[MESA_SHADER_COMPUTE];
for (uint32_t c = 0; c < shader->cbuf_map.cbuf_count; c++) {
const struct nvk_cbuf *cbuf = &shader->cbuf_map.cbufs[c];

View file

@@ -934,7 +934,7 @@ nvk_cmd_bind_graphics_pipeline(struct nvk_cmd_buffer *cmd,
* tessellation parameters at flush_ts_state, as the domain origin can be
* dynamic.
*/
if (nvk_shader_is_enabled(&pipeline->base.shaders[MESA_SHADER_TESS_EVAL])) {
if (nvk_shader_is_enabled(pipeline->base.shaders[MESA_SHADER_TESS_EVAL])) {
BITSET_SET(cmd->vk.dynamic_graphics_state.dirty,
MESA_VK_DYNAMIC_TS_DOMAIN_ORIGIN);
}
@@ -1017,7 +1017,7 @@ nvk_flush_ts_state(struct nvk_cmd_buffer *cmd)
if (BITSET_TEST(dyn->dirty, MESA_VK_DYNAMIC_TS_DOMAIN_ORIGIN)) {
const struct nvk_graphics_pipeline *pipeline= cmd->state.gfx.pipeline;
const struct nvk_shader *shader =
&pipeline->base.shaders[MESA_SHADER_TESS_EVAL];
pipeline->base.shaders[MESA_SHADER_TESS_EVAL];
if (nvk_shader_is_enabled(shader)) {
enum nak_ts_prims prims = shader->info.ts.prims;
@@ -1883,8 +1883,8 @@ nvk_flush_descriptors(struct nvk_cmd_buffer *cmd)
uint32_t root_cbuf_count = 0;
for (gl_shader_stage stage = 0; stage < MESA_SHADER_STAGES; stage++) {
const struct nvk_shader *shader = &pipeline->base.shaders[stage];
if (shader->code_size == 0)
const struct nvk_shader *shader = pipeline->base.shaders[stage];
if (!shader || shader->code_size == 0)
continue;
uint32_t group = stage;
@@ -1953,8 +1953,8 @@ nvk_flush_descriptors(struct nvk_cmd_buffer *cmd)
P_NV9097_SET_CONSTANT_BUFFER_SELECTOR_C(p, root_desc_addr);
for (gl_shader_stage stage = 0; stage < MESA_SHADER_STAGES; stage++) {
const struct nvk_shader *shader = &pipeline->base.shaders[stage];
if (shader->code_size == 0)
const struct nvk_shader *shader = pipeline->base.shaders[stage];
if (!shader || shader->code_size == 0)
continue;
uint32_t group = stage;

View file

@@ -184,7 +184,8 @@ nvk_compute_pipeline_create(struct nvk_device *dev,
if (result != VK_SUCCESS)
goto fail;
struct nvk_shader *shader = &pipeline->base.shaders[MESA_SHADER_COMPUTE];
struct nvk_shader *shader = nvk_shader_init(dev);
pipeline->base.shaders[MESA_SHADER_COMPUTE] = shader;
nvk_lower_nir(dev, nir, &robustness, false, pipeline_layout, shader);
@@ -192,7 +193,7 @@ nvk_compute_pipeline_create(struct nvk_device *dev,
nvk_hash_shader(sha1, &pCreateInfo->stage, &robustness, false,
pipeline_layout, NULL);
result = nvk_compile_nir(pdev, nir, pipeline_flags, &robustness, NULL,
result = nvk_compile_nir(dev, nir, pipeline_flags, &robustness, NULL,
shader);
ralloc_free(nir);
if (result != VK_SUCCESS)

View file

@@ -165,7 +165,6 @@ nvk_graphics_pipeline_create(struct nvk_device *dev,
VkPipeline *pPipeline)
{
VK_FROM_HANDLE(vk_pipeline_layout, pipeline_layout, pCreateInfo->layout);
struct nvk_physical_device *pdev = nvk_device_physical(dev);
struct nvk_graphics_pipeline *pipeline;
VkResult result = VK_SUCCESS;
@@ -217,9 +216,11 @@ nvk_graphics_pipeline_create(struct nvk_device *dev,
if (sinfo == NULL)
continue;
pipeline->base.shaders[stage] = nvk_shader_init(dev);
nvk_lower_nir(dev, nir[stage], &robustness[stage],
state.rp->view_mask != 0, pipeline_layout,
&pipeline->base.shaders[stage]);
pipeline->base.shaders[stage]);
}
for (gl_shader_stage stage = 0; stage < MESA_SHADER_STAGES; stage++) {
@@ -238,14 +239,14 @@ nvk_graphics_pipeline_create(struct nvk_device *dev,
state.rp->view_mask != 0,
pipeline_layout, fs_key);
result = nvk_compile_nir(pdev, nir[stage], pipeline_flags,
result = nvk_compile_nir(dev, nir[stage], pipeline_flags,
&robustness[stage], fs_key,
&pipeline->base.shaders[stage]);
pipeline->base.shaders[stage]);
ralloc_free(nir[stage]);
if (result != VK_SUCCESS)
goto fail;
result = nvk_shader_upload(dev, &pipeline->base.shaders[stage]);
result = nvk_shader_upload(dev, pipeline->base.shaders[stage]);
if (result != VK_SUCCESS)
goto fail;
}
@@ -258,7 +259,7 @@ nvk_graphics_pipeline_create(struct nvk_device *dev,
struct nvk_shader *last_geom = NULL;
for (gl_shader_stage stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
struct nvk_shader *shader = &pipeline->base.shaders[stage];
struct nvk_shader *shader = pipeline->base.shaders[stage];
uint32_t idx = mesa_to_nv9097_shader_type[stage];
P_IMMD(p, NV9097, SET_PIPELINE_SHADER(idx), {

View file

@@ -33,7 +33,7 @@ nvk_pipeline_free(struct nvk_device *dev,
const VkAllocationCallbacks *pAllocator)
{
for (uint32_t s = 0; s < ARRAY_SIZE(pipeline->shaders); s++)
nvk_shader_finish(dev, &pipeline->shaders[s]);
nvk_shader_finish(dev, pipeline->shaders[s]);
vk_object_free(&dev->vk, pAllocator, pipeline);
}
@@ -131,7 +131,8 @@ nvk_GetPipelineExecutablePropertiesKHR(
pProperties, pExecutableCount);
for (gl_shader_stage stage = 0; stage < MESA_SHADER_STAGES; stage++) {
if (pipeline->shaders[stage].code_size == 0)
const struct nvk_shader *shader = pipeline->shaders[stage];
if (!shader || shader->code_size == 0)
continue;
vk_outarray_append_typed(VkPipelineExecutablePropertiesKHR, &out, props) {
@@ -150,11 +151,12 @@ static struct nvk_shader *
shader_for_exe_idx(struct nvk_pipeline *pipeline, uint32_t idx)
{
for (gl_shader_stage stage = 0; stage < MESA_SHADER_STAGES; stage++) {
if (pipeline->shaders[stage].code_size == 0)
const struct nvk_shader *shader = pipeline->shaders[stage];
if (!shader || shader->code_size == 0)
continue;
if (idx == 0)
return &pipeline->shaders[stage];
return pipeline->shaders[stage];
idx--;
}

View file

@@ -23,7 +23,7 @@ struct nvk_pipeline {
enum nvk_pipeline_type type;
struct nvk_shader shaders[MESA_SHADER_STAGES];
struct nvk_shader *shaders[MESA_SHADER_STAGES];
};
VK_DEFINE_NONDISP_HANDLE_CASTS(nvk_pipeline, base, VkPipeline,

View file

@@ -438,13 +438,28 @@ nvk_compile_nir_with_nak(struct nvk_physical_device *pdev,
return VK_SUCCESS;
}
/* Allocate and zero-initialize a standalone nvk_shader from the device's
 * allocator (device allocation scope).  Returns NULL if allocation fails.
 * The shader is released with nvk_shader_finish(), which vk_free()s it
 * with the same allocator. */
struct nvk_shader *
nvk_shader_init(struct nvk_device *dev)
{
/* Multialloc with a single member today; presumably structured this way so
 * trailing data can be co-allocated later — TODO confirm against follow-up
 * commits. */
VK_MULTIALLOC(ma);
VK_MULTIALLOC_DECL(&ma, struct nvk_shader, shader, 1);
if (!vk_multialloc_zalloc(&ma, &dev->vk.alloc,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
return NULL;
return shader;
}
VkResult
nvk_compile_nir(struct nvk_physical_device *pdev, nir_shader *nir,
nvk_compile_nir(struct nvk_device *dev, nir_shader *nir,
VkPipelineCreateFlagBits2KHR pipeline_flags,
const struct vk_pipeline_robustness_state *rs,
const struct nak_fs_key *fs_key,
struct nvk_shader *shader)
{
struct nvk_physical_device *pdev = nvk_device_physical(dev);
if (use_nak(pdev, nir->info.stage)) {
return nvk_compile_nir_with_nak(pdev, nir, pipeline_flags, rs,
fs_key, shader);
@@ -505,6 +520,9 @@ nvk_shader_upload(struct nvk_device *dev, struct nvk_shader *shader)
void
nvk_shader_finish(struct nvk_device *dev, struct nvk_shader *shader)
{
if (shader == NULL)
return;
if (shader->upload_size > 0) {
nvk_heap_free(dev, &dev->shader_heap,
shader->upload_addr,
@@ -517,6 +535,8 @@ nvk_shader_finish(struct nvk_device *dev, struct nvk_shader *shader)
/* This came from codegen, just free it */
free((void *)shader->code_ptr);
}
vk_free(&dev->vk.alloc, shader);
}
void

View file

@@ -67,7 +67,7 @@ nvk_shader_address(const struct nvk_shader *shader)
static inline bool
nvk_shader_is_enabled(const struct nvk_shader *shader)
{
return shader->upload_size > 0;
return shader && shader->upload_size > 0;
}
VkShaderStageFlags nvk_nak_stages(const struct nv_device_info *info);
@@ -118,7 +118,7 @@ nvk_lower_nir(struct nvk_device *dev, nir_shader *nir,
struct nvk_shader *shader);
VkResult
nvk_compile_nir(struct nvk_physical_device *dev, nir_shader *nir,
nvk_compile_nir(struct nvk_device *dev, nir_shader *nir,
VkPipelineCreateFlagBits2KHR pipeline_flags,
const struct vk_pipeline_robustness_state *rstate,
const struct nak_fs_key *fs_key,
@@ -127,6 +127,9 @@ nvk_compile_nir(struct nvk_physical_device *dev, nir_shader *nir,
VkResult
nvk_shader_upload(struct nvk_device *dev, struct nvk_shader *shader);
struct nvk_shader *
nvk_shader_init(struct nvk_device *dev);
void
nvk_shader_finish(struct nvk_device *dev, struct nvk_shader *shader);