nvk: Use more consistent device variable names
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24518>
parent 8088d73fd1
commit 93362f801a
16 changed files with 119 additions and 123 deletions
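The changes follow a single naming pattern: locals of type struct nvk_device * are renamed from device to dev, locals of type struct nvk_physical_device * from pdevice to pdev, and Vulkan entry points that took VkDevice _device and unwrapped it into a local named device now take VkDevice device and unwrap it into dev. A minimal before/after sketch of the entry-point pattern, taken from the nvk_DestroyDescriptorPool hunk below (bodies elided):

    /* Before: the handle parameter carries an underscore so the
     * unwrapped driver pointer can be named "device". */
    VKAPI_ATTR void VKAPI_CALL
    nvk_DestroyDescriptorPool(VkDevice _device,
                              VkDescriptorPool _pool,
                              const VkAllocationCallbacks *pAllocator)
    {
       VK_FROM_HANDLE(nvk_device, device, _device);
       /* ... */
    }

    /* After: the VkDevice parameter keeps its API name and the
     * nvk_device local is "dev", matching the rest of the driver. */
    VKAPI_ATTR void VKAPI_CALL
    nvk_DestroyDescriptorPool(VkDevice device,
                              VkDescriptorPool _pool,
                              const VkAllocationCallbacks *pAllocator)
    {
       VK_FROM_HANDLE(nvk_device, dev, device);
       /* ... */
    }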
@@ -38,14 +38,14 @@ nvk_create_cmd_buffer(struct vk_command_pool *vk_pool,
                       struct vk_command_buffer **cmd_buffer_out)
 {
    struct nvk_cmd_pool *pool = container_of(vk_pool, struct nvk_cmd_pool, vk);
-   struct nvk_device *device = nvk_cmd_pool_device(pool);
+   struct nvk_device *dev = nvk_cmd_pool_device(pool);
    struct nvk_cmd_buffer *cmd;
    VkResult result;

    cmd = vk_zalloc(&pool->vk.alloc, sizeof(*cmd), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (cmd == NULL)
-      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

    result = vk_command_buffer_init(&pool->vk, &cmd->vk,
                                    &nvk_cmd_buffer_ops, 0);

@@ -152,63 +152,62 @@ nvc6c0_compute_setup_launch_desc_template(uint32_t *qmd,
 }

 VkResult
-nvk_compute_pipeline_create(struct nvk_device *device,
+nvk_compute_pipeline_create(struct nvk_device *dev,
                             struct vk_pipeline_cache *cache,
                             const VkComputePipelineCreateInfo *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator,
                             VkPipeline *pPipeline)
 {
    VK_FROM_HANDLE(vk_pipeline_layout, pipeline_layout, pCreateInfo->layout);
-   struct nvk_physical_device *pdevice = nvk_device_physical(device);
+   struct nvk_physical_device *pdev = nvk_device_physical(dev);
    struct nvk_compute_pipeline *pipeline;
    VkResult result;

-   pipeline = (void *)nvk_pipeline_zalloc(device, NVK_PIPELINE_COMPUTE,
+   pipeline = (void *)nvk_pipeline_zalloc(dev, NVK_PIPELINE_COMPUTE,
                                           sizeof(*pipeline), pAllocator);
    if (pipeline == NULL)
-      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

    assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT);

    struct vk_pipeline_robustness_state robustness;
-   vk_pipeline_robustness_state_fill(&device->vk, &robustness,
+   vk_pipeline_robustness_state_fill(&dev->vk, &robustness,
                                      pCreateInfo->pNext,
                                      pCreateInfo->stage.pNext);

    const nir_shader_compiler_options *nir_options =
-      nvk_physical_device_nir_options(pdevice, MESA_SHADER_COMPUTE);
+      nvk_physical_device_nir_options(pdev, MESA_SHADER_COMPUTE);
    const struct spirv_to_nir_options spirv_options =
-      nvk_physical_device_spirv_options(pdevice, &robustness);
+      nvk_physical_device_spirv_options(pdev, &robustness);

    nir_shader *nir;
-   result = vk_pipeline_shader_stage_to_nir(&device->vk,
-                                            &pCreateInfo->stage,
+   result = vk_pipeline_shader_stage_to_nir(&dev->vk, &pCreateInfo->stage,
                                             &spirv_options, nir_options,
                                             NULL, &nir);
    if (result != VK_SUCCESS)
       goto fail;

-   nvk_lower_nir(device, nir, &robustness, false, pipeline_layout);
+   nvk_lower_nir(dev, nir, &robustness, false, pipeline_layout);

-   result = nvk_compile_nir(pdevice, nir, NULL,
+   result = nvk_compile_nir(pdev, nir, NULL,
                             &pipeline->base.shaders[MESA_SHADER_COMPUTE]);
    ralloc_free(nir);
    if (result != VK_SUCCESS)
       goto fail;

-   result = nvk_shader_upload(device,
+   result = nvk_shader_upload(dev,
                               &pipeline->base.shaders[MESA_SHADER_COMPUTE]);
    if (result != VK_SUCCESS)
       goto fail;

    struct nvk_shader *shader = &pipeline->base.shaders[MESA_SHADER_COMPUTE];
-   if (device->pdev->info.cls_compute >= AMPERE_COMPUTE_A)
+   if (pdev->info.cls_compute >= AMPERE_COMPUTE_A)
       nvc6c0_compute_setup_launch_desc_template(pipeline->qmd_template, shader);
-   else if (device->pdev->info.cls_compute >= VOLTA_COMPUTE_A)
+   else if (pdev->info.cls_compute >= VOLTA_COMPUTE_A)
       nvc3c0_compute_setup_launch_desc_template(pipeline->qmd_template, shader);
-   else if (device->pdev->info.cls_compute >= PASCAL_COMPUTE_A)
+   else if (pdev->info.cls_compute >= PASCAL_COMPUTE_A)
       nvc0c0_compute_setup_launch_desc_template(pipeline->qmd_template, shader);
-   else if (device->pdev->info.cls_compute >= KEPLER_COMPUTE_A)
+   else if (pdev->info.cls_compute >= KEPLER_COMPUTE_A)
       nva0c0_compute_setup_launch_desc_template(pipeline->qmd_template, shader);
    else
       unreachable("Fermi and older not supported!");

@@ -217,6 +216,6 @@ nvk_compute_pipeline_create(struct nvk_device *device,
    return VK_SUCCESS;

 fail:
-   nvk_pipeline_free(device, &pipeline->base, pAllocator);
+   nvk_pipeline_free(dev, &pipeline->base, pAllocator);
    return result;
 }

@@ -332,7 +332,7 @@ nvk_push_descriptor_set_update(struct nvk_push_descriptor_set *push_set,
 }

 static void
-nvk_descriptor_set_destroy(struct nvk_device *device,
+nvk_descriptor_set_destroy(struct nvk_device *dev,
                            struct nvk_descriptor_pool *pool,
                            struct nvk_descriptor_set *set, bool free_bo)
 {

@@ -347,18 +347,18 @@ nvk_descriptor_set_destroy(struct nvk_device *device,
       }
    }

-   vk_descriptor_set_layout_unref(&device->vk, &set->layout->vk);
+   vk_descriptor_set_layout_unref(&dev->vk, &set->layout->vk);

-   vk_object_free(&device->vk, NULL, set);
+   vk_object_free(&dev->vk, NULL, set);
 }

 static void
-nvk_destroy_descriptor_pool(struct nvk_device *device,
+nvk_destroy_descriptor_pool(struct nvk_device *dev,
                             const VkAllocationCallbacks *pAllocator,
                             struct nvk_descriptor_pool *pool)
 {
    for (int i = 0; i < pool->entry_count; ++i) {
-      nvk_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
+      nvk_descriptor_set_destroy(dev, pool, pool->entries[i].set, false);
    }

    if (pool->bo) {

@@ -366,7 +366,7 @@ nvk_destroy_descriptor_pool(struct nvk_device *device,
       nouveau_ws_bo_destroy(pool->bo);
    }

-   vk_object_free(&device->vk, pAllocator, pool);
+   vk_object_free(&dev->vk, pAllocator, pool);
 }

 VKAPI_ATTR VkResult VKAPI_CALL

@@ -375,7 +375,7 @@ nvk_CreateDescriptorPool(VkDevice _device,
                          const VkAllocationCallbacks *pAllocator,
                          VkDescriptorPool *pDescriptorPool)
 {
-   VK_FROM_HANDLE(nvk_device, device, _device);
+   VK_FROM_HANDLE(nvk_device, dev, _device);
    struct nvk_descriptor_pool *pool;
    uint64_t size = sizeof(struct nvk_descriptor_pool);
    uint64_t bo_size = 0;

@@ -424,22 +424,22 @@ nvk_CreateDescriptorPool(VkDevice _device,
       pCreateInfo->maxSets;
    size += entries_size;

-   pool = vk_object_zalloc(&device->vk, pAllocator, size,
+   pool = vk_object_zalloc(&dev->vk, pAllocator, size,
                            VK_OBJECT_TYPE_DESCRIPTOR_POOL);
    if (!pool)
-      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

    if (bo_size) {
       uint32_t flags = NOUVEAU_WS_BO_GART | NOUVEAU_WS_BO_MAP | NOUVEAU_WS_BO_NO_SHARE;
-      pool->bo = nouveau_ws_bo_new(device->ws_dev, bo_size, 0, flags);
+      pool->bo = nouveau_ws_bo_new(dev->ws_dev, bo_size, 0, flags);
       if (!pool->bo) {
-         nvk_destroy_descriptor_pool(device, pAllocator, pool);
-         return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+         nvk_destroy_descriptor_pool(dev, pAllocator, pool);
+         return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
       }
       pool->mapped_ptr = nouveau_ws_bo_map(pool->bo, NOUVEAU_WS_BO_WR);
       if (!pool->mapped_ptr) {
-         nvk_destroy_descriptor_pool(device, pAllocator, pool);
-         return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+         nvk_destroy_descriptor_pool(dev, pAllocator, pool);
+         return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
       }
    }

@@ -451,7 +451,7 @@ nvk_CreateDescriptorPool(VkDevice _device,
 }

 static VkResult
-nvk_descriptor_set_create(struct nvk_device *device,
+nvk_descriptor_set_create(struct nvk_device *dev,
                           struct nvk_descriptor_pool *pool,
                           struct nvk_descriptor_set_layout *layout,
                           uint32_t variable_count,

@@ -462,10 +462,10 @@ nvk_descriptor_set_create(struct nvk_device *device,
    uint32_t mem_size = sizeof(struct nvk_descriptor_set) +
       layout->dynamic_buffer_count * sizeof(struct nvk_buffer_address);

-   set = vk_object_zalloc(&device->vk, NULL, mem_size,
+   set = vk_object_zalloc(&dev->vk, NULL, mem_size,
                           VK_OBJECT_TYPE_DESCRIPTOR_SET);
    if (!set)
-      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

    if (pool->entry_count == pool->max_entry_count)
       return VK_ERROR_OUT_OF_POOL_MEMORY;

@@ -520,11 +520,11 @@ nvk_descriptor_set_create(struct nvk_device *device,
 }

 VKAPI_ATTR VkResult VKAPI_CALL
-nvk_AllocateDescriptorSets(VkDevice _device,
+nvk_AllocateDescriptorSets(VkDevice device,
                            const VkDescriptorSetAllocateInfo *pAllocateInfo,
                            VkDescriptorSet *pDescriptorSets)
 {
-   VK_FROM_HANDLE(nvk_device, device, _device);
+   VK_FROM_HANDLE(nvk_device, dev, device);
    VK_FROM_HANDLE(nvk_descriptor_pool, pool, pAllocateInfo->descriptorPool);

    VkResult result = VK_SUCCESS;

@@ -547,7 +547,7 @@ nvk_AllocateDescriptorSets(VkDevice _device,
          var_desc_count && var_desc_count->descriptorSetCount > 0 ?
          var_desc_count->pDescriptorCounts[i] : 0;

-      result = nvk_descriptor_set_create(device, pool, layout,
+      result = nvk_descriptor_set_create(dev, pool, layout,
                                          variable_count, &set);
       if (result != VK_SUCCESS)
          break;

@@ -556,7 +556,7 @@ nvk_AllocateDescriptorSets(VkDevice _device,
    }

    if (result != VK_SUCCESS) {
-      nvk_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool, i, pDescriptorSets);
+      nvk_FreeDescriptorSets(device, pAllocateInfo->descriptorPool, i, pDescriptorSets);
       for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
          pDescriptorSets[i] = VK_NULL_HANDLE;
       }

@@ -565,47 +565,47 @@ nvk_AllocateDescriptorSets(VkDevice _device,
 }

 VKAPI_ATTR VkResult VKAPI_CALL
-nvk_FreeDescriptorSets(VkDevice _device,
+nvk_FreeDescriptorSets(VkDevice device,
                        VkDescriptorPool descriptorPool,
                        uint32_t descriptorSetCount,
                        const VkDescriptorSet *pDescriptorSets)
 {
-   VK_FROM_HANDLE(nvk_device, device, _device);
+   VK_FROM_HANDLE(nvk_device, dev, device);
    VK_FROM_HANDLE(nvk_descriptor_pool, pool, descriptorPool);

    for (uint32_t i = 0; i < descriptorSetCount; i++) {
       VK_FROM_HANDLE(nvk_descriptor_set, set, pDescriptorSets[i]);

       if (set)
-         nvk_descriptor_set_destroy(device, pool, set, true);
+         nvk_descriptor_set_destroy(dev, pool, set, true);
    }
    return VK_SUCCESS;
 }

 VKAPI_ATTR void VKAPI_CALL
-nvk_DestroyDescriptorPool(VkDevice _device,
+nvk_DestroyDescriptorPool(VkDevice device,
                           VkDescriptorPool _pool,
                           const VkAllocationCallbacks *pAllocator)
 {
-   VK_FROM_HANDLE(nvk_device, device, _device);
+   VK_FROM_HANDLE(nvk_device, dev, device);
    VK_FROM_HANDLE(nvk_descriptor_pool, pool, _pool);

    if (!_pool)
       return;

-   nvk_destroy_descriptor_pool(device, pAllocator, pool);
+   nvk_destroy_descriptor_pool(dev, pAllocator, pool);
 }

 VKAPI_ATTR VkResult VKAPI_CALL
-nvk_ResetDescriptorPool(VkDevice _device,
+nvk_ResetDescriptorPool(VkDevice device,
                         VkDescriptorPool descriptorPool,
                         VkDescriptorPoolResetFlags flags)
 {
-   VK_FROM_HANDLE(nvk_device, device, _device);
+   VK_FROM_HANDLE(nvk_device, dev, device);
    VK_FROM_HANDLE(nvk_descriptor_pool, pool, descriptorPool);

    for (int i = 0; i < pool->entry_count; ++i) {
-      nvk_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
+      nvk_descriptor_set_destroy(dev, pool, pool->entries[i].set, false);
    }
    pool->entry_count = 0;
    pool->current_offset = 0;

@@ -59,7 +59,7 @@ nvk_descriptor_table_grow_locked(struct nvk_device *dev,
 }

 VkResult
-nvk_descriptor_table_init(struct nvk_device *device,
+nvk_descriptor_table_init(struct nvk_device *dev,
                           struct nvk_descriptor_table *table,
                           uint32_t descriptor_size,
                           uint32_t min_descriptor_count,

@@ -79,10 +79,9 @@ nvk_descriptor_table_init(struct nvk_device *device,
    table->next_desc = 0;
    table->free_count = 0;

-   result = nvk_descriptor_table_grow_locked(device, table,
-                                             min_descriptor_count);
+   result = nvk_descriptor_table_grow_locked(dev, table, min_descriptor_count);
    if (result != VK_SUCCESS) {
-      nvk_descriptor_table_finish(device, table);
+      nvk_descriptor_table_finish(dev, table);
       return result;
    }

@@ -90,14 +89,14 @@ nvk_descriptor_table_init(struct nvk_device *device,
 }

 void
-nvk_descriptor_table_finish(struct nvk_device *device,
+nvk_descriptor_table_finish(struct nvk_device *dev,
                             struct nvk_descriptor_table *table)
 {
    if (table->bo != NULL) {
       nouveau_ws_bo_unmap(table->bo, table->map);
       nouveau_ws_bo_destroy(table->bo);
    }
-   vk_free(&device->vk.alloc, table->free_table);
+   vk_free(&dev->vk.alloc, table->free_table);
    simple_mtx_destroy(&table->mutex);
 }

@@ -24,13 +24,13 @@ struct nvk_descriptor_table {
    uint32_t *free_table;
 };

-VkResult nvk_descriptor_table_init(struct nvk_device *device,
+VkResult nvk_descriptor_table_init(struct nvk_device *dev,
                                    struct nvk_descriptor_table *table,
                                    uint32_t descriptor_size,
                                    uint32_t min_descriptor_count,
                                    uint32_t max_descriptor_count);

-void nvk_descriptor_table_finish(struct nvk_device *device,
+void nvk_descriptor_table_finish(struct nvk_device *dev,
                                  struct nvk_descriptor_table *table);

 VkResult nvk_descriptor_table_add(struct nvk_device *dev,

@@ -57,9 +57,9 @@ VkResult nvk_device_ensure_slm(struct nvk_device *dev,
                                uint32_t bytes_per_thread);

 static inline struct nvk_physical_device *
-nvk_device_physical(struct nvk_device *device)
+nvk_device_physical(struct nvk_device *dev)
 {
-   return (struct nvk_physical_device *)device->vk.physical;
+   return (struct nvk_physical_device *)dev->vk.physical;
 }

 VkResult nvk_device_init_meta(struct nvk_device *dev);

@@ -31,13 +31,13 @@ struct nvk_memory_tiling_info {
    uint8_t pte_kind;
 };

-VkResult nvk_allocate_memory(struct nvk_device *device,
+VkResult nvk_allocate_memory(struct nvk_device *dev,
                              const VkMemoryAllocateInfo *pAllocateInfo,
                              const struct nvk_memory_tiling_info *tile_info,
                              const VkAllocationCallbacks *pAllocator,
                              struct nvk_device_memory **mem_out);

-void nvk_free_memory(struct nvk_device *device,
+void nvk_free_memory(struct nvk_device *dev,
                      struct nvk_device_memory *mem,
                      const VkAllocationCallbacks *pAllocator);

@@ -279,27 +279,27 @@ merge_tess_info(struct shader_info *tes_info, struct shader_info *tcs_info)
 }

 VkResult
-nvk_graphics_pipeline_create(struct nvk_device *device,
+nvk_graphics_pipeline_create(struct nvk_device *dev,
                              struct vk_pipeline_cache *cache,
                              const VkGraphicsPipelineCreateInfo *pCreateInfo,
                              const VkAllocationCallbacks *pAllocator,
                              VkPipeline *pPipeline)
 {
    VK_FROM_HANDLE(vk_pipeline_layout, pipeline_layout, pCreateInfo->layout);
-   struct nvk_physical_device *pdevice = nvk_device_physical(device);
+   struct nvk_physical_device *pdev = nvk_device_physical(dev);
    struct nvk_graphics_pipeline *pipeline;
    VkResult result = VK_SUCCESS;

-   pipeline = vk_object_zalloc(&device->vk, pAllocator, sizeof(*pipeline),
+   pipeline = vk_object_zalloc(&dev->vk, pAllocator, sizeof(*pipeline),
                                VK_OBJECT_TYPE_PIPELINE);
    if (pipeline == NULL)
-      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

    pipeline->base.type = NVK_PIPELINE_GRAPHICS;

    struct vk_graphics_pipeline_all_state all;
    struct vk_graphics_pipeline_state state = {};
-   result = vk_graphics_pipeline_state_fill(&device->vk, &state, pCreateInfo,
+   result = vk_graphics_pipeline_state_fill(&dev->vk, &state, pCreateInfo,
                                             NULL, &all, NULL, 0, NULL);
    assert(result == VK_SUCCESS);

@@ -310,15 +310,15 @@ nvk_graphics_pipeline_create(struct nvk_device *device,
       const VkPipelineShaderStageCreateInfo *sinfo = &pCreateInfo->pStages[i];
       gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);

-      vk_pipeline_robustness_state_fill(&device->vk, &robustness[stage],
+      vk_pipeline_robustness_state_fill(&dev->vk, &robustness[stage],
                                         pCreateInfo->pNext, sinfo->pNext);

       const nir_shader_compiler_options *nir_options =
-         nvk_physical_device_nir_options(pdevice, stage);
+         nvk_physical_device_nir_options(pdev, stage);
       const struct spirv_to_nir_options spirv_options =
-         nvk_physical_device_spirv_options(pdevice, &robustness[stage]);
+         nvk_physical_device_spirv_options(pdev, &robustness[stage]);

-      result = vk_pipeline_shader_stage_to_nir(&device->vk, sinfo,
+      result = vk_pipeline_shader_stage_to_nir(&dev->vk, sinfo,
                                                &spirv_options, nir_options,
                                                NULL, &nir[stage]);
       if (result != VK_SUCCESS)

@@ -333,7 +333,7 @@ nvk_graphics_pipeline_create(struct nvk_device *device,
    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
       const VkPipelineShaderStageCreateInfo *sinfo = &pCreateInfo->pStages[i];
       gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);
-      nvk_lower_nir(device, nir[stage], &robustness[stage],
+      nvk_lower_nir(dev, nir[stage], &robustness[stage],
                     state.rp->view_mask != 0, pipeline_layout);
    }

@@ -347,13 +347,13 @@ nvk_graphics_pipeline_create(struct nvk_device *device,
          fs_key = &fs_key_tmp;
       }

-      result = nvk_compile_nir(pdevice, nir[stage], fs_key,
+      result = nvk_compile_nir(pdev, nir[stage], fs_key,
                                &pipeline->base.shaders[stage]);
       ralloc_free(nir[stage]);
       if (result != VK_SUCCESS)
         goto fail;

-      result = nvk_shader_upload(device, &pipeline->base.shaders[stage]);
+      result = nvk_shader_upload(dev, &pipeline->base.shaders[stage]);
       if (result != VK_SUCCESS)
         goto fail;
    }

@@ -381,7 +381,7 @@ nvk_graphics_pipeline_create(struct nvk_device *device,
       last_geom = shader;

       uint64_t addr = nvk_shader_address(shader);
-      if (device->pdev->info.cls_eng3d >= VOLTA_A) {
+      if (dev->pdev->info.cls_eng3d >= VOLTA_A) {
          P_MTHD(p, NVC397, SET_PIPELINE_PROGRAM_ADDRESS_A(idx));
          P_NVC397_SET_PIPELINE_PROGRAM_ADDRESS_A(p, idx, addr >> 32);
          P_NVC397_SET_PIPELINE_PROGRAM_ADDRESS_B(p, idx, addr);

@@ -408,7 +408,7 @@ nvk_graphics_pipeline_create(struct nvk_device *device,

    P_IMMD(p, NV9097, SET_API_MANDATED_EARLY_Z, shader->fs.early_z);

-   if (device->pdev->info.cls_eng3d >= MAXWELL_B) {
+   if (dev->pdev->info.cls_eng3d >= MAXWELL_B) {
       P_IMMD(p, NVB197, SET_POST_Z_PS_IMASK,
              shader->fs.post_depth_coverage);
    } else {

@@ -493,6 +493,6 @@ nvk_graphics_pipeline_create(struct nvk_device *device,
    return VK_SUCCESS;

 fail:
-   vk_object_free(&device->vk, pAllocator, pipeline);
+   vk_object_free(&dev->vk, pAllocator, pipeline);
    return result;
 }

@@ -10,7 +10,7 @@
 #include "nvk_cl90b5.h"

 VkResult
-nvk_heap_init(struct nvk_device *device, struct nvk_heap *heap,
+nvk_heap_init(struct nvk_device *dev, struct nvk_heap *heap,
               enum nouveau_ws_bo_flags bo_flags,
               enum nouveau_ws_bo_map_flags map_flags,
               uint32_t overalloc, bool contiguous)

@@ -78,7 +78,7 @@ get_stencil_format(enum pipe_format format)
 }

 VkResult
-nvk_image_view_init(struct nvk_device *device,
+nvk_image_view_init(struct nvk_device *dev,
                     struct nvk_image_view *view,
                     bool driver_internal,
                     const VkImageViewCreateInfo *pCreateInfo)

@@ -88,7 +88,7 @@ nvk_image_view_init(struct nvk_device *device,

    memset(view, 0, sizeof(*view));

-   vk_image_view_init(&device->vk, &view->vk, driver_internal, pCreateInfo);
+   vk_image_view_init(&dev->vk, &view->vk, driver_internal, pCreateInfo);

    /* First, figure out which image planes we need.
     * For depth/stencil, we only have plane so simply assert

@@ -156,14 +156,13 @@ nvk_image_view_init(struct nvk_device *device,
       if (view->vk.usage & (VK_IMAGE_USAGE_SAMPLED_BIT |
                             VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) {
          uint32_t tic[8];
-         nil_image_fill_tic(&nvk_device_physical(device)->info,
+         nil_image_fill_tic(&nvk_device_physical(dev)->info,
                             &nil_image, &nil_view, base_addr, tic);

-         result = nvk_descriptor_table_add(device, &device->images,
-                                           tic, sizeof(tic),
+         result = nvk_descriptor_table_add(dev, &dev->images, tic, sizeof(tic),
                                            &view->planes[view_plane].sampled_desc_index);
          if (result != VK_SUCCESS) {
-            nvk_image_view_finish(device, view);
+            nvk_image_view_finish(dev, view);
             return result;
          }
       }

@@ -188,14 +187,13 @@ nvk_image_view_init(struct nvk_device *device,
       }

       uint32_t tic[8];
-      nil_image_fill_tic(&nvk_device_physical(device)->info,
+      nil_image_fill_tic(&nvk_device_physical(dev)->info,
                          &nil_image, &nil_view, base_addr, tic);

-      result = nvk_descriptor_table_add(device, &device->images,
-                                        tic, sizeof(tic),
+      result = nvk_descriptor_table_add(dev, &dev->images, tic, sizeof(tic),
                                         &view->planes[view_plane].storage_desc_index);
       if (result != VK_SUCCESS) {
-         nvk_image_view_finish(device, view);
+         nvk_image_view_finish(dev, view);
          return result;
       }
    }

@@ -205,17 +203,17 @@ nvk_image_view_init(struct nvk_device *device,
 }

 void
-nvk_image_view_finish(struct nvk_device *device,
+nvk_image_view_finish(struct nvk_device *dev,
                       struct nvk_image_view *view)
 {
    for (uint8_t plane = 0; plane < view->plane_count; plane++) {
       if (view->planes[plane].sampled_desc_index) {
-         nvk_descriptor_table_remove(device, &device->images,
+         nvk_descriptor_table_remove(dev, &dev->images,
                                      view->planes[plane].sampled_desc_index);
       }

       if (view->planes[plane].storage_desc_index) {
-         nvk_descriptor_table_remove(device, &device->images,
+         nvk_descriptor_table_remove(dev, &dev->images,
                                      view->planes[plane].storage_desc_index);
       }
    }

@@ -229,18 +227,18 @@ nvk_CreateImageView(VkDevice _device,
                     const VkAllocationCallbacks *pAllocator,
                     VkImageView *pView)
 {
-   VK_FROM_HANDLE(nvk_device, device, _device);
+   VK_FROM_HANDLE(nvk_device, dev, _device);
    struct nvk_image_view *view;
    VkResult result;

-   view = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*view), 8,
+   view = vk_alloc2(&dev->vk.alloc, pAllocator, sizeof(*view), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (!view)
-      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

-   result = nvk_image_view_init(device, view, false, pCreateInfo);
+   result = nvk_image_view_init(dev, view, false, pCreateInfo);
    if (result != VK_SUCCESS) {
-      vk_free2(&device->vk.alloc, pAllocator, view);
+      vk_free2(&dev->vk.alloc, pAllocator, view);
       return result;
    }

@@ -254,12 +252,12 @@ nvk_DestroyImageView(VkDevice _device,
                      VkImageView imageView,
                      const VkAllocationCallbacks *pAllocator)
 {
-   VK_FROM_HANDLE(nvk_device, device, _device);
+   VK_FROM_HANDLE(nvk_device, dev, _device);
    VK_FROM_HANDLE(nvk_image_view, view, imageView);

    if (!view)
       return;

-   nvk_image_view_finish(device, view);
-   vk_free2(&device->vk.alloc, pAllocator, view);
+   nvk_image_view_finish(dev, view);
+   vk_free2(&dev->vk.alloc, pAllocator, view);
 }

@@ -25,12 +25,12 @@ struct nvk_image_view {
 VK_DEFINE_NONDISP_HANDLE_CASTS(nvk_image_view, vk.base, VkImageView,
                                VK_OBJECT_TYPE_IMAGE_VIEW)

-VkResult nvk_image_view_init(struct nvk_device *device,
+VkResult nvk_image_view_init(struct nvk_device *dev,
                              struct nvk_image_view *view,
                              bool driver_internal,
                              const VkImageViewCreateInfo *pCreateInfo);

-void nvk_image_view_finish(struct nvk_device *device,
+void nvk_image_view_finish(struct nvk_device *dev,
                            struct nvk_image_view *view);

 #endif

@@ -642,7 +642,7 @@ nvk_get_device_features(const struct nv_device_info *info,
 VkResult
 nvk_create_drm_physical_device(struct vk_instance *_instance,
                                drmDevicePtr drm_device,
-                               struct vk_physical_device **device_out)
+                               struct vk_physical_device **pdev_out)
 {
    struct nvk_instance *instance = (struct nvk_instance *)_instance;
    VkResult result;

@@ -811,7 +811,7 @@ nvk_create_drm_physical_device(struct vk_instance *_instance,
    if (result != VK_SUCCESS)
       goto fail_init;

-   *device_out = &pdev->vk;
+   *pdev_out = &pdev->vk;

    return VK_SUCCESS;

@@ -46,8 +46,8 @@ nvk_physical_device_instance(struct nvk_physical_device *pdev)
 }

 VkResult nvk_create_drm_physical_device(struct vk_instance *vk_instance,
-                                        struct _drmDevice *device,
-                                        struct vk_physical_device **out);
+                                        struct _drmDevice *drm_device,
+                                        struct vk_physical_device **pdev_out);

 void nvk_physical_device_destroy(struct vk_physical_device *vk_device);

@@ -6,14 +6,14 @@
 #include "vk_pipeline_cache.h"

 struct nvk_pipeline *
-nvk_pipeline_zalloc(struct nvk_device *device,
+nvk_pipeline_zalloc(struct nvk_device *dev,
                     enum nvk_pipeline_type type, size_t size,
                     const VkAllocationCallbacks *pAllocator)
 {
    struct nvk_pipeline *pipeline;

    assert(size >= sizeof(*pipeline));
-   pipeline = vk_object_zalloc(&device->vk, pAllocator, size,
+   pipeline = vk_object_zalloc(&dev->vk, pAllocator, size,
                                VK_OBJECT_TYPE_PIPELINE);
    if (pipeline == NULL)
       return NULL;

@@ -24,20 +24,20 @@ nvk_pipeline_zalloc(struct nvk_device *device,
 }

 void
-nvk_pipeline_free(struct nvk_device *device,
+nvk_pipeline_free(struct nvk_device *dev,
                   struct nvk_pipeline *pipeline,
                   const VkAllocationCallbacks *pAllocator)
 {
    for (uint32_t s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
       if (pipeline->shaders[s].upload_size > 0) {
-         nvk_heap_free(device, &device->shader_heap,
+         nvk_heap_free(dev, &dev->shader_heap,
                        pipeline->shaders[s].upload_addr,
                        pipeline->shaders[s].upload_size);
       }
       free(pipeline->shaders[s].xfb);
    }

-   vk_object_free(&device->vk, pAllocator, pipeline);
+   vk_object_free(&dev->vk, pAllocator, pipeline);
 }

 VKAPI_ATTR VkResult VKAPI_CALL

@@ -48,13 +48,13 @@ nvk_CreateGraphicsPipelines(VkDevice _device,
                             const VkAllocationCallbacks *pAllocator,
                             VkPipeline *pPipelines)
 {
-   VK_FROM_HANDLE(nvk_device, device, _device);
+   VK_FROM_HANDLE(nvk_device, dev, _device);
    VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
    VkResult result = VK_SUCCESS;

    unsigned i = 0;
    for (; i < createInfoCount; i++) {
-      VkResult r = nvk_graphics_pipeline_create(device, cache, &pCreateInfos[i],
+      VkResult r = nvk_graphics_pipeline_create(dev, cache, &pCreateInfos[i],
                                                 pAllocator, &pPipelines[i]);
       if (r == VK_SUCCESS)
          continue;

@@ -79,13 +79,13 @@ nvk_CreateComputePipelines(VkDevice _device,
                            const VkAllocationCallbacks *pAllocator,
                            VkPipeline *pPipelines)
 {
-   VK_FROM_HANDLE(nvk_device, device, _device);
+   VK_FROM_HANDLE(nvk_device, dev, _device);
    VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
    VkResult result = VK_SUCCESS;

    unsigned i = 0;
    for (; i < createInfoCount; i++) {
-      VkResult r = nvk_compute_pipeline_create(device, cache, &pCreateInfos[i],
+      VkResult r = nvk_compute_pipeline_create(dev, cache, &pCreateInfos[i],
                                                pAllocator, &pPipelines[i]);
       if (r == VK_SUCCESS)
          continue;

@@ -106,11 +106,11 @@ VKAPI_ATTR void VKAPI_CALL
 nvk_DestroyPipeline(VkDevice _device, VkPipeline _pipeline,
                     const VkAllocationCallbacks *pAllocator)
 {
-   VK_FROM_HANDLE(nvk_device, device, _device);
+   VK_FROM_HANDLE(nvk_device, dev, _device);
    VK_FROM_HANDLE(nvk_pipeline, pipeline, _pipeline);

    if (!pipeline)
       return;

-   nvk_pipeline_free(device, pipeline, pAllocator);
+   nvk_pipeline_free(dev, pipeline, pAllocator);
 }

@@ -26,11 +26,11 @@ VK_DEFINE_NONDISP_HANDLE_CASTS(nvk_pipeline, base, VkPipeline,
                                VK_OBJECT_TYPE_PIPELINE)

 void
-nvk_pipeline_free(struct nvk_device *device,
+nvk_pipeline_free(struct nvk_device *dev,
                   struct nvk_pipeline *pipeline,
                   const VkAllocationCallbacks *pAllocator);
 struct nvk_pipeline *
-nvk_pipeline_zalloc(struct nvk_device *device,
+nvk_pipeline_zalloc(struct nvk_device *dev,
                     enum nvk_pipeline_type type, size_t size,
                     const VkAllocationCallbacks *pAllocator);

@@ -41,7 +41,7 @@ struct nvk_compute_pipeline {
 };

 VkResult
-nvk_compute_pipeline_create(struct nvk_device *device,
+nvk_compute_pipeline_create(struct nvk_device *dev,
                             struct vk_pipeline_cache *cache,
                             const VkComputePipelineCreateInfo *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator,

@@ -59,7 +59,7 @@ struct nvk_graphics_pipeline {
 };

 VkResult
-nvk_graphics_pipeline_create(struct nvk_device *device,
+nvk_graphics_pipeline_create(struct nvk_device *dev,
                              struct vk_pipeline_cache *cache,
                              const VkGraphicsPipelineCreateInfo *pCreateInfo,
                              const VkAllocationCallbacks *pAllocator,

@@ -91,7 +91,7 @@ nvk_shader_address(const struct nvk_shader *shader)
 }

 const nir_shader_compiler_options *
-nvk_physical_device_nir_options(const struct nvk_physical_device *pdevice,
+nvk_physical_device_nir_options(const struct nvk_physical_device *pdev,
                                 gl_shader_stage stage);

 static inline nir_address_format

@@ -109,7 +109,7 @@ nvk_buffer_addr_format(VkPipelineRobustnessBufferBehaviorEXT robustness)
 }

 struct spirv_to_nir_options
-nvk_physical_device_spirv_options(const struct nvk_physical_device *pdevice,
+nvk_physical_device_spirv_options(const struct nvk_physical_device *pdev,
                                   const struct vk_pipeline_robustness_state *rs);

 bool

@@ -118,13 +118,13 @@ nvk_nir_lower_descriptors(nir_shader *nir,
                           const struct vk_pipeline_layout *layout);

 void
-nvk_lower_nir(struct nvk_device *device, nir_shader *nir,
+nvk_lower_nir(struct nvk_device *dev, nir_shader *nir,
               const struct vk_pipeline_robustness_state *rs,
               bool is_multiview,
               const struct vk_pipeline_layout *layout);

 VkResult
-nvk_compile_nir(struct nvk_physical_device *device, nir_shader *nir,
+nvk_compile_nir(struct nvk_physical_device *dev, nir_shader *nir,
                 const struct nvk_fs_key *fs_key,
                 struct nvk_shader *shader);
