nvk: remove some dead code files

Dead beef.

Fixes: 813b253939

Signed-off-by: Yusuf Khan <yusisamerican@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/28354>
Author: Yusuf Khan
Date:   2024-03-24 13:51:50 -07:00
Committed-by: Marge Bot
Parent: f351e4be11
Commit: dcc2e596c1

2 changed files with 0 additions and 519 deletions

src/nouveau/vulkan/nvk_compute_pipeline.c (deleted)

@@ -1,263 +0,0 @@
/*
 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */

#include "nvk_private.h"
#include "nvk_device.h"
#include "nvk_physical_device.h"
#include "nvk_pipeline.h"
#include "nvk_shader.h"
#include "vk_nir.h"
#include "vk_pipeline.h"
#include "vk_pipeline_layout.h"
#include "nouveau_bo.h"
#include "nouveau_context.h"
#include "compiler/spirv/nir_spirv.h"
#include "drf.h"
#include "cla0c0.h"
#include "cla0c0qmd.h"
#include "clc0c0.h"
#include "clc0c0qmd.h"
#include "clc3c0.h"
#include "clc3c0qmd.h"
#include "clc6c0.h"
#include "clc6c0qmd.h"
#define NVA0C0_QMDV00_06_VAL_SET(p,a...) NVVAL_MW_SET((p), NVA0C0, QMDV00_06, ##a)
#define NVA0C0_QMDV00_06_DEF_SET(p,a...) NVDEF_MW_SET((p), NVA0C0, QMDV00_06, ##a)
#define NVC0C0_QMDV02_01_VAL_SET(p,a...) NVVAL_MW_SET((p), NVC0C0, QMDV02_01, ##a)
#define NVC0C0_QMDV02_01_DEF_SET(p,a...) NVDEF_MW_SET((p), NVC0C0, QMDV02_01, ##a)
#define NVC3C0_QMDV02_02_VAL_SET(p,a...) NVVAL_MW_SET((p), NVC3C0, QMDV02_02, ##a)
#define NVC3C0_QMDV02_02_DEF_SET(p,a...) NVDEF_MW_SET((p), NVC3C0, QMDV02_02, ##a)
#define NVC6C0_QMDV03_00_VAL_SET(p,a...) NVVAL_MW_SET((p), NVC6C0, QMDV03_00, ##a)
#define NVC6C0_QMDV03_00_DEF_SET(p,a...) NVDEF_MW_SET((p), NVC6C0, QMDV03_00, ##a)
#define QMD_DEF_SET(qmd, class_id, version_major, version_minor, a...) \
   NVDEF_MW_SET((qmd), NV##class_id, QMDV##version_major##_##version_minor, ##a)
#define QMD_VAL_SET(qmd, class_id, version_major, version_minor, a...) \
   NVVAL_MW_SET((qmd), NV##class_id, QMDV##version_major##_##version_minor, ##a)
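
/* Round a shared-memory size up to one of the configurable SM sizes
 * (8/16/32/64/96 KiB) and return (size / 4 KiB) + 1, the encoding the
 * *_SM_CONFIG_SHARED_MEM_SIZE QMD fields below expect. */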
static int
gv100_sm_config_smem_size(uint32_t size)
{
   if      (size > 64 * 1024) size = 96 * 1024;
   else if (size > 32 * 1024) size = 64 * 1024;
   else if (size > 16 * 1024) size = 32 * 1024;
   else if (size >  8 * 1024) size = 16 * 1024;
   else                       size =  8 * 1024;

   return (size / 4096) + 1;
}
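
/* Fields common to every QMD version.  The class_id and version tokens are
 * pasted into the NV*C0_QMDV*_* accessor names by QMD_DEF_SET/QMD_VAL_SET,
 * so the same field list works for each hardware class. */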
#define base_compute_setup_launch_desc_template(qmd, shader, class_id, version_major, version_minor) \
do { \
   QMD_DEF_SET(qmd, class_id, version_major, version_minor, API_VISIBLE_CALL_LIMIT, NO_CHECK); \
   QMD_VAL_SET(qmd, class_id, version_major, version_minor, BARRIER_COUNT, shader->info.num_barriers); \
   QMD_VAL_SET(qmd, class_id, version_major, version_minor, CTA_THREAD_DIMENSION0, \
               shader->info.cs.local_size[0]); \
   QMD_VAL_SET(qmd, class_id, version_major, version_minor, CTA_THREAD_DIMENSION1, \
               shader->info.cs.local_size[1]); \
   QMD_VAL_SET(qmd, class_id, version_major, version_minor, CTA_THREAD_DIMENSION2, \
               shader->info.cs.local_size[2]); \
   QMD_VAL_SET(qmd, class_id, version_major, version_minor, QMD_MAJOR_VERSION, version_major); \
   QMD_VAL_SET(qmd, class_id, version_major, version_minor, QMD_VERSION, version_minor); \
   QMD_DEF_SET(qmd, class_id, version_major, version_minor, SAMPLER_INDEX, INDEPENDENTLY); \
   QMD_VAL_SET(qmd, class_id, version_major, version_minor, SHADER_LOCAL_MEMORY_HIGH_SIZE, 0); \
   QMD_VAL_SET(qmd, class_id, version_major, version_minor, SHADER_LOCAL_MEMORY_LOW_SIZE, \
               align(shader->info.slm_size, 0x10)); \
   QMD_VAL_SET(qmd, class_id, version_major, version_minor, SHARED_MEMORY_SIZE, \
               align(shader->info.cs.smem_size, 0x100)); \
} while (0)

static void
nva0c0_compute_setup_launch_desc_template(uint32_t *qmd,
                                          struct nvk_shader *shader)
{
   base_compute_setup_launch_desc_template(qmd, shader, A0C0, 00, 06);

   if (shader->info.cs.smem_size <= (16 << 10))
      NVA0C0_QMDV00_06_DEF_SET(qmd, L1_CONFIGURATION,
                               DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB);
   else if (shader->info.cs.smem_size <= (32 << 10))
      NVA0C0_QMDV00_06_DEF_SET(qmd, L1_CONFIGURATION,
                               DIRECTLY_ADDRESSABLE_MEMORY_SIZE_32KB);
   else if (shader->info.cs.smem_size <= (48 << 10))
      NVA0C0_QMDV00_06_DEF_SET(qmd, L1_CONFIGURATION,
                               DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB);
   else
      unreachable("Invalid shared memory size");
   uint64_t addr = shader->hdr_addr;
   assert(addr < 0xffffffff);
   NVA0C0_QMDV00_06_VAL_SET(qmd, PROGRAM_OFFSET, addr);
   NVA0C0_QMDV00_06_VAL_SET(qmd, REGISTER_COUNT, shader->info.num_gprs);
   NVA0C0_QMDV00_06_VAL_SET(qmd, SASS_VERSION, 0x30);
}

static void
nvc0c0_compute_setup_launch_desc_template(uint32_t *qmd,
                                          struct nvk_shader *shader)
{
   base_compute_setup_launch_desc_template(qmd, shader, C0C0, 02, 01);

   uint64_t addr = shader->hdr_addr;
   assert(addr < 0xffffffff);
   NVC0C0_QMDV02_01_VAL_SET(qmd, SM_GLOBAL_CACHING_ENABLE, 1);
   NVC0C0_QMDV02_01_VAL_SET(qmd, PROGRAM_OFFSET, addr);
   NVC0C0_QMDV02_01_VAL_SET(qmd, REGISTER_COUNT, shader->info.num_gprs);
}

static void
nvc3c0_compute_setup_launch_desc_template(uint32_t *qmd,
                                          struct nvk_shader *shader)
{
   base_compute_setup_launch_desc_template(qmd, shader, C3C0, 02, 02);

   NVC3C0_QMDV02_02_VAL_SET(qmd, SM_GLOBAL_CACHING_ENABLE, 1);

   /* These fields are all QMD 2.2+ */
   NVC3C0_QMDV02_02_VAL_SET(qmd, MIN_SM_CONFIG_SHARED_MEM_SIZE,
                            gv100_sm_config_smem_size(shader->info.cs.smem_size));
   NVC3C0_QMDV02_02_VAL_SET(qmd, MAX_SM_CONFIG_SHARED_MEM_SIZE,
                            gv100_sm_config_smem_size(NVK_MAX_SHARED_SIZE));
   NVC3C0_QMDV02_02_VAL_SET(qmd, TARGET_SM_CONFIG_SHARED_MEM_SIZE,
                            gv100_sm_config_smem_size(shader->info.cs.smem_size));
   NVC3C0_QMDV02_02_VAL_SET(qmd, REGISTER_COUNT_V, shader->info.num_gprs);

   uint64_t addr = shader->hdr_addr;
   NVC3C0_QMDV02_02_VAL_SET(qmd, PROGRAM_ADDRESS_LOWER, addr & 0xffffffff);
   NVC3C0_QMDV02_02_VAL_SET(qmd, PROGRAM_ADDRESS_UPPER, addr >> 32);
}

static void
nvc6c0_compute_setup_launch_desc_template(uint32_t *qmd,
                                          struct nvk_shader *shader)
{
   base_compute_setup_launch_desc_template(qmd, shader, C6C0, 03, 00);

   NVC6C0_QMDV03_00_VAL_SET(qmd, SM_GLOBAL_CACHING_ENABLE, 1);

   /* These fields are all QMD 2.2+ */
   NVC6C0_QMDV03_00_VAL_SET(qmd, MIN_SM_CONFIG_SHARED_MEM_SIZE,
                            gv100_sm_config_smem_size(shader->info.cs.smem_size));
   NVC6C0_QMDV03_00_VAL_SET(qmd, MAX_SM_CONFIG_SHARED_MEM_SIZE,
                            gv100_sm_config_smem_size(NVK_MAX_SHARED_SIZE));
   NVC6C0_QMDV03_00_VAL_SET(qmd, TARGET_SM_CONFIG_SHARED_MEM_SIZE,
                            gv100_sm_config_smem_size(shader->info.cs.smem_size));
   NVC6C0_QMDV03_00_VAL_SET(qmd, REGISTER_COUNT_V, shader->info.num_gprs);

   uint64_t addr = shader->hdr_addr;
   NVC6C0_QMDV03_00_VAL_SET(qmd, PROGRAM_ADDRESS_LOWER, addr & 0xffffffff);
   NVC6C0_QMDV03_00_VAL_SET(qmd, PROGRAM_ADDRESS_UPPER, addr >> 32);
}

VkResult
nvk_compute_pipeline_create(struct nvk_device *dev,
                            struct vk_pipeline_cache *cache,
                            const VkComputePipelineCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator,
                            VkPipeline *pPipeline)
{
   VK_FROM_HANDLE(vk_pipeline_layout, pipeline_layout, pCreateInfo->layout);
   struct nvk_physical_device *pdev = nvk_device_physical(dev);
   struct nvk_compute_pipeline *pipeline;
   VkResult result;

   pipeline = (void *)nvk_pipeline_zalloc(dev, NVK_PIPELINE_COMPUTE,
                                          sizeof(*pipeline), pAllocator);
   if (pipeline == NULL)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

   assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT);

   VkPipelineCreateFlags2KHR pipeline_flags =
      vk_compute_pipeline_create_flags(pCreateInfo);

   if (pipeline_flags &
       VK_PIPELINE_CREATE_2_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)
      cache = NULL;

   struct vk_pipeline_robustness_state robustness;
   vk_pipeline_robustness_state_fill(&dev->vk, &robustness,
                                     pCreateInfo->pNext,
                                     pCreateInfo->stage.pNext);
   unsigned char sha1[SHA1_DIGEST_LENGTH];
   nvk_hash_shader(sha1, &pCreateInfo->stage, &robustness, false,
                   pipeline_layout, NULL);

   bool cache_hit = false;
   struct vk_pipeline_cache_object *cache_obj = NULL;
   if (cache) {
      cache_obj = vk_pipeline_cache_lookup_object(cache, &sha1, sizeof(sha1),
                                                  &nvk_shader_ops, &cache_hit);
      pipeline->base.shaders[MESA_SHADER_COMPUTE] =
         container_of(cache_obj, struct nvk_shader, base);
      result = VK_SUCCESS;
   }

   if (!cache_obj) {
      if (pCreateInfo->flags &
          VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR) {
         result = VK_PIPELINE_COMPILE_REQUIRED;
         goto fail;
      }

      nir_shader *nir;
      result = nvk_shader_stage_to_nir(dev, &pCreateInfo->stage, &robustness,
                                       cache, NULL, &nir);
      if (result != VK_SUCCESS)
         goto fail;

      struct nvk_shader *shader = nvk_shader_init(dev, sha1, SHA1_DIGEST_LENGTH);
      if (shader == NULL) {
         /* Don't leak the half-built pipeline or the NIR on allocation
          * failure. */
         ralloc_free(nir);
         result = vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);
         goto fail;
      }

      nvk_lower_nir(dev, nir, &robustness, false,
                    pipeline_layout->set_count,
                    pipeline_layout->set_layouts,
                    &shader->cbuf_map);

      result = nvk_compile_nir(dev, nir, pipeline_flags, &robustness, NULL,
                               cache, shader);
      if (result == VK_SUCCESS) {
         cache_obj = &shader->base;
         if (cache)
            cache_obj = vk_pipeline_cache_add_object(cache, cache_obj);
         pipeline->base.shaders[MESA_SHADER_COMPUTE] =
            container_of(cache_obj, struct nvk_shader, base);
      }
      ralloc_free(nir);
   }

   if (result != VK_SUCCESS)
      goto fail;

   struct nvk_shader *shader = container_of(cache_obj, struct nvk_shader, base);
   result = nvk_shader_upload(dev, shader);
   if (result != VK_SUCCESS)
      goto fail;
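
   /* Pick the QMD layout matching the device's compute class, newest
    * generation first. */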
if (pdev->info.cls_compute >= AMPERE_COMPUTE_A)
nvc6c0_compute_setup_launch_desc_template(pipeline->qmd_template, shader);
else if (pdev->info.cls_compute >= VOLTA_COMPUTE_A)
nvc3c0_compute_setup_launch_desc_template(pipeline->qmd_template, shader);
else if (pdev->info.cls_compute >= PASCAL_COMPUTE_A)
nvc0c0_compute_setup_launch_desc_template(pipeline->qmd_template, shader);
else if (pdev->info.cls_compute >= KEPLER_COMPUTE_A)
nva0c0_compute_setup_launch_desc_template(pipeline->qmd_template, shader);
else
unreachable("Fermi and older not supported!");
*pPipeline = nvk_pipeline_to_handle(&pipeline->base);
return VK_SUCCESS;
fail:
nvk_pipeline_free(dev, &pipeline->base, pAllocator);
return result;
}

src/nouveau/vulkan/nvk_pipeline.c (deleted)

@@ -1,256 +0,0 @@
/*
 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */

#include "nvk_pipeline.h"
#include "nvk_device.h"
#include "nvk_entrypoints.h"
#include "vk_pipeline_cache.h"

struct nvk_pipeline *
nvk_pipeline_zalloc(struct nvk_device *dev,
                    enum nvk_pipeline_type type, size_t size,
                    const VkAllocationCallbacks *pAllocator)
{
   struct nvk_pipeline *pipeline;

   assert(size >= sizeof(*pipeline));
   pipeline = vk_object_zalloc(&dev->vk, pAllocator, size,
                               VK_OBJECT_TYPE_PIPELINE);
   if (pipeline == NULL)
      return NULL;

   pipeline->type = type;

   return pipeline;
}

void
nvk_pipeline_free(struct nvk_device *dev,
                  struct nvk_pipeline *pipeline,
                  const VkAllocationCallbacks *pAllocator)
{
   for (uint32_t s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
      if (pipeline->shaders[s] != NULL)
         vk_pipeline_cache_object_unref(&dev->vk, &pipeline->shaders[s]->base);
   }

   vk_object_free(&dev->vk, pAllocator, pipeline);
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_CreateGraphicsPipelines(VkDevice _device,
                            VkPipelineCache pipelineCache,
                            uint32_t createInfoCount,
                            const VkGraphicsPipelineCreateInfo *pCreateInfos,
                            const VkAllocationCallbacks *pAllocator,
                            VkPipeline *pPipelines)
{
   VK_FROM_HANDLE(nvk_device, dev, _device);
   VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
   VkResult result = VK_SUCCESS;

   unsigned i = 0;
   for (; i < createInfoCount; i++) {
      VkResult r = nvk_graphics_pipeline_create(dev, cache, &pCreateInfos[i],
                                                pAllocator, &pPipelines[i]);
      if (r == VK_SUCCESS)
         continue;

      result = r;
      pPipelines[i] = VK_NULL_HANDLE;
      if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT)
         break;
   }
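
   /* Clear any remaining entries so pipelines skipped by an early-return
    * failure read back as VK_NULL_HANDLE. */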
for (; i < createInfoCount; i++)
pPipelines[i] = VK_NULL_HANDLE;
return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_CreateComputePipelines(VkDevice _device,
                           VkPipelineCache pipelineCache,
                           uint32_t createInfoCount,
                           const VkComputePipelineCreateInfo *pCreateInfos,
                           const VkAllocationCallbacks *pAllocator,
                           VkPipeline *pPipelines)
{
   VK_FROM_HANDLE(nvk_device, dev, _device);
   VK_FROM_HANDLE(vk_pipeline_cache, cache, pipelineCache);
   VkResult result = VK_SUCCESS;

   unsigned i = 0;
   for (; i < createInfoCount; i++) {
      VkResult r = nvk_compute_pipeline_create(dev, cache, &pCreateInfos[i],
                                               pAllocator, &pPipelines[i]);
      if (r == VK_SUCCESS)
         continue;

      result = r;
      pPipelines[i] = VK_NULL_HANDLE;
      if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT)
         break;
   }

   for (; i < createInfoCount; i++)
      pPipelines[i] = VK_NULL_HANDLE;

   return result;
}

VKAPI_ATTR void VKAPI_CALL
nvk_DestroyPipeline(VkDevice _device, VkPipeline _pipeline,
                    const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(nvk_device, dev, _device);
   VK_FROM_HANDLE(nvk_pipeline, pipeline, _pipeline);

   if (!pipeline)
      return;

   nvk_pipeline_free(dev, pipeline, pAllocator);
}
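
/* Format a string into a fixed-size Vulkan char array, zeroing it first and
 * asserting that nothing was truncated. */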
#define WRITE_STR(field, ...) ({                                \
   memset(field, 0, sizeof(field));                             \
   UNUSED int i = snprintf(field, sizeof(field), __VA_ARGS__);  \
   assert(i > 0 && i < sizeof(field));                          \
})

VKAPI_ATTR VkResult VKAPI_CALL
nvk_GetPipelineExecutablePropertiesKHR(
   VkDevice device,
   const VkPipelineInfoKHR *pPipelineInfo,
   uint32_t *pExecutableCount,
   VkPipelineExecutablePropertiesKHR *pProperties)
{
   VK_FROM_HANDLE(nvk_pipeline, pipeline, pPipelineInfo->pipeline);
   VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutablePropertiesKHR, out,
                          pProperties, pExecutableCount);

   for (gl_shader_stage stage = 0; stage < MESA_SHADER_STAGES; stage++) {
      const struct nvk_shader *shader = pipeline->shaders[stage];
      if (!shader || shader->code_size == 0)
         continue;

      vk_outarray_append_typed(VkPipelineExecutablePropertiesKHR, &out, props) {
         props->stages = mesa_to_vk_shader_stage(stage);
         props->subgroupSize = 32;
         WRITE_STR(props->name, "%s", _mesa_shader_stage_to_string(stage));
         WRITE_STR(props->description, "%s shader",
                   _mesa_shader_stage_to_string(stage));
      }
   }

   return vk_outarray_status(&out);
}
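
/* Map an executableIndex from the API onto the idx-th shader stage that is
 * actually present in the pipeline. */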
static struct nvk_shader *
shader_for_exe_idx(struct nvk_pipeline *pipeline, uint32_t idx)
{
   for (gl_shader_stage stage = 0; stage < MESA_SHADER_STAGES; stage++) {
      const struct nvk_shader *shader = pipeline->shaders[stage];
      if (!shader || shader->code_size == 0)
         continue;

      if (idx == 0)
         return pipeline->shaders[stage];
      idx--;
   }

   return NULL;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_GetPipelineExecutableStatisticsKHR(
   VkDevice device,
   const VkPipelineExecutableInfoKHR *pExecutableInfo,
   uint32_t *pStatisticCount,
   VkPipelineExecutableStatisticKHR *pStatistics)
{
   VK_FROM_HANDLE(nvk_pipeline, pipeline, pExecutableInfo->pipeline);
   VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutableStatisticKHR, out,
                          pStatistics, pStatisticCount);

   struct nvk_shader *shader =
      shader_for_exe_idx(pipeline, pExecutableInfo->executableIndex);

   vk_outarray_append_typed(VkPipelineExecutableStatisticKHR, &out, stat) {
      WRITE_STR(stat->name, "Code Size");
      WRITE_STR(stat->description,
                "Size of the compiled shader binary, in bytes");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = shader->code_size;
   }

   vk_outarray_append_typed(VkPipelineExecutableStatisticKHR, &out, stat) {
      WRITE_STR(stat->name, "Number of GPRs");
      WRITE_STR(stat->description, "Number of GPRs used by this pipeline");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = shader->info.num_gprs;
   }

   vk_outarray_append_typed(VkPipelineExecutableStatisticKHR, &out, stat) {
      WRITE_STR(stat->name, "SLM Size");
      WRITE_STR(stat->description,
                "Size of shader local (scratch) memory, in bytes");
      stat->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
      stat->value.u64 = shader->info.slm_size;
   }

   return vk_outarray_status(&out);
}
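
/* Standard Vulkan two-call idiom: when pData is NULL, just report the
 * required size; otherwise copy as much as fits and return false on
 * truncation so the caller can report VK_INCOMPLETE. */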
static bool
write_ir_text(VkPipelineExecutableInternalRepresentationKHR* ir,
              const char *data)
{
   ir->isText = VK_TRUE;

   size_t data_len = strlen(data) + 1;

   if (ir->pData == NULL) {
      ir->dataSize = data_len;
      return true;
   }

   strncpy(ir->pData, data, ir->dataSize);

   if (ir->dataSize < data_len)
      return false;

   ir->dataSize = data_len;
   return true;
}

VKAPI_ATTR VkResult VKAPI_CALL
nvk_GetPipelineExecutableInternalRepresentationsKHR(
   VkDevice device,
   const VkPipelineExecutableInfoKHR *pExecutableInfo,
   uint32_t *pInternalRepresentationCount,
   VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
{
   VK_FROM_HANDLE(nvk_pipeline, pipeline, pExecutableInfo->pipeline);
   VK_OUTARRAY_MAKE_TYPED(VkPipelineExecutableInternalRepresentationKHR, out,
                          pInternalRepresentations,
                          pInternalRepresentationCount);
   bool incomplete_text = false;

   struct nvk_shader *shader =
      shader_for_exe_idx(pipeline, pExecutableInfo->executableIndex);

   if (shader->nak != NULL && shader->nak->asm_str != NULL) {
      vk_outarray_append_typed(VkPipelineExecutableInternalRepresentationKHR, &out, ir) {
         WRITE_STR(ir->name, "NAK assembly");
         WRITE_STR(ir->description, "NAK assembly");
         if (!write_ir_text(ir, shader->nak->asm_str))
            incomplete_text = true;
      }
   }

   return incomplete_text ? VK_INCOMPLETE : vk_outarray_status(&out);
}