nvk: Simplify mme builder function arguments

We do not need the full nvk_device struct in the mme builder functions;
the nv_device_info is enough. The mme_builder already keeps a pointer to
it, so use that instead. This also means the mme builder tests no longer
need to set up a fake device struct.

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24326>
George Ouzounoudis 2023-01-26 19:39:28 +02:00 committed by Marge Bot
parent ffd896852b
commit 09358fc0c1
6 changed files with 38 additions and 49 deletions
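
Before the diffs, a minimal sketch of the shape this change gives the API: the
builder carries the nv_device_info pointer, so a per-MME build callback needs
only the mme_builder. The struct layouts, the TURING_A value, and
example_mme_builder below are illustrative assumptions; only the type and
function names come from the patch itself.

/* Sketch only: simplified, assumed layouts for illustration, not the real
 * Mesa definitions. */
#include <stdint.h>

#define TURING_A 0xC597   /* Turing 3D class; exact value assumed here */

struct nv_device_info {
   uint16_t cls_eng3d;     /* 3D engine class */
   uint16_t cls_compute;   /* compute engine class */
};

struct mme_builder {
   const struct nv_device_info *devinfo;  /* stashed by mme_builder_init() */
   /* ... encoder state elided ... */
};

static void
mme_builder_init(struct mme_builder *b, const struct nv_device_info *devinfo)
{
   b->devinfo = devinfo;
}

/* After this change a per-MME build callback only takes the builder; any
 * hardware checks go through b->devinfo instead of dev->pdev->info. */
typedef void (*nvk_mme_builder_func)(struct mme_builder *b);

static void
example_mme_builder(struct mme_builder *b)
{
   if (b->devinfo->cls_eng3d < TURING_A)
      return;   /* macro not supported before Turing in this sketch */
   /* ... emit macro instructions with the mme_* helpers ... */
}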

@@ -12,8 +12,7 @@
 #include "nvk_cl9097.h"
 #include "drf.h"
 
-void nvk_mme_clear_views(struct nvk_device *dev,
-                         struct mme_builder *b)
+void nvk_mme_clear_views(struct mme_builder *b)
 {
    struct mme_value payload = mme_load(b);
    struct mme_value view_mask = mme_load(b);
@@ -32,8 +31,7 @@ void nvk_mme_clear_views(struct nvk_device *dev,
    }
 }
 
-void nvk_mme_clear_layers(struct nvk_device *dev,
-                          struct mme_builder *b)
+void nvk_mme_clear_layers(struct mme_builder *b)
 {
    struct mme_value payload = mme_load(b);
    struct mme_value layer_count = mme_load(b);

@@ -54,9 +54,9 @@ nvc0c0_qmd_set_dispatch_size(UNUSED struct nvk_device *dev, uint32_t *qmd,
 }
 
 static uint32_t
-qmd_dispatch_size_offset(struct nvk_device *dev)
+qmd_dispatch_size_offset(const struct nv_device_info *devinfo)
 {
-   assert(dev->pdev->info.cls_compute >= VOLTA_COMPUTE_A);
+   assert(devinfo->cls_compute >= VOLTA_COMPUTE_A);
    uint32_t bit = DRF_LO(DRF_MW(NVC3C0_QMDV02_02_CTA_RASTER_WIDTH));
    assert(bit % 32 == 0);
    assert(DRF_LO(DRF_MW(NVC3C0_QMDV02_02_CTA_RASTER_HEIGHT)) == bit + 32);
@@ -186,7 +186,7 @@ nvk_build_mme_add_cs_invocations(struct mme_builder *b,
 }
 
 void
-nvk_mme_add_cs_invocations(struct nvk_device *dev, struct mme_builder *b)
+nvk_mme_add_cs_invocations(struct mme_builder *b)
 {
    struct mme_value count_hi = mme_load(b);
    struct mme_value count_lo = mme_load(b);
@@ -276,9 +276,9 @@ mme_store_global_vec3(struct mme_builder *b,
 }
 
 void
-nvk_mme_dispatch_indirect(struct nvk_device *dev, struct mme_builder *b)
+nvk_mme_dispatch_indirect(struct mme_builder *b)
 {
-   if (dev->pdev->info.cls_eng3d < TURING_A)
+   if (b->devinfo->cls_eng3d < TURING_A)
       return;
 
    struct mme_value local_size = mme_load(b);
@@ -288,7 +288,7 @@ nvk_mme_dispatch_indirect(struct nvk_device *dev, struct mme_builder *b)
    mme_tu104_read_fifoed(b, dispatch_addr, mme_imm(3));
 
-   uint32_t qmd_size_offset = qmd_dispatch_size_offset(dev);
+   uint32_t qmd_size_offset = qmd_dispatch_size_offset(b->devinfo);
    uint32_t root_desc_size_offset =
       offsetof(struct nvk_root_descriptor_table, cs.group_count);

@@ -73,7 +73,7 @@ nvk_queue_init_context_draw_state(struct nvk_queue *queue)
    for (uint32_t mme = 0, mme_pos = 0; mme < NVK_MME_COUNT; mme++) {
       size_t size;
-      uint32_t *dw = nvk_build_mme(dev, mme, &size);
+      uint32_t *dw = nvk_build_mme(&nvk_device_physical(dev)->info, mme, &size);
       if (dw == NULL)
          return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -1568,7 +1568,7 @@ nvk_mme_build_draw(struct mme_builder *b, struct mme_value begin)
 }
 
 void
-nvk_mme_draw(struct nvk_device *dev, struct mme_builder *b)
+nvk_mme_draw(struct mme_builder *b)
 {
    struct mme_value begin = mme_load(b);
@@ -1672,7 +1672,7 @@ nvk_mme_build_draw_indexed(struct mme_builder *b,
 }
 
 void
-nvk_mme_draw_indexed(struct nvk_device *dev, struct mme_builder *b)
+nvk_mme_draw_indexed(struct mme_builder *b)
 {
    struct mme_value begin = mme_load(b);
@@ -1731,11 +1731,11 @@ nvk_mme_fill(struct mme_builder *b, uint16_t idx)
 }
 
 void
-nvk_mme_draw_indirect(struct nvk_device *dev, struct mme_builder *b)
+nvk_mme_draw_indirect(struct mme_builder *b)
 {
    struct mme_value begin = mme_load(b);
 
-   if (dev->pdev->info.cls_eng3d >= TURING_A) {
+   if (b->devinfo->cls_eng3d >= TURING_A) {
       struct mme_value64 draw_addr = mme_load_addr64(b);
       struct mme_value draw_count = mme_load(b);
       struct mme_value stride = mme_load(b);
@@ -1846,11 +1846,11 @@ nvk_CmdDrawIndirect(VkCommandBuffer commandBuffer,
 }
 
 void
-nvk_mme_draw_indexed_indirect(struct nvk_device *dev, struct mme_builder *b)
+nvk_mme_draw_indexed_indirect(struct mme_builder *b)
 {
    struct mme_value begin = mme_load(b);
 
-   if (dev->pdev->info.cls_eng3d >= TURING_A) {
+   if (b->devinfo->cls_eng3d >= TURING_A) {
      struct mme_value64 draw_addr = mme_load_addr64(b);
      struct mme_value draw_count = mme_load(b);
      struct mme_value stride = mme_load(b);

@@ -1,7 +1,6 @@
 #include "nvk_mme.h"
 
-#include "nvk_device.h"
-#include "nvk_physical_device.h"
+#include "nvk_private.h"
 
 static const nvk_mme_builder_func mme_builders[NVK_MME_COUNT] = {
    [NVK_MME_CLEAR_VIEWS] = nvk_mme_clear_views,
@@ -17,12 +16,13 @@ static const nvk_mme_builder_func mme_builders[NVK_MME_COUNT] = {
 };
 
 uint32_t *
-nvk_build_mme(struct nvk_device *dev, enum nvk_mme mme, size_t *size_out)
+nvk_build_mme(const struct nv_device_info *devinfo,
+              enum nvk_mme mme, size_t *size_out)
 {
    struct mme_builder b;
-   mme_builder_init(&b, &nvk_device_physical(dev)->info);
+   mme_builder_init(&b, devinfo);
 
-   mme_builders[mme](dev, &b);
+   mme_builders[mme](&b);
 
    return mme_builder_finish(&b, size_out);
 }
@@ -30,16 +30,9 @@ nvk_build_mme(struct nvk_device *dev, enum nvk_mme mme, size_t *size_out)
 void
 nvk_test_build_all_mmes(const struct nv_device_info *devinfo)
 {
-   struct nvk_physical_device pdev = { .info = *devinfo };
-   vk_object_base_init(NULL, &pdev.vk.base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
-
-   struct nvk_device dev = { .pdev = &pdev };
-   vk_object_base_init(NULL, &dev.vk.base, VK_OBJECT_TYPE_DEVICE);
-   dev.vk.physical = &pdev.vk;
-
    for (uint32_t mme = 0; mme < NVK_MME_COUNT; mme++) {
       size_t size;
-      uint32_t *dw = nvk_build_mme(&dev, mme, &size);
+      uint32_t *dw = nvk_build_mme(devinfo, mme, &size);
       assert(dw != NULL);
       free(dw);
    }

@@ -3,7 +3,7 @@
 #include "mme_builder.h"
 
-struct nvk_device;
+struct nv_device_info;
 
 enum nvk_mme {
    NVK_MME_CLEAR_VIEWS,
@@ -27,24 +27,22 @@ enum nvk_mme_scratch {
    NVK_MME_NUM_SCRATCH,
 };
 
-typedef void (*nvk_mme_builder_func)(struct nvk_device *dev,
-                                     struct mme_builder *b);
+typedef void (*nvk_mme_builder_func)(struct mme_builder *b);
 
-uint32_t *nvk_build_mme(struct nvk_device *dev, enum nvk_mme mme,
-                        size_t *size_out);
+uint32_t *nvk_build_mme(const struct nv_device_info *devinfo,
+                        enum nvk_mme mme, size_t *size_out);
 
 void nvk_test_build_all_mmes(const struct nv_device_info *devinfo);
 
-void nvk_mme_clear_views(struct nvk_device *dev, struct mme_builder *b);
-void nvk_mme_clear_layers(struct nvk_device *dev, struct mme_builder *b);
-void nvk_mme_draw(struct nvk_device *dev, struct mme_builder *b);
-void nvk_mme_draw_indexed(struct nvk_device *dev, struct mme_builder *b);
-void nvk_mme_draw_indirect(struct nvk_device *dev, struct mme_builder *b);
-void nvk_mme_draw_indexed_indirect(struct nvk_device *dev,
-                                   struct mme_builder *b);
-void nvk_mme_add_cs_invocations(struct nvk_device *dev, struct mme_builder *b);
-void nvk_mme_dispatch_indirect(struct nvk_device *dev, struct mme_builder *b);
-void nvk_mme_write_cs_invocations(struct nvk_device *dev, struct mme_builder *b);
-void nvk_mme_copy_queries(struct nvk_device *dev, struct mme_builder *b);
+void nvk_mme_clear_views(struct mme_builder *b);
+void nvk_mme_clear_layers(struct mme_builder *b);
+void nvk_mme_draw(struct mme_builder *b);
+void nvk_mme_draw_indexed(struct mme_builder *b);
+void nvk_mme_draw_indirect(struct mme_builder *b);
+void nvk_mme_draw_indexed_indirect(struct mme_builder *b);
+void nvk_mme_add_cs_invocations(struct mme_builder *b);
+void nvk_mme_dispatch_indirect(struct mme_builder *b);
+void nvk_mme_write_cs_invocations(struct mme_builder *b);
+void nvk_mme_copy_queries(struct mme_builder *b);
 
 #endif /* NVK_MME_H */

@@ -289,7 +289,7 @@ mme_store_global(struct mme_builder *b,
 }
 
 void
-nvk_mme_write_cs_invocations(struct nvk_device *dev, struct mme_builder *b)
+nvk_mme_write_cs_invocations(struct mme_builder *b)
 {
    struct mme_value64 dst_addr = mme_load_addr64(b);
@@ -534,9 +534,9 @@ nvk_GetQueryPoolResults(VkDevice device,
 }
 
 void
-nvk_mme_copy_queries(struct nvk_device *dev, struct mme_builder *b)
+nvk_mme_copy_queries(struct mme_builder *b)
 {
-   if (dev->pdev->info.cls_eng3d < TURING_A)
+   if (b->devinfo->cls_eng3d < TURING_A)
      return;
 
    struct mme_value64 dst_addr = mme_load_addr64(b);