nvk/nvkmd: Re-implement NVK_DEBUG=vm

This new implementation is hooked at the nvkmd level and also works for
queued binds via a bind ctx.

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/30033>
This commit is contained in:
Faith Ekstrand 2024-07-05 18:00:40 -05:00 committed by Marge Bot
parent 0abd7fa58a
commit 7bd9b9d96c
4 changed files with 150 additions and 47 deletions

View file

@ -189,10 +189,17 @@ nvkmd_nouveau_mem_export_dma_buf(struct nvkmd_mem *_mem,
return VK_SUCCESS;
}
static uint32_t
nvkmd_nouveau_mem_log_handle(struct nvkmd_mem *_mem)
{
return nvkmd_nouveau_mem(_mem)->bo->handle;
}
/* Virtual table for nouveau KMD memory objects, dispatched through
 * struct nvkmd_mem::ops by the common nvkmd wrappers.
 */
const struct nvkmd_mem_ops nvkmd_nouveau_mem_ops = {
.free = nvkmd_nouveau_mem_free,
.map = nvkmd_nouveau_mem_map,
.unmap = nvkmd_nouveau_mem_unmap,
.overmap = nvkmd_nouveau_mem_overmap,
.export_dma_buf = nvkmd_nouveau_mem_export_dma_buf,
/* Handle printed in NVK_DEBUG=vm log messages */
.log_handle = nvkmd_nouveau_mem_log_handle,
};

View file

@ -96,6 +96,7 @@ nvkmd_nouveau_try_create_pdev(struct _drmDevice *drm_device,
}
pdev->base.ops = &nvkmd_nouveau_pdev_ops;
pdev->base.debug_flags = debug_flags;
pdev->base.dev_info = ws_dev->info;
pdev->base.kmd_info = (struct nvkmd_info) {
.has_dma_buf = true,

View file

@ -6,6 +6,8 @@
#include "nvkmd.h"
#include "nouveau/nvkmd_nouveau.h"
#include <inttypes.h>
VkResult
nvkmd_try_create_pdev_for_drm(struct _drmDevice *drm_device,
struct vk_object_base *log_obj,
@ -44,6 +46,131 @@ nvkmd_dev_alloc_mapped_mem(struct nvkmd_dev *dev,
return VK_SUCCESS;
}
VkResult MUST_CHECK
nvkmd_dev_alloc_va(struct nvkmd_dev *dev,
                   struct vk_object_base *log_obj,
                   enum nvkmd_va_flags flags, uint8_t pte_kind,
                   uint64_t size_B, uint64_t align_B,
                   uint64_t fixed_addr, struct nvkmd_va **va_out)
{
   /* Allocate the VA range through the backend, then log it when
    * NVK_DEBUG=vm is enabled.
    */
   const VkResult result =
      dev->ops->alloc_va(dev, log_obj, flags, pte_kind,
                         size_B, align_B, fixed_addr, va_out);
   if (result != VK_SUCCESS)
      return result;

   if (unlikely(dev->pdev->debug_flags & NVK_DEBUG_VM)) {
      const struct nvkmd_va *va = *va_out;
      fprintf(stderr, "alloc va [0x%" PRIx64 ", 0x%" PRIx64 ")%s\n",
              va->addr, va->addr + size_B,
              (flags & NVKMD_VA_SPARSE) ? " sparse" : "");
   }

   return VK_SUCCESS;
}
void
nvkmd_va_free(struct nvkmd_va *va)
{
   /* Log before freeing: the message reads fields of *va, which the
    * backend free callback will destroy.
    */
   if (unlikely(va->dev->pdev->debug_flags & NVK_DEBUG_VM)) {
      fprintf(stderr, "free va [0x%" PRIx64 ", 0x%" PRIx64 ")%s\n",
              va->addr, va->addr + va->size_B,
              (va->flags & NVKMD_VA_SPARSE) ? " sparse" : "");
   }

   va->ops->free(va);
}
/* Log one memory bind for NVK_DEBUG=vm.
 *
 * Prints the memory range [mem_offset_B, mem_offset_B + range_B) of the
 * BO identified by its log_handle, and the VA range it is bound to.
 */
static inline void
log_va_bind_mem(struct nvkmd_va *va,
                uint64_t va_offset_B,
                struct nvkmd_mem *mem,
                uint64_t mem_offset_B,
                uint64_t range_B)
{
   /* The bind starts va_offset_B bytes into the VA range.  The previous
    * code ignored va_offset_B entirely and logged [va->addr, va->addr +
    * range_B), which is wrong for binds at a nonzero offset.
    */
   fprintf(stderr, "bind vma mem<0x%" PRIx32 ">"
                   "[0x%" PRIx64 ", 0x%" PRIx64 ") to "
                   "[0x%" PRIx64 ", 0x%" PRIx64 ")\n",
           mem->ops->log_handle(mem),
           mem_offset_B, mem_offset_B + range_B,
           va->addr + va_offset_B, va->addr + va_offset_B + range_B);
}
/* Log one VA unbind for NVK_DEBUG=vm. */
static inline void
log_va_unbind(struct nvkmd_va *va,
              uint64_t va_offset_B,
              uint64_t range_B)
{
   /* The unbind starts va_offset_B bytes into the VA range.  The previous
    * code ignored va_offset_B and always logged from va->addr, which is
    * wrong for unbinds at a nonzero offset.
    */
   fprintf(stderr, "unbind vma [0x%" PRIx64 ", 0x%" PRIx64 ")\n",
           va->addr + va_offset_B, va->addr + va_offset_B + range_B);
}
VkResult MUST_CHECK
nvkmd_va_bind_mem(struct nvkmd_va *va,
                  struct vk_object_base *log_obj,
                  uint64_t va_offset_B,
                  struct nvkmd_mem *mem,
                  uint64_t mem_offset_B,
                  uint64_t range_B)
{
   /* The bound range must lie entirely within both the VA range and the
    * memory object.
    */
   assert(va_offset_B <= va->size_B);
   assert(va_offset_B + range_B <= va->size_B);
   assert(mem_offset_B <= mem->size_B);
   assert(mem_offset_B + range_B <= mem->size_B);

   if (unlikely(va->dev->pdev->debug_flags & NVK_DEBUG_VM))
      log_va_bind_mem(va, va_offset_B, mem, mem_offset_B, range_B);

   return va->ops->bind_mem(va, log_obj, va_offset_B,
                            mem, mem_offset_B, range_B);
}
VkResult MUST_CHECK
nvkmd_va_unbind(struct nvkmd_va *va,
                struct vk_object_base *log_obj,
                uint64_t va_offset_B,
                uint64_t range_B)
{
   /* The unbound range must lie entirely within the VA range. */
   assert(va_offset_B <= va->size_B);
   assert(va_offset_B + range_B <= va->size_B);

   if (unlikely(va->dev->pdev->debug_flags & NVK_DEBUG_VM))
      log_va_unbind(va, va_offset_B, range_B);

   return va->ops->unbind(va, log_obj, va_offset_B, range_B);
}
VkResult MUST_CHECK
nvkmd_ctx_bind(struct nvkmd_ctx *ctx,
               struct vk_object_base *log_obj,
               uint32_t bind_count,
               const struct nvkmd_ctx_bind *binds)
{
   /* Validate every bind up front, before any of them is logged or
    * handed to the backend.
    */
   for (uint32_t i = 0; i < bind_count; i++) {
      const struct nvkmd_ctx_bind *bind = &binds[i];

      assert(bind->va_offset_B <= bind->va->size_B);
      assert(bind->va_offset_B + bind->range_B <= bind->va->size_B);
      if (bind->op == NVKMD_BIND_OP_BIND) {
         assert(bind->mem_offset_B <= bind->mem->size_B);
         assert(bind->mem_offset_B + bind->range_B <= bind->mem->size_B);
      } else {
         /* Unbinds carry no memory object */
         assert(bind->mem == NULL);
      }
   }

   if (unlikely(ctx->dev->pdev->debug_flags & NVK_DEBUG_VM)) {
      for (uint32_t i = 0; i < bind_count; i++) {
         const struct nvkmd_ctx_bind *bind = &binds[i];

         if (bind->op == NVKMD_BIND_OP_BIND) {
            log_va_bind_mem(bind->va, bind->va_offset_B,
                            bind->mem, bind->mem_offset_B, bind->range_B);
         } else {
            log_va_unbind(bind->va, bind->va_offset_B, bind->range_B);
         }
      }
   }

   return ctx->ops->bind(ctx, log_obj, bind_count, binds);
}
void
nvkmd_mem_unref(struct nvkmd_mem *mem)
{

View file

@ -98,6 +98,8 @@ struct nvkmd_pdev_ops {
struct nvkmd_pdev {
const struct nvkmd_pdev_ops *ops;
enum nvk_debug debug_flags;
struct nv_device_info dev_info;
struct nvkmd_info kmd_info;
@ -166,6 +168,9 @@ struct nvkmd_mem_ops {
VkResult (*export_dma_buf)(struct nvkmd_mem *mem,
struct vk_object_base *log_obj,
int *fd_out);
/** Handle to use for NVK_DEBUG_VM logging */
uint32_t (*log_handle)(struct nvkmd_mem *mem);
};
struct nvkmd_mem {
@ -381,16 +386,12 @@ nvkmd_dev_import_dma_buf(struct nvkmd_dev *dev,
return dev->ops->import_dma_buf(dev, log_obj, fd, mem_out);
}
static inline VkResult MUST_CHECK
VkResult MUST_CHECK
nvkmd_dev_alloc_va(struct nvkmd_dev *dev,
struct vk_object_base *log_obj,
enum nvkmd_va_flags flags, uint8_t pte_kind,
uint64_t size_B, uint64_t align_B,
uint64_t fixed_addr, struct nvkmd_va **va_out)
{
return dev->ops->alloc_va(dev, log_obj, flags, pte_kind, size_B, align_B,
fixed_addr, va_out);
}
uint64_t fixed_addr, struct nvkmd_va **va_out);
static inline VkResult MUST_CHECK
nvkmd_dev_create_ctx(struct nvkmd_dev *dev,
@ -453,40 +454,22 @@ nvkmd_mem_export_dma_buf(struct nvkmd_mem *mem,
return mem->ops->export_dma_buf(mem, log_obj, fd_out);
}
static inline void
nvkmd_va_free(struct nvkmd_va *va)
{
va->ops->free(va);
}
void
nvkmd_va_free(struct nvkmd_va *va);
static inline VkResult MUST_CHECK
VkResult MUST_CHECK
nvkmd_va_bind_mem(struct nvkmd_va *va,
struct vk_object_base *log_obj,
uint64_t va_offset_B,
struct nvkmd_mem *mem,
uint64_t mem_offset_B,
uint64_t range_B)
{
assert(va_offset_B <= va->size_B);
assert(va_offset_B + range_B <= va->size_B);
assert(mem_offset_B <= mem->size_B);
assert(mem_offset_B + range_B <= mem->size_B);
uint64_t range_B);
return va->ops->bind_mem(va, log_obj, va_offset_B,
mem, mem_offset_B, range_B);
}
static inline VkResult MUST_CHECK
VkResult MUST_CHECK
nvkmd_va_unbind(struct nvkmd_va *va,
struct vk_object_base *log_obj,
uint64_t va_offset_B,
uint64_t range_B)
{
assert(va_offset_B <= va->size_B);
assert(va_offset_B + range_B <= va->size_B);
return va->ops->unbind(va, log_obj, va_offset_B, range_B);
}
uint64_t range_B);
static inline void
nvkmd_ctx_destroy(struct nvkmd_ctx *ctx)
@ -512,26 +495,11 @@ nvkmd_ctx_exec(struct nvkmd_ctx *ctx,
return ctx->ops->exec(ctx, log_obj, exec_count, execs);
}
static inline VkResult MUST_CHECK
VkResult MUST_CHECK
nvkmd_ctx_bind(struct nvkmd_ctx *ctx,
struct vk_object_base *log_obj,
uint32_t bind_count,
const struct nvkmd_ctx_bind *binds)
{
for (uint32_t i = 0; i < bind_count; i++) {
assert(binds[i].va_offset_B <= binds[i].va->size_B);
assert(binds[i].va_offset_B + binds[i].range_B <= binds[i].va->size_B);
if (binds[i].op == NVKMD_BIND_OP_BIND) {
assert(binds[i].mem_offset_B <= binds[i].mem->size_B);
assert(binds[i].mem_offset_B + binds[i].range_B <=
binds[i].mem->size_B);
} else {
assert(binds[i].mem == NULL);
}
}
return ctx->ops->bind(ctx, log_obj, bind_count, binds);
}
const struct nvkmd_ctx_bind *binds);
static inline VkResult MUST_CHECK
nvkmd_ctx_signal(struct nvkmd_ctx *ctx,