nvk: Use nvkmd_mem for nvk_cmd_pool

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/30033>
This commit is contained in:
Faith Ekstrand 2024-07-01 08:36:03 -05:00 committed by Marge Bot
parent bf8115e3c2
commit 586990f89e
4 changed files with 119 additions and 110 deletions

View file

@@ -14,6 +14,7 @@
#include "nvk_mme.h"
#include "nvk_physical_device.h"
#include "nvk_shader.h"
#include "nvkmd/nvkmd.h"
#include "vk_pipeline_layout.h"
#include "vk_synchronization.h"
@@ -48,8 +49,8 @@ nvk_destroy_cmd_buffer(struct vk_command_buffer *vk_cmd_buffer)
nvk_descriptor_state_fini(cmd, &cmd->state.gfx.descriptors);
nvk_descriptor_state_fini(cmd, &cmd->state.cs.descriptors);
nvk_cmd_pool_free_bo_list(pool, &cmd->bos);
nvk_cmd_pool_free_bo_list(pool, &cmd->gart_bos);
nvk_cmd_pool_free_mem_list(pool, &cmd->owned_mem);
nvk_cmd_pool_free_mem_list(pool, &cmd->owned_gart_mem);
util_dynarray_fini(&cmd->pushes);
vk_command_buffer_finish(&cmd->vk);
vk_free(&pool->vk.alloc, cmd);
@@ -81,8 +82,8 @@ nvk_create_cmd_buffer(struct vk_command_pool *vk_pool,
cmd->vk.dynamic_graphics_state.ms.sample_locations =
&cmd->state.gfx._dynamic_sl;
list_inithead(&cmd->bos);
list_inithead(&cmd->gart_bos);
list_inithead(&cmd->owned_mem);
list_inithead(&cmd->owned_gart_mem);
util_dynarray_init(&cmd->pushes, NULL);
*cmd_buffer_out = &cmd->vk;
@@ -103,11 +104,11 @@ nvk_reset_cmd_buffer(struct vk_command_buffer *vk_cmd_buffer,
nvk_descriptor_state_fini(cmd, &cmd->state.gfx.descriptors);
nvk_descriptor_state_fini(cmd, &cmd->state.cs.descriptors);
nvk_cmd_pool_free_bo_list(pool, &cmd->bos);
nvk_cmd_pool_free_gart_bo_list(pool, &cmd->gart_bos);
cmd->upload_bo = NULL;
cmd->push_bo = NULL;
cmd->push_bo_limit = NULL;
nvk_cmd_pool_free_mem_list(pool, &cmd->owned_mem);
nvk_cmd_pool_free_gart_mem_list(pool, &cmd->owned_gart_mem);
cmd->upload_mem = NULL;
cmd->push_mem = NULL;
cmd->push_mem_limit = NULL;
cmd->push = (struct nv_push) {0};
util_dynarray_clear(&cmd->pushes);
@@ -125,16 +126,18 @@ const struct vk_command_buffer_ops nvk_cmd_buffer_ops = {
static uint32_t push_runout[NVK_CMD_BUFFER_MAX_PUSH];
static VkResult
nvk_cmd_buffer_alloc_bo(struct nvk_cmd_buffer *cmd, bool force_gart, struct nvk_cmd_bo **bo_out)
nvk_cmd_buffer_alloc_mem(struct nvk_cmd_buffer *cmd, bool force_gart,
struct nvk_cmd_mem **mem_out)
{
VkResult result = nvk_cmd_pool_alloc_bo(nvk_cmd_buffer_pool(cmd), force_gart, bo_out);
VkResult result = nvk_cmd_pool_alloc_mem(nvk_cmd_buffer_pool(cmd),
force_gart, mem_out);
if (result != VK_SUCCESS)
return result;
if (force_gart)
list_addtail(&(*bo_out)->link, &cmd->gart_bos);
list_addtail(&(*mem_out)->link, &cmd->owned_gart_mem);
else
list_addtail(&(*bo_out)->link, &cmd->bos);
list_addtail(&(*mem_out)->link, &cmd->owned_mem);
return VK_SUCCESS;
}
@@ -142,13 +145,13 @@ nvk_cmd_buffer_alloc_bo(struct nvk_cmd_buffer *cmd, bool force_gart, struct nvk_
static void
nvk_cmd_buffer_flush_push(struct nvk_cmd_buffer *cmd)
{
if (likely(cmd->push_bo != NULL)) {
const uint32_t bo_offset =
(char *)cmd->push.start - (char *)cmd->push_bo->map;
if (likely(cmd->push_mem != NULL)) {
const uint32_t mem_offset =
(char *)cmd->push.start - (char *)cmd->push_mem->mem->map;
struct nvk_cmd_push push = {
.map = cmd->push.start,
.addr = cmd->push_bo->bo->offset + bo_offset,
.addr = cmd->push_mem->mem->va->addr + mem_offset,
.range = nv_push_dw_count(&cmd->push) * 4,
};
util_dynarray_append(&cmd->pushes, struct nvk_cmd_push, push);
@@ -162,16 +165,16 @@ nvk_cmd_buffer_new_push(struct nvk_cmd_buffer *cmd)
{
nvk_cmd_buffer_flush_push(cmd);
VkResult result = nvk_cmd_buffer_alloc_bo(cmd, false, &cmd->push_bo);
VkResult result = nvk_cmd_buffer_alloc_mem(cmd, false, &cmd->push_mem);
if (unlikely(result != VK_SUCCESS)) {
STATIC_ASSERT(NVK_CMD_BUFFER_MAX_PUSH <= NVK_CMD_BO_SIZE / 4);
cmd->push_bo = NULL;
STATIC_ASSERT(NVK_CMD_BUFFER_MAX_PUSH <= NVK_CMD_MEM_SIZE / 4);
cmd->push_mem = NULL;
nv_push_init(&cmd->push, push_runout, 0);
cmd->push_bo_limit = &push_runout[NVK_CMD_BUFFER_MAX_PUSH];
cmd->push_mem_limit = &push_runout[NVK_CMD_BUFFER_MAX_PUSH];
} else {
nv_push_init(&cmd->push, cmd->push_bo->map, 0);
cmd->push_bo_limit =
(uint32_t *)((char *)cmd->push_bo->map + NVK_CMD_BO_SIZE);
nv_push_init(&cmd->push, cmd->push_mem->mem->map, 0);
cmd->push_mem_limit =
(uint32_t *)((char *)cmd->push_mem->mem->map + NVK_CMD_MEM_SIZE);
}
}
@@ -196,29 +199,29 @@ nvk_cmd_buffer_upload_alloc(struct nvk_cmd_buffer *cmd,
uint64_t *addr, void **ptr)
{
assert(size % 4 == 0);
assert(size <= NVK_CMD_BO_SIZE);
assert(size <= NVK_CMD_MEM_SIZE);
uint32_t offset = cmd->upload_offset;
if (alignment > 0)
offset = align(offset, alignment);
assert(offset <= NVK_CMD_BO_SIZE);
if (cmd->upload_bo != NULL && size <= NVK_CMD_BO_SIZE - offset) {
*addr = cmd->upload_bo->bo->offset + offset;
*ptr = (char *)cmd->upload_bo->map + offset;
assert(offset <= NVK_CMD_MEM_SIZE);
if (cmd->upload_mem != NULL && size <= NVK_CMD_MEM_SIZE - offset) {
*addr = cmd->upload_mem->mem->va->addr + offset;
*ptr = (char *)cmd->upload_mem->mem->map + offset;
cmd->upload_offset = offset + size;
return VK_SUCCESS;
}
struct nvk_cmd_bo *bo;
VkResult result = nvk_cmd_buffer_alloc_bo(cmd, false, &bo);
struct nvk_cmd_mem *mem;
VkResult result = nvk_cmd_buffer_alloc_mem(cmd, false, &mem);
if (unlikely(result != VK_SUCCESS))
return result;
*addr = bo->bo->offset;
*ptr = bo->map;
*addr = mem->mem->va->addr;
*ptr = mem->mem->map;
/* Pick whichever of the current upload BO and the new BO will have more
* room left to be the BO for the next upload. If our upload size is
@@ -226,8 +229,8 @@ nvk_cmd_buffer_upload_alloc(struct nvk_cmd_buffer *cmd,
* upload BO on this one allocation and continuing on the current upload
* BO.
*/
if (cmd->upload_bo == NULL || size < cmd->upload_offset) {
cmd->upload_bo = bo;
if (cmd->upload_mem == NULL || size < cmd->upload_offset) {
cmd->upload_mem = mem;
cmd->upload_offset = size;
}
@@ -258,21 +261,21 @@ nvk_cmd_buffer_cond_render_alloc(struct nvk_cmd_buffer *cmd,
uint32_t offset = cmd->cond_render_gart_offset;
uint32_t size = 64;
assert(offset <= NVK_CMD_BO_SIZE);
if (cmd->cond_render_gart_bo != NULL && size <= NVK_CMD_BO_SIZE - offset) {
*addr = cmd->cond_render_gart_bo->bo->offset + offset;
assert(offset <= NVK_CMD_MEM_SIZE);
if (cmd->cond_render_gart_mem != NULL && size <= NVK_CMD_MEM_SIZE - offset) {
*addr = cmd->cond_render_gart_mem->mem->va->addr + offset;
cmd->cond_render_gart_offset = offset + size;
return VK_SUCCESS;
}
struct nvk_cmd_bo *bo;
VkResult result = nvk_cmd_buffer_alloc_bo(cmd, true, &bo);
struct nvk_cmd_mem *mem;
VkResult result = nvk_cmd_buffer_alloc_mem(cmd, true, &mem);
if (unlikely(result != VK_SUCCESS))
return result;
*addr = bo->bo->offset;
*addr = mem->mem->va->addr;
/* Pick whichever of the current upload BO and the new BO will have more
* room left to be the BO for the next upload. If our upload size is
@@ -280,8 +283,8 @@ nvk_cmd_buffer_cond_render_alloc(struct nvk_cmd_buffer *cmd,
* upload BO on this one allocation and continuing on the current upload
* BO.
*/
if (cmd->cond_render_gart_bo == NULL || size < cmd->cond_render_gart_offset) {
cmd->cond_render_gart_bo = bo;
if (cmd->cond_render_gart_mem == NULL || size < cmd->cond_render_gart_offset) {
cmd->cond_render_gart_mem = mem;
cmd->cond_render_gart_offset = size;
}

View file

@@ -21,7 +21,7 @@
struct nvk_buffer;
struct nvk_cbuf;
struct nvk_cmd_bo;
struct nvk_cmd_mem;
struct nvk_cmd_buffer;
struct nvk_cmd_pool;
struct nvk_image_view;
@@ -195,23 +195,23 @@ struct nvk_cmd_buffer {
struct nvk_compute_state cs;
} state;
/** List of nvk_cmd_bo
/** List of nvk_cmd_mem
*
* This list exists entirely for ownership tracking. Everything in here
* must also be in pushes or bo_refs if it is to be referenced by this
* command buffer.
*/
struct list_head bos;
struct list_head gart_bos;
struct list_head owned_mem;
struct list_head owned_gart_mem;
struct nvk_cmd_bo *upload_bo;
struct nvk_cmd_mem *upload_mem;
uint32_t upload_offset;
struct nvk_cmd_bo *cond_render_gart_bo;
struct nvk_cmd_mem *cond_render_gart_mem;
uint32_t cond_render_gart_offset;
struct nvk_cmd_bo *push_bo;
uint32_t *push_bo_limit;
struct nvk_cmd_mem *push_mem;
uint32_t *push_mem_limit;
struct nv_push push;
/** Array of struct nvk_cmd_push
@@ -251,7 +251,7 @@ nvk_cmd_buffer_push(struct nvk_cmd_buffer *cmd, uint32_t dw_count)
assert(dw_count <= NVK_CMD_BUFFER_MAX_PUSH);
/* Compare to the actual limit on our push bo */
if (unlikely(cmd->push.end + dw_count > cmd->push_bo_limit))
if (unlikely(cmd->push.end + dw_count > cmd->push_mem_limit))
nvk_cmd_buffer_new_push(cmd);
cmd->push.limit = cmd->push.end + dw_count;

View file

@@ -7,38 +7,41 @@
#include "nvk_device.h"
#include "nvk_entrypoints.h"
#include "nvk_physical_device.h"
#include "nvkmd/nvkmd.h"
static VkResult
nvk_cmd_bo_create(struct nvk_cmd_pool *pool, bool force_gart, struct nvk_cmd_bo **bo_out)
nvk_cmd_mem_create(struct nvk_cmd_pool *pool, bool force_gart, struct nvk_cmd_mem **mem_out)
{
struct nvk_device *dev = nvk_cmd_pool_device(pool);
struct nvk_cmd_bo *bo;
struct nvk_cmd_mem *mem;
VkResult result;
bo = vk_zalloc(&pool->vk.alloc, sizeof(*bo), 8,
mem = vk_zalloc(&pool->vk.alloc, sizeof(*mem), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (bo == NULL)
if (mem == NULL)
return vk_error(pool, VK_ERROR_OUT_OF_HOST_MEMORY);
uint32_t flags = NOUVEAU_WS_BO_GART | NOUVEAU_WS_BO_MAP | NOUVEAU_WS_BO_NO_SHARE;
uint32_t flags = NVKMD_MEM_GART | NVKMD_MEM_CAN_MAP | NVKMD_MEM_NO_SHARE;
if (force_gart)
assert(flags & NOUVEAU_WS_BO_GART);
bo->bo = nouveau_ws_bo_new_mapped(dev->ws_dev, NVK_CMD_BO_SIZE, 0,
flags, NOUVEAU_WS_BO_WR, &bo->map);
if (bo->bo == NULL) {
vk_free(&pool->vk.alloc, bo);
return vk_error(pool, VK_ERROR_OUT_OF_DEVICE_MEMORY);
assert(flags & NVKMD_MEM_GART);
result = nvkmd_dev_alloc_mapped_mem(dev->nvkmd, &pool->vk.base,
NVK_CMD_MEM_SIZE, 0,
flags, NVKMD_MEM_MAP_WR,
&mem->mem);
if (result != VK_SUCCESS) {
vk_free(&pool->vk.alloc, mem);
return result;
}
*bo_out = bo;
*mem_out = mem;
return VK_SUCCESS;
}
static void
nvk_cmd_bo_destroy(struct nvk_cmd_pool *pool, struct nvk_cmd_bo *bo)
nvk_cmd_mem_destroy(struct nvk_cmd_pool *pool, struct nvk_cmd_mem *mem)
{
nouveau_ws_bo_unmap(bo->bo, bo->map);
nouveau_ws_bo_destroy(bo->bo);
vk_free(&pool->vk.alloc, bo);
nvkmd_mem_unref(mem->mem);
vk_free(&pool->vk.alloc, mem);
}
VKAPI_ATTR VkResult VKAPI_CALL
@@ -62,8 +65,8 @@ nvk_CreateCommandPool(VkDevice _device,
return result;
}
list_inithead(&pool->free_bos);
list_inithead(&pool->free_gart_bos);
list_inithead(&pool->free_mem);
list_inithead(&pool->free_gart_mem);
*pCmdPool = nvk_cmd_pool_to_handle(pool);
@@ -71,51 +74,54 @@ nvk_CreateCommandPool(VkDevice _device,
}
static void
nvk_cmd_pool_destroy_bos(struct nvk_cmd_pool *pool)
nvk_cmd_pool_destroy_mem(struct nvk_cmd_pool *pool)
{
list_for_each_entry_safe(struct nvk_cmd_bo, bo, &pool->free_bos, link)
nvk_cmd_bo_destroy(pool, bo);
list_for_each_entry_safe(struct nvk_cmd_mem, mem, &pool->free_mem, link)
nvk_cmd_mem_destroy(pool, mem);
list_inithead(&pool->free_bos);
list_inithead(&pool->free_mem);
list_for_each_entry_safe(struct nvk_cmd_bo, bo, &pool->free_gart_bos, link)
nvk_cmd_bo_destroy(pool, bo);
list_for_each_entry_safe(struct nvk_cmd_mem, mem, &pool->free_gart_mem, link)
nvk_cmd_mem_destroy(pool, mem);
list_inithead(&pool->free_gart_bos);
list_inithead(&pool->free_gart_mem);
}
VkResult
nvk_cmd_pool_alloc_bo(struct nvk_cmd_pool *pool, bool force_gart, struct nvk_cmd_bo **bo_out)
nvk_cmd_pool_alloc_mem(struct nvk_cmd_pool *pool, bool force_gart,
struct nvk_cmd_mem **mem_out)
{
struct nvk_cmd_bo *bo = NULL;
struct nvk_cmd_mem *mem = NULL;
if (force_gart) {
if (!list_is_empty(&pool->free_gart_bos))
bo = list_first_entry(&pool->free_gart_bos, struct nvk_cmd_bo, link);
if (!list_is_empty(&pool->free_gart_mem))
mem = list_first_entry(&pool->free_gart_mem, struct nvk_cmd_mem, link);
} else {
if (!list_is_empty(&pool->free_bos))
bo = list_first_entry(&pool->free_bos, struct nvk_cmd_bo, link);
if (!list_is_empty(&pool->free_mem))
mem = list_first_entry(&pool->free_mem, struct nvk_cmd_mem, link);
}
if (bo) {
list_del(&bo->link);
*bo_out = bo;
if (mem) {
list_del(&mem->link);
*mem_out = mem;
return VK_SUCCESS;
}
return nvk_cmd_bo_create(pool, force_gart, bo_out);
return nvk_cmd_mem_create(pool, force_gart, mem_out);
}
void
nvk_cmd_pool_free_bo_list(struct nvk_cmd_pool *pool, struct list_head *bos)
nvk_cmd_pool_free_mem_list(struct nvk_cmd_pool *pool,
struct list_head *mem_list)
{
list_splicetail(bos, &pool->free_bos);
list_inithead(bos);
list_splicetail(mem_list, &pool->free_mem);
list_inithead(mem_list);
}
void
nvk_cmd_pool_free_gart_bo_list(struct nvk_cmd_pool *pool, struct list_head *bos)
nvk_cmd_pool_free_gart_mem_list(struct nvk_cmd_pool *pool,
struct list_head *mem_list)
{
list_splicetail(bos, &pool->free_gart_bos);
list_inithead(bos);
list_splicetail(mem_list, &pool->free_gart_mem);
list_inithead(mem_list);
}
VKAPI_ATTR void VKAPI_CALL
@@ -130,7 +136,7 @@ nvk_DestroyCommandPool(VkDevice _device,
return;
vk_command_pool_finish(&pool->vk);
nvk_cmd_pool_destroy_bos(pool);
nvk_cmd_pool_destroy_mem(pool);
vk_free2(&device->vk.alloc, pAllocator, pool);
}
@@ -142,5 +148,5 @@ nvk_TrimCommandPool(VkDevice device,
VK_FROM_HANDLE(nvk_cmd_pool, pool, commandPool);
vk_command_pool_trim(&pool->vk, flags);
nvk_cmd_pool_destroy_bos(pool);
nvk_cmd_pool_destroy_mem(pool);
}

View file

@@ -9,13 +9,13 @@
#include "vk_command_pool.h"
#define NVK_CMD_BO_SIZE 64*1024
#define NVK_CMD_MEM_SIZE 64*1024
struct nvkmd_mem;
/* Recyclable command buffer BO, used for both push buffers and upload */
struct nvk_cmd_bo {
struct nouveau_ws_bo *bo;
void *map;
struct nvk_cmd_mem {
struct nvkmd_mem *mem;
/** Link in nvk_cmd_pool::free_bos or nvk_cmd_buffer::bos */
struct list_head link;
@@ -24,9 +24,9 @@ struct nvk_cmd_bo {
struct nvk_cmd_pool {
struct vk_command_pool vk;
/** List of nvk_cmd_bo */
struct list_head free_bos;
struct list_head free_gart_bos;
/** List of nvk_cmd_mem */
struct list_head free_mem;
struct list_head free_gart_mem;
};
VK_DEFINE_NONDISP_HANDLE_CASTS(nvk_cmd_pool, vk.base, VkCommandPool,
@@ -38,12 +38,12 @@ nvk_cmd_pool_device(struct nvk_cmd_pool *pool)
return (struct nvk_device *)pool->vk.base.device;
}
VkResult nvk_cmd_pool_alloc_bo(struct nvk_cmd_pool *pool,
bool force_gart,
struct nvk_cmd_bo **bo_out);
VkResult nvk_cmd_pool_alloc_mem(struct nvk_cmd_pool *pool,
bool force_gart,
struct nvk_cmd_mem **mem_out);
void nvk_cmd_pool_free_bo_list(struct nvk_cmd_pool *pool,
struct list_head *bos);
void nvk_cmd_pool_free_gart_bo_list(struct nvk_cmd_pool *pool,
struct list_head *bos);
void nvk_cmd_pool_free_mem_list(struct nvk_cmd_pool *pool,
struct list_head *mem_list);
void nvk_cmd_pool_free_gart_mem_list(struct nvk_cmd_pool *pool,
struct list_head *mem_list);
#endif /* NVK_CMD_POOL_H */