nvk: Add a ws_dev to nvk_device and use it

At the moment, this is just a convenient shorthand, but we're about to
permanently move the nouveau_ws_device to nvk_device, so it will soon be
the only way to access it.

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24326>
Author: Faith Ekstrand, 2023-07-20 12:49:34 -05:00 (committed by Marge Bot)
Commit: be5d6c7cbe (parent: e2bdbc2151)
13 changed files with 42 additions and 41 deletions
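The shape of the change, sketched in C below. The struct layout and the
before/after call pattern come straight from the hunks that follow; bo,
size, align, and flags are placeholder names for illustration only.

   /* Sketch only: field layout from the nvk_device.h hunk below. */
   struct nvk_device {
      struct vk_device vk;
      struct nvk_physical_device *pdev;
      struct nouveau_ws_device *ws_dev;   /* new: cached from pdev->ws_dev in nvk_CreateDevice() */
      struct nouveau_ws_context *ws_ctx;
      /* ... */
   };

   /* Before: every allocation chased two pointers through the physical device. */
   bo = nouveau_ws_bo_new(dev->pdev->dev, size, align, flags);

   /* After: the winsys device is reachable directly from the logical device. */
   bo = nouveau_ws_bo_new(dev->ws_dev, size, align, flags);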

@@ -52,7 +52,7 @@ nvk_bo_sync_init(struct vk_device *vk_dev,
sync->state = initial_value ? NVK_BO_SYNC_STATE_SIGNALED :
NVK_BO_SYNC_STATE_RESET;
- sync->bo = nouveau_ws_bo_new(dev->pdev->dev, 0x1000, 0,
+ sync->bo = nouveau_ws_bo_new(dev->ws_dev, 0x1000, 0,
NOUVEAU_WS_BO_GART);
if (!sync->bo)
return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

@@ -15,7 +15,7 @@ nvk_cmd_bo_create(struct nvk_cmd_pool *pool, struct nvk_cmd_bo **bo_out)
return vk_error(pool, VK_ERROR_OUT_OF_HOST_MEMORY);
uint32_t flags = NOUVEAU_WS_BO_GART | NOUVEAU_WS_BO_MAP;
- bo->bo = nouveau_ws_bo_new_mapped(dev->pdev->dev, NVK_CMD_BO_SIZE, 0,
+ bo->bo = nouveau_ws_bo_new_mapped(dev->ws_dev, NVK_CMD_BO_SIZE, 0,
flags, NOUVEAU_WS_BO_WR, &bo->map);
if (bo->bo == NULL) {
vk_free(&pool->vk.alloc, bo);

@@ -424,7 +424,7 @@ nvk_CreateDescriptorPool(VkDevice _device,
if (bo_size) {
uint32_t flags = NOUVEAU_WS_BO_GART | NOUVEAU_WS_BO_MAP;
- pool->bo = nouveau_ws_bo_new(device->pdev->dev, bo_size, 0, flags);
+ pool->bo = nouveau_ws_bo_new(device->ws_dev, bo_size, 0, flags);
if (!pool->bo) {
nvk_destroy_descriptor_pool(device, pAllocator, pool);
return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);

@@ -10,7 +10,6 @@ nvk_descriptor_table_grow_locked(struct nvk_device *dev,
struct nvk_descriptor_table *table,
uint32_t new_alloc)
{
- struct nvk_physical_device *pdev = nvk_device_physical(dev);
struct nouveau_ws_bo *new_bo;
void *new_map;
uint32_t *new_free_table;
@@ -18,7 +17,7 @@ nvk_descriptor_table_grow_locked(struct nvk_device *dev,
assert(new_alloc > table->alloc && new_alloc <= table->max_alloc);
const uint32_t new_bo_size = new_alloc * table->desc_size;
- new_bo = nouveau_ws_bo_new(pdev->dev, new_bo_size, 256,
+ new_bo = nouveau_ws_bo_new(dev->ws_dev, new_bo_size, 256,
NOUVEAU_WS_BO_LOCAL | NOUVEAU_WS_BO_MAP);
if (new_bo == NULL) {
return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,

@@ -81,7 +81,7 @@ nvk_slm_area_ensure(struct nvk_device *dev,
if (likely(bytes_per_mp <= area->bytes_per_mp))
return VK_SUCCESS;
- uint64_t size = bytes_per_mp * dev->pdev->dev->mp_count;
+ uint64_t size = bytes_per_mp * dev->ws_dev->mp_count;
/* The hardware seems to require this alignment for
* NV9097_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER.
@@ -89,7 +89,7 @@ nvk_slm_area_ensure(struct nvk_device *dev,
size = ALIGN(size, 0x20000);
struct nouveau_ws_bo *bo =
- nouveau_ws_bo_new(dev->pdev->dev, size, 0, NOUVEAU_WS_BO_LOCAL);
+ nouveau_ws_bo_new(dev->ws_dev, size, 0, NOUVEAU_WS_BO_LOCAL);
if (bo == NULL)
return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
@@ -139,11 +139,12 @@ nvk_CreateDevice(VkPhysicalDevice physicalDevice,
if (result != VK_SUCCESS)
goto fail_alloc;
- vk_device_set_drm_fd(&dev->vk, pdev->dev->fd);
+ vk_device_set_drm_fd(&dev->vk, pdev->ws_dev->fd);
dev->vk.command_buffer_ops = &nvk_cmd_buffer_ops;
dev->pdev = pdev;
+ dev->ws_dev = pdev->ws_dev;

- int ret = nouveau_ws_context_create(pdev->dev, &dev->ws_ctx);
+ int ret = nouveau_ws_context_create(dev->ws_dev, &dev->ws_ctx);
if (ret) {
if (ret == -ENOSPC)
result = vk_error(dev, VK_ERROR_TOO_MANY_OBJECTS);
@@ -216,9 +217,9 @@ nvk_CreateDevice(VkPhysicalDevice physicalDevice,
pthread_condattr_destroy(&condattr);
void *zero_map;
- dev->zero_page = nouveau_ws_bo_new_mapped(dev->pdev->dev, 0x1000, 0,
-                                           NOUVEAU_WS_BO_LOCAL,
-                                           NOUVEAU_WS_BO_WR, &zero_map);
+ dev->zero_page = nouveau_ws_bo_new_mapped(dev->ws_dev, 0x1000, 0,
+                                           NOUVEAU_WS_BO_LOCAL,
+                                           NOUVEAU_WS_BO_WR, &zero_map);
if (dev->zero_page == NULL)
goto fail_queue_submit;
@@ -228,7 +229,7 @@ nvk_CreateDevice(VkPhysicalDevice physicalDevice,
if (dev->pdev->info.cls_eng3d >= FERMI_A &&
dev->pdev->info.cls_eng3d < MAXWELL_A) {
/* max size is 256k */
- dev->vab_memory = nouveau_ws_bo_new(dev->pdev->dev, 1 << 17, 1 << 20,
+ dev->vab_memory = nouveau_ws_bo_new(dev->ws_dev, 1 << 17, 1 << 20,
NOUVEAU_WS_BO_LOCAL);
if (dev->vab_memory == NULL)
goto fail_zero_page;

@@ -29,6 +29,7 @@ struct nvk_device {
struct vk_device vk;
struct nvk_physical_device *pdev;
+ struct nouveau_ws_device *ws_dev;
struct nouveau_ws_context *ws_ctx;
/* Protected by nvk_device::mutex */

@@ -120,7 +120,7 @@ nvk_GetMemoryFdPropertiesKHR(VkDevice device,
switch (handleType) {
case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
- bo = nouveau_ws_bo_from_dma_buf(pdev->dev, fd);
+ bo = nouveau_ws_bo_from_dma_buf(dev->ws_dev, fd);
if (bo == NULL)
return vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
break;
@@ -181,14 +181,14 @@ nvk_allocate_memory(struct nvk_device *dev,
fd_info->handleType ==
VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
- mem->bo = nouveau_ws_bo_from_dma_buf(pdev->dev, fd_info->fd);
+ mem->bo = nouveau_ws_bo_from_dma_buf(dev->ws_dev, fd_info->fd);
if (mem->bo == NULL) {
result = vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
goto fail_alloc;
}
assert(!(flags & ~mem->bo->flags));
} else if (tile_info) {
- mem->bo = nouveau_ws_bo_new_tiled(pdev->dev,
+ mem->bo = nouveau_ws_bo_new_tiled(dev->ws_dev,
pAllocateInfo->allocationSize, 0,
tile_info->pte_kind,
tile_info->tile_mode,
@@ -198,14 +198,14 @@ nvk_allocate_memory(struct nvk_device *dev,
goto fail_alloc;
}
} else {
- mem->bo = nouveau_ws_bo_new(pdev->dev, aligned_size, alignment, flags);
+ mem->bo = nouveau_ws_bo_new(dev->ws_dev, aligned_size, alignment, flags);
if (!mem->bo) {
result = vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
goto fail_alloc;
}
}
- if (pdev->dev->debug_flags & NVK_DEBUG_ZERO_MEMORY) {
+ if (dev->ws_dev->debug_flags & NVK_DEBUG_ZERO_MEMORY) {
if (type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
void *map = nouveau_ws_bo_map(mem->bo, NOUVEAU_WS_BO_RDWR);
if (map == NULL) {

@@ -83,7 +83,7 @@ nvk_heap_grow_locked(struct nvk_device *dev, struct nvk_heap *heap)
void *new_bo_map;
struct nouveau_ws_bo *new_bo =
- nouveau_ws_bo_new_mapped(dev->pdev->dev,
+ nouveau_ws_bo_new_mapped(dev->ws_dev,
new_bo_size + heap->overalloc, 0,
heap->bo_flags, heap->map_flags,
&new_bo_map);
@@ -163,7 +163,7 @@ nvk_heap_grow_locked(struct nvk_device *dev, struct nvk_heap *heap)
NVK_HEAP_MIN_SIZE << (MAX2(heap->bo_count, 1) - 1);
heap->bos[heap->bo_count].bo =
- nouveau_ws_bo_new_mapped(dev->pdev->dev,
+ nouveau_ws_bo_new_mapped(dev->ws_dev,
new_bo_size + heap->overalloc, 0,
heap->bo_flags, heap->map_flags,
&heap->bos[heap->bo_count].map);

@@ -615,8 +615,8 @@ nvk_create_drm_physical_device(struct vk_instance *_instance,
return VK_ERROR_INCOMPATIBLE_DRIVER;
}
- struct nouveau_ws_device *ndev = nouveau_ws_device_new(drm_device);
- if (!ndev)
+ struct nouveau_ws_device *ws_dev = nouveau_ws_device_new(drm_device);
+ if (!ws_dev)
return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
vk_warn_non_conformant_implementation("NVK");
@@ -637,10 +637,10 @@ nvk_create_drm_physical_device(struct vk_instance *_instance,
&dispatch_table, &wsi_physical_device_entrypoints, false);
struct vk_device_extension_table supported_extensions;
- nvk_get_device_extensions(&ndev->info, &supported_extensions);
+ nvk_get_device_extensions(&ws_dev->info, &supported_extensions);
struct vk_features supported_features;
- nvk_get_device_features(&ndev->info, &supported_features);
+ nvk_get_device_features(&ws_dev->info, &supported_features);
result = vk_physical_device_init(&pdev->vk, &instance->vk,
&supported_extensions,
@@ -650,8 +650,8 @@ nvk_create_drm_physical_device(struct vk_instance *_instance,
if (result != VK_SUCCESS)
goto fail_alloc;
- pdev->dev = ndev;
- pdev->info = ndev->info;
+ pdev->ws_dev = ws_dev;
+ pdev->info = ws_dev->info;
const struct {
uint16_t vendor_id;
@@ -714,7 +714,7 @@ fail_init:
fail_alloc:
vk_free(&instance->vk.alloc, pdev);
fail_dev_alloc:
- nouveau_ws_device_destroy(ndev);
+ nouveau_ws_device_destroy(ws_dev);
return result;
}
@@ -725,7 +725,7 @@ nvk_physical_device_destroy(struct vk_physical_device *vk_pdev)
container_of(vk_pdev, struct nvk_physical_device, vk);
nvk_finish_wsi(pdev);
- nouveau_ws_device_destroy(pdev->dev);
+ nouveau_ws_device_destroy(pdev->ws_dev);
vk_physical_device_finish(&pdev->vk);
vk_free(&pdev->vk.instance->alloc, pdev);
}

@@ -14,7 +14,7 @@ struct nvk_instance;
struct nvk_physical_device {
struct vk_physical_device vk;
- struct nouveau_ws_device *dev;
+ struct nouveau_ws_device *ws_dev;
struct nv_device_info info;
struct wsi_device wsi_device;

@@ -62,7 +62,7 @@ nvk_CreateQueryPool(VkDevice device,
if (pool->vk.query_count > 0) {
uint32_t bo_size = pool->query_start +
pool->query_stride * pool->vk.query_count;
- pool->bo = nouveau_ws_bo_new_mapped(dev->pdev->dev, bo_size, 0,
+ pool->bo = nouveau_ws_bo_new_mapped(dev->ws_dev, bo_size, 0,
NOUVEAU_WS_BO_GART,
NOUVEAU_WS_BO_RDWR,
&pool->bo_map);
@@ -71,7 +71,7 @@ nvk_CreateQueryPool(VkDevice device,
return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
- if (dev->pdev->dev->debug_flags & NVK_DEBUG_ZERO_MEMORY)
+ if (dev->ws_dev->debug_flags & NVK_DEBUG_ZERO_MEMORY)
memset(pool->bo_map, 0, bo_size);
}

@@ -123,7 +123,7 @@ nvk_queue_state_update(struct nvk_device *dev,
struct nouveau_ws_bo *push_bo;
void *push_map;
- push_bo = nouveau_ws_bo_new_mapped(dev->pdev->dev, 256 * 4, 0,
+ push_bo = nouveau_ws_bo_new_mapped(dev->ws_dev, 256 * 4, 0,
NOUVEAU_WS_BO_GART | NOUVEAU_WS_BO_MAP,
NOUVEAU_WS_BO_WR, &push_map);
if (push_bo == NULL)
@@ -282,12 +282,12 @@ nvk_queue_submit(struct vk_queue *vk_queue,
"pointers pushbuf");
}
- const bool sync = dev->pdev->dev->debug_flags & NVK_DEBUG_PUSH_SYNC;
+ const bool sync = dev->ws_dev->debug_flags & NVK_DEBUG_PUSH_SYNC;
result = nvk_queue_submit_drm_nouveau(queue, submit, sync);
if ((sync && result != VK_SUCCESS) ||
- (dev->pdev->dev->debug_flags & NVK_DEBUG_PUSH_DUMP)) {
+ (dev->ws_dev->debug_flags & NVK_DEBUG_PUSH_DUMP)) {
nvk_queue_state_dump_push(dev, &queue->state, stderr);
for (unsigned i = 0; i < submit->command_buffer_count; i++) {
@@ -320,7 +320,7 @@ nvk_queue_init(struct nvk_device *dev, struct nvk_queue *queue,
queue->vk.driver_submit = nvk_queue_submit;
void *empty_push_map;
- queue->empty_push = nouveau_ws_bo_new_mapped(dev->pdev->dev, 4096, 0,
+ queue->empty_push = nouveau_ws_bo_new_mapped(dev->ws_dev, 4096, 0,
NOUVEAU_WS_BO_GART |
NOUVEAU_WS_BO_MAP,
NOUVEAU_WS_BO_WR,
@@ -376,7 +376,7 @@ nvk_queue_submit_simple(struct nvk_queue *queue,
return VK_ERROR_DEVICE_LOST;
void *push_map;
- push_bo = nouveau_ws_bo_new_mapped(dev->pdev->dev, dw_count * 4, 0,
+ push_bo = nouveau_ws_bo_new_mapped(dev->ws_dev, dw_count * 4, 0,
NOUVEAU_WS_BO_GART | NOUVEAU_WS_BO_MAP,
NOUVEAU_WS_BO_WR, &push_map);
if (push_bo == NULL)
@@ -384,14 +384,14 @@ nvk_queue_submit_simple(struct nvk_queue *queue,
memcpy(push_map, dw, dw_count * 4);
- const bool debug_sync = dev->pdev->dev->debug_flags & NVK_DEBUG_PUSH_SYNC;
+ const bool debug_sync = dev->ws_dev->debug_flags & NVK_DEBUG_PUSH_SYNC;
result = nvk_queue_submit_simple_drm_nouveau(queue, dw_count, push_bo,
extra_bo_count, extra_bos,
sync || debug_sync);
if ((debug_sync && result != VK_SUCCESS) ||
- (dev->pdev->dev->debug_flags & NVK_DEBUG_PUSH_DUMP)) {
+ (dev->ws_dev->debug_flags & NVK_DEBUG_PUSH_DUMP)) {
struct nv_push push = {
.start = (uint32_t *)dw,
.end = (uint32_t *)dw + dw_count,

@@ -39,7 +39,7 @@ push_add_bo(struct push_builder *pb,
{
const uint32_t domain = (bo->flags & NOUVEAU_WS_BO_GART) ?
NOUVEAU_GEM_DOMAIN_GART :
- pb->dev->pdev->dev->local_mem_domain;
+ pb->dev->ws_dev->local_mem_domain;
for (uint32_t i = 0; i < pb->req.nr_buffers; i++) {
if (pb->req_bo[i].handle == bo->handle) {
@@ -88,7 +88,7 @@ push_add_push(struct push_builder *pb, struct nouveau_ws_bo *bo,
static VkResult
push_submit(struct push_builder *pb, struct nvk_queue *queue, bool sync)
{
- int err = drmCommandWriteRead(pb->dev->pdev->dev->fd,
+ int err = drmCommandWriteRead(pb->dev->ws_dev->fd,
DRM_NOUVEAU_GEM_PUSHBUF,
&pb->req, sizeof(pb->req));
if (err) {
@@ -100,7 +100,7 @@ push_submit(struct push_builder *pb, struct nvk_queue *queue, bool sync)
struct drm_nouveau_gem_cpu_prep req = {};
req.handle = pb->req_bo[0].handle;
req.flags = NOUVEAU_GEM_CPU_PREP_WRITE;
- err = drmCommandWrite(pb->dev->pdev->dev->fd,
+ err = drmCommandWrite(pb->dev->ws_dev->fd,
DRM_NOUVEAU_GEM_CPU_PREP,
&req, sizeof(req));
if (err) {