tu: Move VMA heap to the logical device

Since the last commit, the drm fd is created at per-logical-device
granularity, which means each logical device has its own address
space. So the VMA heap can be moved to the logical device.

Signed-off-by: Danylo Piliaiev <dpiliaiev@igalia.com>
Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/18254>
This commit is contained in:
Danylo Piliaiev 2022-10-11 17:51:08 +02:00 committed by Marge Bot
parent 3a8fac0ccd
commit 0df8532777
3 changed files with 26 additions and 27 deletions

View file

@ -351,12 +351,6 @@ tu_physical_device_init(struct tu_physical_device *device,
device->memory.type_count++;
}
if (device->has_set_iova) {
mtx_init(&device->vma_mutex, mtx_plain);
util_vma_heap_init(&device->vma, device->va_start,
ROUND_DOWN_TO(device->va_size, 4096));
}
fd_get_driver_uuid(device->driver_uuid);
fd_get_device_uuid(device->device_uuid, &device->dev_id);
@ -374,7 +368,7 @@ tu_physical_device_init(struct tu_physical_device *device,
NULL,
&dispatch_table);
if (result != VK_SUCCESS)
goto fail_free_vma;
goto fail_free_name;
device->vk.supported_sync_types = device->sync_types;
@ -383,7 +377,7 @@ tu_physical_device_init(struct tu_physical_device *device,
if (result != VK_SUCCESS) {
vk_startup_errorf(instance, result, "WSI init failure");
vk_physical_device_finish(&device->vk);
goto fail_free_vma;
goto fail_free_name;
}
#endif
@ -398,9 +392,6 @@ tu_physical_device_init(struct tu_physical_device *device,
return VK_SUCCESS;
fail_free_vma:
if (device->has_set_iova)
util_vma_heap_finish(&device->vma);
fail_free_name:
vk_free(&instance->vk.alloc, (void *)device->name);
return result;
@ -417,11 +408,7 @@ tu_physical_device_finish(struct tu_physical_device *device)
if (device->master_fd != -1)
close(device->master_fd);
if (device->has_set_iova)
util_vma_heap_finish(&device->vma);
disk_cache_destroy(device->vk.disk_cache);
vk_free(&device->instance->vk.alloc, (void *)device->name);
vk_physical_device_finish(&device->vk);
@ -2223,6 +2210,12 @@ tu_CreateDevice(VkPhysicalDevice physicalDevice,
u_rwlock_init(&device->dma_bo_lock);
pthread_mutex_init(&device->submit_mutex, NULL);
if (physical_device->has_set_iova) {
mtx_init(&device->vma_mutex, mtx_plain);
util_vma_heap_init(&device->vma, physical_device->va_start,
ROUND_DOWN_TO(physical_device->va_size, 4096));
}
if (TU_DEBUG(BOS))
device->bo_sizes = _mesa_hash_table_create(NULL, _mesa_hash_string, _mesa_key_string_equal);
@ -2464,6 +2457,8 @@ fail_global_bo_map:
fail_global_bo:
ir3_compiler_destroy(device->compiler);
util_sparse_array_finish(&device->bo_map);
if (physical_device->has_set_iova)
util_vma_heap_finish(&device->vma);
fail_queues:
for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
@ -2540,6 +2535,9 @@ tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
tu_drm_device_finish(device);
if (device->physical_device->has_set_iova)
util_vma_heap_finish(&device->vma);
util_sparse_array_finish(&device->bo_map);
u_rwlock_destroy(&device->dma_bo_lock);

View file

@ -111,8 +111,6 @@ struct tu_physical_device
uint32_t submitqueue_priority_count;
struct tu_memory_heap heap;
mtx_t vma_mutex;
struct util_vma_heap vma;
struct vk_sync_type syncobj_type;
struct vk_sync_timeline_type timeline_type;
@ -287,6 +285,9 @@ struct tu_device
BITSET_DECLARE(custom_border_color, TU_BORDER_COLOR_COUNT);
mtx_t mutex;
mtx_t vma_mutex;
struct util_vma_heap vma;
/* bo list for submits: */
struct drm_msm_gem_submit_bo *bo_list;
/* map bo handles to bo list index: */

View file

@ -281,13 +281,13 @@ tu_allocate_userspace_iova(struct tu_device *dev,
enum tu_bo_alloc_flags flags,
uint64_t *iova)
{
mtx_lock(&dev->physical_device->vma_mutex);
mtx_lock(&dev->vma_mutex);
*iova = 0;
if (flags & TU_BO_ALLOC_REPLAYABLE) {
if (client_iova) {
if (util_vma_heap_alloc_addr(&dev->physical_device->vma, client_iova,
if (util_vma_heap_alloc_addr(&dev->vma, client_iova,
size)) {
*iova = client_iova;
} else {
@ -298,16 +298,16 @@ tu_allocate_userspace_iova(struct tu_device *dev,
* for them not to clash. The easiest way to do this is to allocate
* them from the other end of the address space.
*/
dev->physical_device->vma.alloc_high = true;
dev->vma.alloc_high = true;
*iova =
util_vma_heap_alloc(&dev->physical_device->vma, size, 0x1000);
util_vma_heap_alloc(&dev->vma, size, 0x1000);
}
} else {
dev->physical_device->vma.alloc_high = false;
*iova = util_vma_heap_alloc(&dev->physical_device->vma, size, 0x1000);
dev->vma.alloc_high = false;
*iova = util_vma_heap_alloc(&dev->vma, size, 0x1000);
}
mtx_unlock(&dev->physical_device->vma_mutex);
mtx_unlock(&dev->vma_mutex);
if (!*iova)
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
@ -619,9 +619,9 @@ msm_bo_finish(struct tu_device *dev, struct tu_bo *bo)
mtx_unlock(&dev->bo_mutex);
if (dev->physical_device->has_set_iova) {
mtx_lock(&dev->physical_device->vma_mutex);
util_vma_heap_free(&dev->physical_device->vma, bo->iova, bo->size);
mtx_unlock(&dev->physical_device->vma_mutex);
mtx_lock(&dev->vma_mutex);
util_vma_heap_free(&dev->vma, bo->iova, bo->size);
mtx_unlock(&dev->vma_mutex);
}
/* Our BO structs are stored in a sparse array in the physical device,