diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index bec34ad6fde..b0331e9242d 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -2437,77 +2437,6 @@ radv_queue_finish(struct radv_queue *queue)
    vk_object_base_finish(&queue->base);
 }
 
-static void
-radv_bo_list_init(struct radv_bo_list *bo_list)
-{
-   u_rwlock_init(&bo_list->rwlock);
-   bo_list->list.count = bo_list->capacity = 0;
-   bo_list->list.bos = NULL;
-}
-
-static void
-radv_bo_list_finish(struct radv_bo_list *bo_list)
-{
-   free(bo_list->list.bos);
-   u_rwlock_destroy(&bo_list->rwlock);
-}
-
-VkResult radv_bo_list_add(struct radv_device *device,
-                          struct radeon_winsys_bo *bo)
-{
-   struct radv_bo_list *bo_list = &device->bo_list;
-
-   if (bo->is_local)
-      return VK_SUCCESS;
-
-   if (unlikely(!device->use_global_bo_list))
-      return VK_SUCCESS;
-
-   u_rwlock_wrlock(&bo_list->rwlock);
-   if (bo_list->list.count == bo_list->capacity) {
-      unsigned capacity = MAX2(4, bo_list->capacity * 2);
-      void *data = realloc(bo_list->list.bos, capacity * sizeof(struct radeon_winsys_bo*));
-
-      if (!data) {
-         u_rwlock_wrunlock(&bo_list->rwlock);
-         return VK_ERROR_OUT_OF_HOST_MEMORY;
-      }
-
-      bo_list->list.bos = (struct radeon_winsys_bo**)data;
-      bo_list->capacity = capacity;
-   }
-
-   bo_list->list.bos[bo_list->list.count++] = bo;
-   bo->use_global_list = true;
-   u_rwlock_wrunlock(&bo_list->rwlock);
-   return VK_SUCCESS;
-}
-
-void radv_bo_list_remove(struct radv_device *device,
-                         struct radeon_winsys_bo *bo)
-{
-   struct radv_bo_list *bo_list = &device->bo_list;
-
-   if (bo->is_local)
-      return;
-
-   if (unlikely(!device->use_global_bo_list))
-      return;
-
-   u_rwlock_wrlock(&bo_list->rwlock);
-   /* Loop the list backwards so we find the most recently added
-    * memory first. */
-   for(unsigned i = bo_list->list.count; i-- > 0;) {
-      if (bo_list->list.bos[i] == bo) {
-         bo_list->list.bos[i] = bo_list->list.bos[bo_list->list.count - 1];
-         bo->use_global_list = false;
-         --bo_list->list.count;
-         break;
-      }
-   }
-   u_rwlock_wrunlock(&bo_list->rwlock);
-}
-
 static void
 radv_device_init_gs_info(struct radv_device *device)
 {
@@ -2739,8 +2668,6 @@ VkResult radv_CreateDevice(
    device->overallocation_disallowed = overallocation_disallowed;
    mtx_init(&device->overallocation_mutex, mtx_plain);
 
-   radv_bo_list_init(&device->bo_list);
-
    /* Create one context per queue priority. */
    for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
       const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
@@ -2968,8 +2895,6 @@ fail_mem_cache:
 fail_meta:
    radv_device_finish_meta(device);
 fail:
-   radv_bo_list_finish(&device->bo_list);
-
    radv_thread_trace_finish(device);
 
    free(device->thread_trace.trigger_file);
@@ -3041,7 +2966,6 @@ void radv_DestroyDevice(
    radv_destroy_shader_slabs(device);
 
    u_cnd_monotonic_destroy(&device->timeline_cond);
-   radv_bo_list_finish(&device->bo_list);
 
    free(device->thread_trace.trigger_file);
    radv_thread_trace_finish(device);
@@ -4707,7 +4631,7 @@ radv_queue_submit_deferred(struct radv_deferred_queue_submission *submission,
       result = queue->device->ws->cs_submit(ctx, queue->queue_idx,
                                             &queue->device->empty_cs[queue->queue_family_index],
                                             1, NULL, NULL,
-                                            &sem_info, NULL,
+                                            &sem_info,
                                             false, base_fence);
       if (result != VK_SUCCESS)
          goto fail;
@@ -4728,8 +4652,6 @@ radv_queue_submit_deferred(struct radv_deferred_queue_submission *submission,
    for (uint32_t j = 0; j < submission->cmd_buffer_count; j += advance) {
       struct radeon_cmdbuf *initial_preamble = (do_flush && !j) ?
         initial_flush_preamble_cs : initial_preamble_cs;
-      const struct radv_winsys_bo_list *bo_list = NULL;
-
       advance = MIN2(max_cs_submission,
                      submission->cmd_buffer_count - j);
 
@@ -4739,19 +4661,10 @@ radv_queue_submit_deferred(struct radv_deferred_queue_submission *submission,
       sem_info.cs_emit_wait = j == 0;
       sem_info.cs_emit_signal = j + advance == submission->cmd_buffer_count;
 
-      if (unlikely(queue->device->use_global_bo_list)) {
-         u_rwlock_rdlock(&queue->device->bo_list.rwlock);
-         bo_list = &queue->device->bo_list.list;
-      }
-
       result = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
                                             advance, initial_preamble, continue_preamble_cs,
-                                            &sem_info, bo_list,
+                                            &sem_info,
                                             can_patch, base_fence);
-
-      if (unlikely(queue->device->use_global_bo_list))
-         u_rwlock_rdunlock(&queue->device->bo_list.rwlock);
-
       if (result != VK_SUCCESS)
          goto fail;
 
@@ -4979,7 +4892,7 @@ radv_queue_internal_submit(struct radv_queue *queue, struct radeon_cmdbuf *cs)
       return false;
 
    result = queue->device->ws->cs_submit(ctx, queue->queue_idx, &cs, 1,
-                                         NULL, NULL, &sem_info, NULL,
+                                         NULL, NULL, &sem_info,
                                          false, NULL);
    radv_free_sem_info(&sem_info);
    if (result != VK_SUCCESS)
@@ -5232,7 +5145,8 @@ radv_free_memory(struct radv_device *device,
          mtx_unlock(&device->overallocation_mutex);
       }
 
-      radv_bo_list_remove(device, mem->bo);
+      if (device->use_global_bo_list)
+         device->ws->buffer_make_resident(device->ws, mem->bo, false);
       device->ws->buffer_destroy(device->ws, mem->bo);
       mem->bo = NULL;
    }
@@ -5413,9 +5327,11 @@ static VkResult radv_alloc_memory(struct radv_device *device,
    }
 
    if (!wsi_info) {
-      result = radv_bo_list_add(device, mem->bo);
-      if (result != VK_SUCCESS)
-         goto fail;
+      if (device->use_global_bo_list) {
+         result = device->ws->buffer_make_resident(device->ws, mem->bo, true);
+         if (result != VK_SUCCESS)
+            goto fail;
+      }
    }
 
    *pMem = radv_device_memory_to_handle(mem);
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index 34df1c8c521..539e99b6f2b 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -713,17 +713,6 @@ struct radv_queue {
    bool cond_created;
 };
 
-struct radv_bo_list {
-   struct radv_winsys_bo_list list;
-   unsigned capacity;
-   struct u_rwlock rwlock;
-};
-
-VkResult radv_bo_list_add(struct radv_device *device,
-                          struct radeon_winsys_bo *bo);
-void radv_bo_list_remove(struct radv_device *device,
-                         struct radeon_winsys_bo *bo);
-
 #define RADV_BORDER_COLOR_COUNT 4096
 #define RADV_BORDER_COLOR_BUFFER_SIZE (sizeof(VkClearColorValue) * RADV_BORDER_COLOR_COUNT)
 
@@ -807,8 +796,6 @@ struct radv_device {
    /* Whether the driver uses a global BO list. */
    bool use_global_bo_list;
 
-   struct radv_bo_list bo_list;
-
    /* Whether anisotropy is forced with RADV_TEX_ANISO (-1 is disabled).
    */
   int force_aniso;
diff --git a/src/amd/vulkan/radv_radeon_winsys.h b/src/amd/vulkan/radv_radeon_winsys.h
index c7b67253a45..cbab78ad249 100644
--- a/src/amd/vulkan/radv_radeon_winsys.h
+++ b/src/amd/vulkan/radv_radeon_winsys.h
@@ -292,7 +292,6 @@ struct radeon_winsys {
                          struct radeon_cmdbuf *initial_preamble_cs,
                          struct radeon_cmdbuf *continue_preamble_cs,
                          struct radv_winsys_sem_info *sem_info,
-                         const struct radv_winsys_bo_list *bo_list, /* optional */
                          bool can_patch,
                          struct radeon_winsys_fence *fence);
 
diff --git a/src/amd/vulkan/radv_wsi.c b/src/amd/vulkan/radv_wsi.c
index 13c052de572..19bd6c1721f 100644
--- a/src/amd/vulkan/radv_wsi.c
+++ b/src/amd/vulkan/radv_wsi.c
@@ -44,10 +44,7 @@ radv_wsi_set_memory_ownership(VkDevice _device,
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
 
-   if (ownership)
-      radv_bo_list_add(device, mem->bo);
-   else
-      radv_bo_list_remove(device, mem->bo);
+   device->ws->buffer_make_resident(device->ws, mem->bo, ownership);
 }
 
 VkResult
diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
index 8fcecc1639c..1ff04b152a3 100644
--- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
+++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
@@ -693,7 +693,6 @@ radv_amdgpu_get_bo_list(struct radv_amdgpu_winsys *ws,
                         struct radv_amdgpu_winsys_bo **extra_bo_array,
                         unsigned num_extra_bo,
                         struct radeon_cmdbuf *extra_cs,
-                        const struct radv_winsys_bo_list *radv_bo_list,
                         unsigned *rnum_handles,
                         struct drm_amdgpu_bo_list_entry **rhandles)
 {
@@ -711,7 +710,7 @@ radv_amdgpu_get_bo_list(struct radv_amdgpu_winsys *ws,
          handles[i].bo_priority = ws->global_bo_list.bos[i]->priority;
          num_handles++;
       }
-   } else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
+   } else if (count == 1 && !num_extra_bo && !extra_cs &&
               !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers &&
               !ws->global_bo_list.count) {
       struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
@@ -739,10 +738,6 @@ radv_amdgpu_get_bo_list(struct radv_amdgpu_winsys *ws,
         total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
      }
 
-     if (radv_bo_list) {
-        total_buffer_count += radv_bo_list->count;
-     }
-
      total_buffer_count += ws->global_bo_list.count;
 
      if (total_buffer_count == 0)
@@ -807,25 +802,6 @@ radv_amdgpu_get_bo_list(struct radv_amdgpu_winsys *ws,
            }
         }
 
-     if (radv_bo_list) {
-        unsigned unique_bo_so_far = num_handles;
-        for (unsigned i = 0; i < radv_bo_list->count; ++i) {
-           struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
-           bool found = false;
-           for (unsigned j = 0; j < unique_bo_so_far; ++j) {
-              if (bo->bo_handle == handles[j].bo_handle) {
-                 found = true;
-                 break;
-              }
-           }
-           if (!found) {
-              handles[num_handles].bo_handle = bo->bo_handle;
-              handles[num_handles].bo_priority = bo->priority;
-              ++num_handles;
-           }
-        }
-     }
-
      unsigned unique_bo_so_far = num_handles;
      for (unsigned i = 0; i < ws->global_bo_list.count; ++i) {
         struct radv_amdgpu_winsys_bo *bo = ws->global_bo_list.bos[i];
@@ -862,7 +838,6 @@ static VkResult
 radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
                                      int queue_idx,
                                      struct radv_winsys_sem_info *sem_info,
-                                     const struct radv_winsys_bo_list *radv_bo_list,
                                      struct radeon_cmdbuf **cs_array,
                                      unsigned cs_count,
                                      struct radeon_cmdbuf *initial_preamble_cs,
@@ -905,7 +880,7 @@ radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
    /* Get the BO list.
     */
    result = radv_amdgpu_get_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
-                                    initial_preamble_cs, radv_bo_list,
+                                    initial_preamble_cs,
                                     &num_handles, &handles);
    if (result != VK_SUCCESS)
       goto fail;
@@ -949,7 +924,6 @@ static VkResult
 radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
                                       int queue_idx,
                                       struct radv_winsys_sem_info *sem_info,
-                                      const struct radv_winsys_bo_list *radv_bo_list,
                                       struct radeon_cmdbuf **cs_array,
                                       unsigned cs_count,
                                       struct radeon_cmdbuf *initial_preamble_cs,
@@ -977,7 +951,7 @@ radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
    /* Get the BO list. */
    result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
-                                    initial_preamble_cs, radv_bo_list,
+                                    initial_preamble_cs,
                                     &num_handles, &handles);
    if (result != VK_SUCCESS) {
       goto fail;
    }
@@ -1036,7 +1010,6 @@ static VkResult
 radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
                                     int queue_idx,
                                     struct radv_winsys_sem_info *sem_info,
-                                    const struct radv_winsys_bo_list *radv_bo_list,
                                     struct radeon_cmdbuf **cs_array,
                                     unsigned cs_count,
                                     struct radeon_cmdbuf *initial_preamble_cs,
@@ -1191,7 +1164,6 @@ radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
         result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[i], cnt,
                                          (struct radv_amdgpu_winsys_bo **)bos,
                                          number_of_ibs, preamble_cs,
-                                         radv_bo_list,
                                          &num_handles, &handles);
         if (result != VK_SUCCESS) {
            free(ibs);
@@ -1241,7 +1213,6 @@ static VkResult radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
                                              struct radeon_cmdbuf *initial_preamble_cs,
                                              struct radeon_cmdbuf *continue_preamble_cs,
                                              struct radv_winsys_sem_info *sem_info,
-                                             const struct radv_winsys_bo_list *bo_list,
                                              bool can_patch,
                                              struct radeon_winsys_fence *_fence)
 {
@@ -1251,13 +1222,13 @@ static VkResult radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
 
    assert(sem_info);
    if (!cs->ws->use_ib_bos) {
-      result = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
+      result = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, cs_array,
                                                    cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
    } else if (can_patch) {
-      result = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
+      result = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, cs_array,
                                                     cs_count, initial_preamble_cs, _fence);
    } else {
-      result = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
+      result = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, cs_array,
                                                      cs_count, initial_preamble_cs, _fence);
    }
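
Note on the replacement API: the winsys-side implementation of buffer_make_resident() is not part of this excerpt, so the sketch below is only an illustration of what such a hook has to do — record a buffer in a winsys-wide list under a lock when memory becomes resident and drop it again when it does not — which is essentially the bookkeeping that radv_bo_list_add()/radv_bo_list_remove() performed at the radv_device level before this patch. Every sketch_* name is hypothetical and does not exist in RADV; the real radv_amdgpu types and their priority/handle fields are omitted.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the winsys and BO types; the real RADV
 * structures (radv_amdgpu_winsys, radv_amdgpu_winsys_bo) are richer. */
struct sketch_bo {
   uint32_t bo_handle;
   bool is_local;   /* local BOs never enter a global list */
};

struct sketch_winsys {
   pthread_mutex_t global_bo_list_lock;
   struct sketch_bo **bos;
   unsigned count;
   unsigned capacity;
};

/* Add or remove a BO from the winsys-wide list. This mirrors the
 * radv_bo_list_add()/radv_bo_list_remove() pair deleted above, but the
 * bookkeeping is owned by the winsys instead of the radv_device. */
static int
sketch_buffer_make_resident(struct sketch_winsys *ws, struct sketch_bo *bo,
                            bool resident)
{
   int ret = 0;

   if (bo->is_local)
      return 0;

   pthread_mutex_lock(&ws->global_bo_list_lock);
   if (resident) {
      if (ws->count == ws->capacity) {
         unsigned capacity = ws->capacity ? ws->capacity * 2 : 4;
         void *data = realloc(ws->bos, capacity * sizeof(*ws->bos));
         if (!data) {
            ret = -1;   /* out of host memory */
            goto out;
         }
         ws->bos = data;
         ws->capacity = capacity;
      }
      ws->bos[ws->count++] = bo;
   } else {
      /* Walk backwards so recently added buffers are found first. */
      for (unsigned i = ws->count; i-- > 0;) {
         if (ws->bos[i] == bo) {
            ws->bos[i] = ws->bos[ws->count - 1];
            ws->count--;
            break;
         }
      }
   }
out:
   pthread_mutex_unlock(&ws->global_bo_list_lock);
   return ret;
}

int
main(void)
{
   struct sketch_winsys ws = { .global_bo_list_lock = PTHREAD_MUTEX_INITIALIZER };
   struct sketch_bo bo = { .bo_handle = 42, .is_local = false };

   sketch_buffer_make_resident(&ws, &bo, true);    /* vkAllocateMemory path */
   printf("resident BOs: %u\n", ws.count);
   sketch_buffer_make_resident(&ws, &bo, false);   /* vkFreeMemory path */
   printf("resident BOs: %u\n", ws.count);

   free(ws.bos);
   return 0;
}

With the list owned by the winsys, the submission path no longer needs a per-device list argument at all: radv_amdgpu_get_bo_list() already folds ws->global_bo_list into the handle array it builds, which is why the bo_list parameter can be dropped from cs_submit() and the per-submission rwlock around the device list goes away.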