From abf3bcd6ea62bab8ae31d1c883525ce166b1abbf Mon Sep 17 00:00:00 2001
From: Friedrich Vock
Date: Wed, 7 Dec 2022 16:11:38 +0100
Subject: [PATCH] radv: Add RMV resource tracking

Log creation and destruction of driver-internal and application-owned
GPU resources (command buffer upload BOs, descriptor pools, queue rings
and scratch BOs, border color palettes, heaps, sparse residency changes,
buffers, images, events, query pools, pipelines and shader arenas) so
that RMV (Radeon Memory Visualizer) traces can attribute memory to the
resources that own it.

Part-of:
---
 src/amd/vulkan/radv_cmd_buffer.c     |   8 +-
 src/amd/vulkan/radv_descriptor_set.c |   6 +-
 src/amd/vulkan/radv_device.c         | 161 ++++++++++++++++++++-------
 src/amd/vulkan/radv_image.c          |   9 +-
 src/amd/vulkan/radv_pipeline.c       |   5 +-
 src/amd/vulkan/radv_pipeline_rt.c    |  10 +-
 src/amd/vulkan/radv_query.c          |   7 +-
 src/amd/vulkan/radv_shader.c         |   7 +-
 8 files changed, 165 insertions(+), 48 deletions(-)

diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index 752990c3c3f..c79e633a62c 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -394,13 +394,16 @@ radv_destroy_cmd_buffer(struct vk_command_buffer *vk_cmd_buffer)
 
    list_for_each_entry_safe(struct radv_cmd_buffer_upload, up, &cmd_buffer->upload.list, list)
    {
+      radv_rmv_log_command_buffer_bo_destroy(cmd_buffer->device, up->upload_bo);
       cmd_buffer->device->ws->buffer_destroy(cmd_buffer->device->ws, up->upload_bo);
       list_del(&up->list);
       free(up);
    }
 
-   if (cmd_buffer->upload.upload_bo)
+   if (cmd_buffer->upload.upload_bo) {
+      radv_rmv_log_command_buffer_bo_destroy(cmd_buffer->device, cmd_buffer->upload.upload_bo);
       cmd_buffer->device->ws->buffer_destroy(cmd_buffer->device->ws, cmd_buffer->upload.upload_bo);
+   }
 
    if (cmd_buffer->cs)
       cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
@@ -487,6 +490,7 @@ radv_reset_cmd_buffer(struct vk_command_buffer *vk_cmd_buffer,
    list_for_each_entry_safe(struct radv_cmd_buffer_upload, up, &cmd_buffer->upload.list, list)
    {
+      radv_rmv_log_command_buffer_bo_destroy(cmd_buffer->device, up->upload_bo);
       cmd_buffer->device->ws->buffer_destroy(cmd_buffer->device->ws, up->upload_bo);
       list_del(&up->list);
       free(up);
@@ -575,6 +579,8 @@ radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer, uint64_t m
       vk_command_buffer_set_error(&cmd_buffer->vk, VK_ERROR_OUT_OF_DEVICE_MEMORY);
       return false;
    }
+   radv_rmv_log_command_buffer_bo_create(device, cmd_buffer->upload.upload_bo, 0,
+                                         cmd_buffer->upload.size, 0);
 
    return true;
 }
diff --git a/src/amd/vulkan/radv_descriptor_set.c b/src/amd/vulkan/radv_descriptor_set.c
index c667a80206a..2a0b344d4f3 100644
--- a/src/amd/vulkan/radv_descriptor_set.c
+++ b/src/amd/vulkan/radv_descriptor_set.c
@@ -772,11 +772,14 @@ radv_destroy_descriptor_pool(struct radv_device *device, const VkAllocationCallb
       }
    }
 
-   if (pool->bo)
+   if (pool->bo) {
+      radv_rmv_log_bo_destroy(device, pool->bo);
       device->ws->buffer_destroy(device->ws, pool->bo);
+   }
    if (pool->host_bo)
       vk_free2(&device->vk.alloc, pAllocator, pool->host_bo);
 
+   radv_rmv_log_resource_destroy(device, (uint64_t)radv_descriptor_pool_to_handle(pool));
    vk_object_base_finish(&pool->base);
    vk_free2(&device->vk.alloc, pAllocator, pool);
 }
@@ -924,6 +927,7 @@ radv_create_descriptor_pool(struct radv_device *device,
    pool->max_entry_count = pCreateInfo->maxSets;
 
    *pDescriptorPool = radv_descriptor_pool_to_handle(pool);
+   radv_rmv_log_descriptor_pool_create(device, pCreateInfo, *pDescriptorPool, is_internal);
    return VK_SUCCESS;
 }
diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index 22c13a5b1ce..f0f9e9cb049 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -3116,44 +3116,60 @@ radv_queue_init(struct radv_device *device, struct radv_queue *queue, int idx,
 }
 
 static void
-radv_queue_state_finish(struct radv_queue_state *queue, struct radeon_winsys *ws)
+radv_queue_state_finish(struct radv_queue_state *queue, struct radv_device *device)
 {
    if (queue->initial_full_flush_preamble_cs)
-      ws->cs_destroy(queue->initial_full_flush_preamble_cs);
+      device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
    if (queue->initial_preamble_cs)
-      ws->cs_destroy(queue->initial_preamble_cs);
+      device->ws->cs_destroy(queue->initial_preamble_cs);
    if (queue->continue_preamble_cs)
-      ws->cs_destroy(queue->continue_preamble_cs);
+      device->ws->cs_destroy(queue->continue_preamble_cs);
    if (queue->gang_wait_preamble_cs)
-      ws->cs_destroy(queue->gang_wait_preamble_cs);
+      device->ws->cs_destroy(queue->gang_wait_preamble_cs);
    if (queue->gang_wait_postamble_cs)
-      ws->cs_destroy(queue->gang_wait_postamble_cs);
+      device->ws->cs_destroy(queue->gang_wait_postamble_cs);
    if (queue->descriptor_bo)
-      ws->buffer_destroy(ws, queue->descriptor_bo);
-   if (queue->scratch_bo)
-      ws->buffer_destroy(ws, queue->scratch_bo);
-   if (queue->esgs_ring_bo)
-      ws->buffer_destroy(ws, queue->esgs_ring_bo);
-   if (queue->gsvs_ring_bo)
-      ws->buffer_destroy(ws, queue->gsvs_ring_bo);
-   if (queue->tess_rings_bo)
-      ws->buffer_destroy(ws, queue->tess_rings_bo);
-   if (queue->task_rings_bo)
-      ws->buffer_destroy(ws, queue->task_rings_bo);
-   if (queue->mesh_scratch_ring_bo)
-      ws->buffer_destroy(ws, queue->mesh_scratch_ring_bo);
-   if (queue->attr_ring_bo)
-      ws->buffer_destroy(ws, queue->attr_ring_bo);
+      device->ws->buffer_destroy(device->ws, queue->descriptor_bo);
+   if (queue->scratch_bo) {
+      radv_rmv_log_command_buffer_bo_destroy(device, queue->scratch_bo);
+      device->ws->buffer_destroy(device->ws, queue->scratch_bo);
+   }
+   if (queue->esgs_ring_bo) {
+      radv_rmv_log_command_buffer_bo_destroy(device, queue->esgs_ring_bo);
+      device->ws->buffer_destroy(device->ws, queue->esgs_ring_bo);
+   }
+   if (queue->gsvs_ring_bo) {
+      radv_rmv_log_command_buffer_bo_destroy(device, queue->gsvs_ring_bo);
+      device->ws->buffer_destroy(device->ws, queue->gsvs_ring_bo);
+   }
+   if (queue->tess_rings_bo) {
+      radv_rmv_log_command_buffer_bo_destroy(device, queue->tess_rings_bo);
+      device->ws->buffer_destroy(device->ws, queue->tess_rings_bo);
+   }
+   if (queue->task_rings_bo) {
+      radv_rmv_log_command_buffer_bo_destroy(device, queue->task_rings_bo);
+      device->ws->buffer_destroy(device->ws, queue->task_rings_bo);
+   }
+   if (queue->mesh_scratch_ring_bo) {
+      radv_rmv_log_command_buffer_bo_destroy(device, queue->mesh_scratch_ring_bo);
+      device->ws->buffer_destroy(device->ws, queue->mesh_scratch_ring_bo);
+   }
+   if (queue->attr_ring_bo) {
+      radv_rmv_log_command_buffer_bo_destroy(device, queue->attr_ring_bo);
+      device->ws->buffer_destroy(device->ws, queue->attr_ring_bo);
+   }
    if (queue->gds_bo) {
-      ws->buffer_make_resident(ws, queue->gds_bo, false);
-      ws->buffer_destroy(ws, queue->gds_bo);
+      device->ws->buffer_make_resident(device->ws, queue->gds_bo, false);
+      device->ws->buffer_destroy(device->ws, queue->gds_bo);
    }
    if (queue->gds_oa_bo) {
-      ws->buffer_make_resident(ws, queue->gds_oa_bo, false);
-      ws->buffer_destroy(ws, queue->gds_oa_bo);
+      device->ws->buffer_make_resident(device->ws, queue->gds_oa_bo, false);
+      device->ws->buffer_destroy(device->ws, queue->gds_oa_bo);
+   }
+   if (queue->compute_scratch_bo) {
+      radv_rmv_log_command_buffer_bo_destroy(device, queue->compute_scratch_bo);
+      device->ws->buffer_destroy(device->ws, queue->compute_scratch_bo);
    }
-   if (queue->compute_scratch_bo)
-      ws->buffer_destroy(ws, queue->compute_scratch_bo);
 }
 
 static void
@@ -3164,14 +3180,14 @@ radv_queue_finish(struct radv_queue *queue)
       queue->ace_internal_state->task_rings_bo = NULL;
 
       /* Clean up the internal ACE queue state. */
-      radv_queue_state_finish(queue->ace_internal_state, queue->device->ws);
+      radv_queue_state_finish(queue->ace_internal_state, queue->device);
       free(queue->ace_internal_state);
    }
 
    if (queue->gang_sem_bo)
       queue->device->ws->buffer_destroy(queue->device->ws, queue->gang_sem_bo);
 
-   radv_queue_state_finish(&queue->state, queue->device->ws);
+   radv_queue_state_finish(&queue->state, queue->device);
    vk_queue_finish(&queue->vk);
 }
 
@@ -3202,6 +3218,8 @@ radv_device_init_border_color(struct radv_device *device)
    if (result != VK_SUCCESS)
       return vk_error(device, result);
 
+   radv_rmv_log_border_color_palette_create(device, device->border_color_data.bo);
+
    result = device->ws->buffer_make_resident(device->ws, device->border_color_data.bo, true);
    if (result != VK_SUCCESS)
       return vk_error(device, result);
@@ -3218,6 +3236,7 @@ static void
 radv_device_finish_border_color(struct radv_device *device)
 {
    if (device->border_color_data.bo) {
+      radv_rmv_log_border_color_palette_destroy(device, device->border_color_data.bo);
       device->ws->buffer_make_resident(device->ws, device->border_color_data.bo, false);
 
       device->ws->buffer_destroy(device->ws, device->border_color_data.bo);
@@ -4798,6 +4817,7 @@ radv_update_preamble_cs(struct radv_queue_state *queue, struct radv_device *devi
                                  RADV_BO_PRIORITY_SCRATCH, 0, &scratch_bo);
       if (result != VK_SUCCESS)
         goto fail;
+      radv_rmv_log_command_buffer_bo_create(device, scratch_bo, 0, 0, scratch_size);
    }
 
    const uint32_t compute_scratch_size =
@@ -4809,6 +4829,7 @@ radv_update_preamble_cs(struct radv_queue_state *queue, struct radv_device *devi
                                  RADV_BO_PRIORITY_SCRATCH, 0, &compute_scratch_bo);
       if (result != VK_SUCCESS)
         goto fail;
+      radv_rmv_log_command_buffer_bo_create(device, compute_scratch_bo, 0, 0, compute_scratch_size);
    }
 
    if (needs->esgs_ring_size > queue->ring_info.esgs_ring_size) {
@@ -4816,6 +4837,7 @@ radv_update_preamble_cs(struct radv_queue_state *queue, struct radv_device *devi
                                  RADV_BO_PRIORITY_SCRATCH, 0, &esgs_ring_bo);
       if (result != VK_SUCCESS)
          goto fail;
+      radv_rmv_log_command_buffer_bo_create(device, esgs_ring_bo, 0, 0, needs->esgs_ring_size);
    }
 
    if (needs->gsvs_ring_size > queue->ring_info.gsvs_ring_size) {
@@ -4823,14 +4845,17 @@ radv_update_preamble_cs(struct radv_queue_state *queue, struct radv_device *devi
                                  RADV_BO_PRIORITY_SCRATCH, 0, &gsvs_ring_bo);
       if (result != VK_SUCCESS)
          goto fail;
+      radv_rmv_log_command_buffer_bo_create(device, gsvs_ring_bo, 0, 0, needs->gsvs_ring_size);
    }
 
    if (!queue->ring_info.tess_rings && needs->tess_rings) {
-      result = ws->buffer_create(
-         ws, device->physical_device->hs.tess_offchip_ring_offset + device->physical_device->hs.tess_offchip_ring_size,
-         256, RADEON_DOMAIN_VRAM, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, &tess_rings_bo);
+      uint64_t tess_rings_size = device->physical_device->hs.tess_offchip_ring_offset +
+                                 device->physical_device->hs.tess_offchip_ring_size;
+      result = ws->buffer_create(ws, tess_rings_size, 256, RADEON_DOMAIN_VRAM, ring_bo_flags,
+                                 RADV_BO_PRIORITY_SCRATCH, 0, &tess_rings_bo);
       if (result != VK_SUCCESS)
          goto fail;
+      radv_rmv_log_command_buffer_bo_create(device, tess_rings_bo, 0, 0, tess_rings_size);
    }
 
    if (!queue->ring_info.task_rings && needs->task_rings) {
@@ -4847,6 +4872,8 @@ radv_update_preamble_cs(struct radv_queue_state *queue, struct radv_device *devi
                                  0, &task_rings_bo);
       if (result != VK_SUCCESS)
          goto fail;
+      radv_rmv_log_command_buffer_bo_create(device, task_rings_bo, 0, 0,
+                                            device->physical_device->task_info.bo_size_bytes);
 
       result = radv_initialise_task_control_buffer(device, task_rings_bo);
       if (result != VK_SUCCESS)
          goto fail;
@@ -4861,6 +4888,9 @@ radv_update_preamble_cs(struct radv_queue_state *queue, struct radv_device *devi
       if (result != VK_SUCCESS)
          goto fail;
 
+      radv_rmv_log_command_buffer_bo_create(
+         device, mesh_scratch_ring_bo, 0, 0,
+         RADV_MESH_SCRATCH_NUM_ENTRIES * RADV_MESH_SCRATCH_ENTRY_BYTES);
    }
 
    if (needs->attr_ring_size > queue->ring_info.attr_ring_size) {
@@ -4870,6 +4900,7 @@ radv_update_preamble_cs(struct radv_queue_state *queue, struct radv_device *devi
                                  ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, 0, &attr_ring_bo);
       if (result != VK_SUCCESS)
          goto fail;
+      radv_rmv_log_command_buffer_bo_create(device, attr_ring_bo, 0, 0, needs->attr_ring_size);
    }
 
    if (!queue->ring_info.gds && needs->gds) {
@@ -5070,26 +5101,34 @@ radv_update_preamble_cs(struct radv_queue_state *queue, struct radv_device *devi
    queue->continue_preamble_cs = dest_cs[2];
 
    if (scratch_bo != queue->scratch_bo) {
-      if (queue->scratch_bo)
+      if (queue->scratch_bo) {
+         radv_rmv_log_command_buffer_bo_destroy(device, queue->scratch_bo);
          ws->buffer_destroy(ws, queue->scratch_bo);
+      }
       queue->scratch_bo = scratch_bo;
    }
 
    if (compute_scratch_bo != queue->compute_scratch_bo) {
-      if (queue->compute_scratch_bo)
+      if (queue->compute_scratch_bo) {
+         radv_rmv_log_command_buffer_bo_destroy(device, queue->compute_scratch_bo);
          ws->buffer_destroy(ws, queue->compute_scratch_bo);
+      }
       queue->compute_scratch_bo = compute_scratch_bo;
    }
 
    if (esgs_ring_bo != queue->esgs_ring_bo) {
-      if (queue->esgs_ring_bo)
+      if (queue->esgs_ring_bo) {
+         radv_rmv_log_command_buffer_bo_destroy(device, queue->esgs_ring_bo);
          ws->buffer_destroy(ws, queue->esgs_ring_bo);
+      }
       queue->esgs_ring_bo = esgs_ring_bo;
    }
 
    if (gsvs_ring_bo != queue->gsvs_ring_bo) {
-      if (queue->gsvs_ring_bo)
+      if (queue->gsvs_ring_bo) {
+         radv_rmv_log_command_buffer_bo_destroy(device, queue->gsvs_ring_bo);
          ws->buffer_destroy(ws, queue->gsvs_ring_bo);
+      }
       queue->gsvs_ring_bo = gsvs_ring_bo;
    }
 
@@ -5252,6 +5291,11 @@ radv_sparse_buffer_bind_memory(struct radv_device *device, const VkSparseBufferM
                                                   mem ? mem->bo : NULL, memoryOffset);
          if (result != VK_SUCCESS)
             return result;
+
+         if (mem)
+            radv_rmv_log_sparse_add_residency(device, buffer->bo, resourceOffset);
+         else
+            radv_rmv_log_sparse_remove_residency(device, buffer->bo, resourceOffset);
       }
       mem = cur_mem;
       resourceOffset = bind->pBinds[i].resourceOffset;
@@ -5262,6 +5306,11 @@ radv_sparse_buffer_bind_memory(struct radv_device *device, const VkSparseBufferM
       result = device->ws->buffer_virtual_bind(device->ws, buffer->bo, resourceOffset, size,
                                                mem ? mem->bo : NULL, memoryOffset);
+
+      if (mem)
+         radv_rmv_log_sparse_add_residency(device, buffer->bo, resourceOffset);
+      else
+         radv_rmv_log_sparse_remove_residency(device, buffer->bo, resourceOffset);
    }
 
    return result;
@@ -5285,6 +5334,13 @@ radv_sparse_image_opaque_bind_memory(struct radv_device *device,
                                                   mem ? mem->bo : NULL, bind->pBinds[i].memoryOffset);
       if (result != VK_SUCCESS)
          return result;
+
+      if (bind->pBinds[i].memory)
+         radv_rmv_log_sparse_add_residency(device, image->bindings[0].bo,
+                                           bind->pBinds[i].resourceOffset);
+      else
+         radv_rmv_log_sparse_remove_residency(device, image->bindings[0].bo,
+                                              bind->pBinds[i].resourceOffset);
    }
 
    return VK_SUCCESS;
@@ -5348,6 +5404,12 @@ radv_sparse_image_bind_memory(struct radv_device *device, const VkSparseImageMem
                                                     mem ? mem->bo : NULL, mem_offset);
            if (result != VK_SUCCESS)
               return result;
+
+            if (bind->pBinds[i].memory)
+               radv_rmv_log_sparse_add_residency(device, image->bindings[0].bo, offset);
+            else
+               radv_rmv_log_sparse_remove_residency(device, image->bindings[0].bo, offset);
+
         } else {
            uint32_t img_y_increment = pitch * bs * surface->prt_tile_depth;
            uint32_t mem_y_increment = aligned_extent_width * bs * surface->prt_tile_depth;
@@ -5361,6 +5423,11 @@ radv_sparse_image_bind_memory(struct radv_device *device, const VkSparseImageMem
                  mem ? mem->bo : NULL, mem_offset + mem_y_increment * y + mem_z_increment * z);
               if (result != VK_SUCCESS)
                  return result;
+
+               if (bind->pBinds[i].memory)
+                  radv_rmv_log_sparse_add_residency(device, image->bindings[0].bo, offset);
+               else
+                  radv_rmv_log_sparse_remove_residency(device, image->bindings[0].bo, offset);
            }
         }
      }
@@ -5804,6 +5871,8 @@ radv_queue_submit(struct vk_queue *vqueue, struct vk_queue_submit *submission)
    struct radv_queue *queue = (struct radv_queue *)vqueue;
    VkResult result;
 
+   radv_rmv_log_submit(queue->device, radv_queue_ring(queue));
+
    result = radv_queue_submit_bind_sparse_memory(queue->device, submission);
    if (result != VK_SUCCESS)
       goto fail;
@@ -5933,6 +6002,8 @@ radv_free_memory(struct radv_device *device, const VkAllocationCallbacks *pAlloc
 #endif
 
    if (mem->bo) {
+      radv_rmv_log_bo_destroy(device, mem->bo);
+
       if (device->overallocation_disallowed) {
          mtx_lock(&device->overallocation_mutex);
         device->allocated_memory_size[mem->heap_index] -= mem->alloc_size;
@@ -5945,6 +6016,7 @@ radv_free_memory(struct radv_device *device, const VkAllocationCallbacks *pAlloc
       mem->bo = NULL;
    }
 
+   radv_rmv_log_resource_destroy(device, (uint64_t)radv_device_memory_to_handle(mem));
    radv_device_memory_finish(mem);
    vk_free2(&device->vk.alloc, pAllocator, mem);
 }
@@ -5970,6 +6042,8 @@ radv_alloc_memory(struct radv_device *device, const VkMemoryAllocateInfo *pAlloc
       vk_find_struct_const(pAllocateInfo->pNext, IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID);
    const VkImportMemoryHostPointerInfoEXT *host_ptr_info =
       vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_HOST_POINTER_INFO_EXT);
+   const struct VkMemoryAllocateFlagsInfo *flags_info =
+      vk_find_struct_const(pAllocateInfo->pNext, MEMORY_ALLOCATE_FLAGS_INFO);
 
    const struct wsi_memory_allocate_info *wsi_info =
       vk_find_struct_const(pAllocateInfo->pNext, WSI_MEMORY_ALLOCATE_INFO_MESA);
@@ -6103,7 +6177,6 @@ radv_alloc_memory(struct radv_device *device, const VkMemoryAllocateInfo *pAlloc
       }
    }
 
-   const VkMemoryAllocateFlagsInfo *flags_info = vk_find_struct_const(pAllocateInfo->pNext, MEMORY_ALLOCATE_FLAGS_INFO);
    if (flags_info && flags_info->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)
       flags |= RADEON_FLAG_REPLAYABLE;
 
@@ -6150,7 +6223,7 @@ radv_alloc_memory(struct radv_device *device, const VkMemoryAllocateInfo *pAlloc
    }
 
    *pMem = radv_device_memory_to_handle(mem);
-
+   radv_rmv_log_heap_create(device, *pMem, is_internal, flags_info ? flags_info->flags : 0);
    return VK_SUCCESS;
 
 fail:
@@ -6189,6 +6262,7 @@ radv_MapMemory(VkDevice _device, VkDeviceMemory _memory, VkDeviceSize offset, Vk
    *ppData = device->ws->buffer_map(mem->bo);
 
    if (*ppData) {
+      vk_rmv_log_cpu_map(&device->vk, mem->bo->va, false);
       *ppData = (uint8_t *)*ppData + offset;
       return VK_SUCCESS;
    }
@@ -6202,6 +6276,7 @@ radv_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
 
+   vk_rmv_log_cpu_map(&device->vk, mem->bo->va, true);
    if (mem->user_ptr == NULL)
       device->ws->buffer_unmap(mem->bo);
 }
@@ -6395,6 +6470,7 @@ radv_BindBufferMemory2(VkDevice _device, uint32_t bindInfoCount,
 
       buffer->bo = mem->bo;
       buffer->offset = pBindInfos[i].memoryOffset;
+      radv_rmv_log_buffer_bind(device, pBindInfos[i].buffer);
    }
    return VK_SUCCESS;
 }
@@ -6450,6 +6526,7 @@ radv_BindImageMemory2(VkDevice _device, uint32_t bindInfoCount,
          image->bindings[0].bo = mem->bo;
          image->bindings[0].offset = pBindInfos[i].memoryOffset;
       }
+      radv_rmv_log_image_bind(device, pBindInfos[i].image);
    }
    return VK_SUCCESS;
 }
@@ -6507,7 +6584,7 @@ radv_create_event(struct radv_device *device, const VkEventCreateInfo *pCreateIn
    }
 
    *pEvent = radv_event_to_handle(event);
-
+   radv_rmv_log_event_create(device, *pEvent, pCreateInfo->flags, is_internal);
    return VK_SUCCESS;
 }
@@ -6596,6 +6673,7 @@ radv_destroy_buffer(struct radv_device *device, const VkAllocationCallbacks *pAl
    if ((buffer->vk.create_flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) && buffer->bo)
       device->ws->buffer_destroy(device->ws, buffer->bo);
 
+   radv_rmv_log_resource_destroy(device, (uint64_t)radv_buffer_to_handle(buffer));
    radv_buffer_finish(buffer);
    vk_free2(&device->vk.alloc, pAllocator, buffer);
 }
@@ -6635,10 +6713,13 @@ radv_create_buffer(struct radv_device *device, const VkBufferCreateInfo *pCreate
          radv_destroy_buffer(device, pAllocator, buffer);
          return vk_error(device, result);
       }
+      radv_rmv_log_bo_allocate(device, buffer->bo, buffer->vk.size, true);
    }
 
    *pBuffer = radv_buffer_to_handle(buffer);
-
+   vk_rmv_log_buffer_create(&device->vk, false, *pBuffer);
+   if (buffer->bo)
+      radv_rmv_log_buffer_bind(device, *pBuffer);
    return VK_SUCCESS;
 }
diff --git a/src/amd/vulkan/radv_image.c b/src/amd/vulkan/radv_image.c
index 97085b53176..8ddd67d0e4f 100644
--- a/src/amd/vulkan/radv_image.c
+++ b/src/amd/vulkan/radv_image.c
@@ -1757,14 +1757,17 @@ static void
 radv_destroy_image(struct radv_device *device, const VkAllocationCallbacks *pAllocator,
                    struct radv_image *image)
 {
-   if ((image->vk.create_flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) && image->bindings[0].bo)
+   if ((image->vk.create_flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) && image->bindings[0].bo) {
+      radv_rmv_log_bo_destroy(device, image->bindings[0].bo);
       device->ws->buffer_destroy(device->ws, image->bindings[0].bo);
+   }
 
    if (image->owned_memory != VK_NULL_HANDLE) {
       RADV_FROM_HANDLE(radv_device_memory, mem, image->owned_memory);
       radv_free_memory(device, pAllocator, mem);
    }
 
+   radv_rmv_log_resource_destroy(device, (uint64_t)radv_image_to_handle(image));
    vk_image_finish(&image->vk);
    vk_free2(&device->vk.alloc, pAllocator, image);
 }
@@ -1937,6 +1940,7 @@ radv_image_create(VkDevice _device, const struct radv_image_create_info *create_
          radv_destroy_image(device, alloc, image);
          return vk_error(device, result);
       }
+      radv_rmv_log_bo_allocate(device, image->bindings[0].bo, image->size, true);
    }
 
    if (device->instance->debug_flags & RADV_DEBUG_IMG) {
@@ -1945,6 +1949,9 @@ radv_image_create(VkDevice _device, const struct radv_image_create_info *create_
    *pImage = radv_image_to_handle(image);
 
+   radv_rmv_log_image_create(device, pCreateInfo, is_internal, *pImage);
+   if (image->bindings[0].bo)
+      radv_rmv_log_image_bind(device, *pImage);
    return VK_SUCCESS;
 }
diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index 9ee0ad854eb..7ad689484d1 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -201,6 +201,7 @@ radv_pipeline_destroy(struct radv_device *device, struct radv_pipeline *pipeline
    if (pipeline->cs.buf)
       free(pipeline->cs.buf);
 
+   radv_rmv_log_resource_destroy(device, (uint64_t)radv_pipeline_to_handle(pipeline));
    vk_object_base_finish(&pipeline->base);
    vk_free2(&device->vk.alloc, allocator, pipeline);
 }
@@ -5291,7 +5292,7 @@ radv_graphics_pipeline_create(VkDevice _device, VkPipelineCache _cache,
    }
 
    *pPipeline = radv_pipeline_to_handle(&pipeline->base);
-
+   radv_rmv_log_graphics_pipeline_create(device, pCreateInfo->flags, &pipeline->base, is_internal);
    return VK_SUCCESS;
 }
 
@@ -5591,7 +5592,7 @@ radv_compute_pipeline_create(VkDevice _device, VkPipelineCache _cache,
    radv_compute_pipeline_init(pipeline, pipeline_layout);
 
    *pPipeline = radv_pipeline_to_handle(&pipeline->base);
-
+   radv_rmv_log_compute_pipeline_create(device, pCreateInfo->flags, &pipeline->base, is_internal);
    return VK_SUCCESS;
 }
diff --git a/src/amd/vulkan/radv_pipeline_rt.c b/src/amd/vulkan/radv_pipeline_rt.c
index d11b2995446..35c93236309 100644
--- a/src/amd/vulkan/radv_pipeline_rt.c
+++ b/src/amd/vulkan/radv_pipeline_rt.c
@@ -420,8 +420,16 @@ radv_CreateRayTracingPipelinesKHR(VkDevice _device, VkDeferredOperationKHR defer
    for (; i < count; ++i)
       pPipelines[i] = VK_NULL_HANDLE;
 
+   if (result != VK_SUCCESS)
+      return result;
+
+   RADV_FROM_HANDLE(radv_device, device, _device);
+   for (uint32_t j = 0; j < count; ++j)
+      radv_rmv_log_compute_pipeline_create(device, pCreateInfos[j].flags,
+                                           radv_pipeline_from_handle(pPipelines[j]), false);
+
    /* Work around Portal RTX not handling VK_OPERATION_NOT_DEFERRED_KHR correctly. */
-   if (result == VK_SUCCESS && deferredOperation != VK_NULL_HANDLE)
+   if (deferredOperation != VK_NULL_HANDLE)
       return VK_OPERATION_DEFERRED_KHR;
 
    return result;
diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c
index 4ef2eabb664..cf599893f85 100644
--- a/src/amd/vulkan/radv_query.c
+++ b/src/amd/vulkan/radv_query.c
@@ -1060,8 +1060,12 @@ radv_destroy_query_pool(struct radv_device *device, const VkAllocationCallbacks
    if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR)
       radv_pc_deinit_query_pool((struct radv_pc_query_pool *)pool);
 
-   if (pool->bo)
+   if (pool->bo) {
+      radv_rmv_log_bo_destroy(device, pool->bo);
       device->ws->buffer_destroy(device->ws, pool->bo);
+   }
+
+   radv_rmv_log_resource_destroy(device, (uint64_t)radv_query_pool_to_handle(pool));
    vk_object_base_finish(&pool->base);
    vk_free2(&device->vk.alloc, pAllocator, pool);
 }
@@ -1162,6 +1166,7 @@ radv_create_query_pool(struct radv_device *device, const VkQueryPoolCreateInfo *
    }
 
    *pQueryPool = radv_query_pool_to_handle(pool);
+   radv_rmv_log_query_pool_create(device, *pQueryPool, is_internal);
    return VK_SUCCESS;
 }
diff --git a/src/amd/vulkan/radv_shader.c b/src/amd/vulkan/radv_shader.c
index e0fc13ba470..a392ae8b693 100644
--- a/src/amd/vulkan/radv_shader.c
+++ b/src/amd/vulkan/radv_shader.c
@@ -1612,6 +1612,7 @@ radv_alloc_shader_memory(struct radv_device *device, uint32_t size, void *ptr)
                                   RADV_BO_PRIORITY_SHADER, 0, &arena->bo);
       if (result != VK_SUCCESS)
          goto fail;
+      radv_rmv_log_bo_allocate(device, arena->bo, arena_size, true);
 
       list_inithead(&arena->entries);
 
@@ -1649,8 +1650,10 @@ fail:
    mtx_unlock(&device->shader_arena_mutex);
    free(alloc);
    free(hole);
-   if (arena && arena->bo)
+   if (arena && arena->bo) {
+      radv_rmv_log_bo_destroy(device, arena->bo);
       device->ws->buffer_destroy(device->ws, arena->bo);
+   }
    free(arena);
    return NULL;
 }
@@ -1702,6 +1705,7 @@ radv_free_shader_memory(struct radv_device *device, union radv_shader_arena_bloc
          struct radv_shader_arena *arena = hole->arena;
          free_block_obj(device, hole);
 
+         radv_rmv_log_bo_destroy(device, arena->bo);
          device->ws->buffer_destroy(device->ws, arena->bo);
          list_del(&arena->list);
          free(arena);
@@ -1733,6 +1737,7 @@ radv_destroy_shader_arenas(struct radv_device *device)
 
    list_for_each_entry_safe(struct radv_shader_arena, arena, &device->shader_arenas, list)
    {
+      radv_rmv_log_bo_destroy(device, arena->bo);
      device->ws->buffer_destroy(device->ws, arena->bo);
      free(arena);
   }
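
Reviewer note, not part of the patch: the pattern the diff repeats is that every
BO allocation gets a matching RMV log call, and the destroy-side log must run
while the BO is still alive, since the radv_rmv_log_*_destroy helpers read
bo->va to identify the allocation being removed from the trace. Below is a
minimal, self-contained C sketch of that pairing discipline. mini_bo and the
trace_* functions are hypothetical stand-ins for radeon_winsys_bo and the
radv_rmv_log_* helpers; this is an illustration of the ordering argument, not
RADV code.

/* Hypothetical model of the create/destroy logging pairing used above. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mini_bo {
   uint64_t va;   /* GPU virtual address; the key RMV-style traces use */
   uint64_t size;
};

/* Stand-in for radv_rmv_log_bo_allocate(). */
static void trace_bo_allocate(const struct mini_bo *bo)
{
   printf("trace: allocate va=0x%" PRIx64 " size=%" PRIu64 "\n", bo->va, bo->size);
}

/* Stand-in for radv_rmv_log_bo_destroy(); it reads bo->va, so it must be
 * called before the BO's memory is released. */
static void trace_bo_destroy(const struct mini_bo *bo)
{
   printf("trace: destroy va=0x%" PRIx64 "\n", bo->va);
}

int main(void)
{
   struct mini_bo *bo = calloc(1, sizeof(*bo));
   if (!bo)
      return 1;
   bo->va = 0x100000;
   bo->size = 64 * 1024;

   trace_bo_allocate(bo);

   /* Log first, free second, mirroring e.g. radv_destroy_cmd_buffer in the
    * patch; the reverse order would read freed memory. */
   trace_bo_destroy(bo);
   free(bo);
   return 0;
}

The same ordering argument applies to every buffer_destroy call the patch
instruments, which is why the destroy-side hooks in the hunks above are placed
before the corresponding ws->buffer_destroy calls.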