radv: use radv_buffer_get_va() more
Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38031>
parent 89fbcc8c34
commit f9af7f7b2a
5 changed files with 21 additions and 21 deletions
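The change is mechanical: every direct read of a winsys BO's va field now goes through the radv_buffer_get_va() accessor. As a minimal sketch, assuming the helper is a thin inline wrapper over the BO's stored GPU virtual address (the struct below is an illustrative subset, not the full Mesa definition):

   #include <stdint.h>

   /* Illustrative subset of the winsys BO; the real struct has many more fields. */
   struct radeon_winsys_bo {
      uint64_t va; /* GPU virtual address assigned when the BO is allocated */
   };

   /* Thin accessor: callers write radv_buffer_get_va(bo) instead of bo->va,
    * so any future change to how VAs are derived (validation, indirection,
    * capture/replay offsets) only has to touch this one helper. */
   static inline uint64_t
   radv_buffer_get_va(const struct radeon_winsys_bo *bo)
   {
      return bo->va;
   }

Under that assumption the commit is a pure refactor: each of the 21 replaced lines swaps a direct field access for the equivalent accessor call.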
@@ -36,7 +36,7 @@ radv_create_shadow_regs_preamble(struct radv_device *device, struct radv_queue_s
       goto fail;
 
    /* fill the cs for shadow regs preamble ib that starts the register shadowing */
-   pm4 = ac_create_shadowing_ib_preamble(gpu_info, queue_state->shadowed_regs->va, device->pbb_allowed);
+   pm4 = ac_create_shadowing_ib_preamble(gpu_info, radv_buffer_get_va(queue_state->shadowed_regs), device->pbb_allowed);
    if (!pm4)
       goto fail_create;
 
@@ -53,7 +53,7 @@ radv_free_memory(struct radv_device *device, const VkAllocationCallbacks *pAlloc
 #endif
 
    if (mem->bo) {
-      radv_va_validation_update_page(device, mem->bo->va, mem->alloc_size, false);
+      radv_va_validation_update_page(device, radv_buffer_get_va(mem->bo), mem->alloc_size, false);
 
       if (device->overallocation_disallowed) {
          mtx_lock(&device->overallocation_mutex);
@@ -302,7 +302,7 @@ radv_alloc_memory(struct radv_device *device, const VkMemoryAllocateInfo *pAlloc
       mem->heap_index = heap_index;
       mem->alloc_size = alloc_size;
 
-      radv_va_validation_update_page(device, mem->bo->va, alloc_size, true);
+      radv_va_validation_update_page(device, radv_buffer_get_va(mem->bo), alloc_size, true);
    }
 
    if (!wsi_info) {
@@ -370,7 +370,7 @@ radv_MapMemory2(VkDevice _device, const VkMemoryMapInfo *pMemoryMapInfo, void **
    *ppData = device->ws->buffer_map(device->ws, mem->bo, use_fixed_address, fixed_address);
 
    if (*ppData) {
-      vk_rmv_log_cpu_map(&device->vk, mem->bo->va, false);
+      vk_rmv_log_cpu_map(&device->vk, radv_buffer_get_va(mem->bo), false);
       *ppData = (uint8_t *)*ppData + pMemoryMapInfo->offset;
       return VK_SUCCESS;
    }
@@ -384,7 +384,7 @@ radv_UnmapMemory2(VkDevice _device, const VkMemoryUnmapInfo *pMemoryUnmapInfo)
    VK_FROM_HANDLE(radv_device, device, _device);
    VK_FROM_HANDLE(radv_device_memory, mem, pMemoryUnmapInfo->memory);
 
-   vk_rmv_log_cpu_map(&device->vk, mem->bo->va, true);
+   vk_rmv_log_cpu_map(&device->vk, radv_buffer_get_va(mem->bo), true);
    if (mem->user_ptr == NULL)
       device->ws->buffer_unmap(device->ws, mem->bo, (pMemoryUnmapInfo->flags & VK_MEMORY_UNMAP_RESERVE_BIT_EXT));
 
@@ -1311,7 +1311,7 @@ radv_GetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline
       if (shader) {
         data[i].recursive_shader_alloc.offset = shader->alloc->offset;
         data[i].recursive_shader_alloc.size = shader->alloc->size;
-        data[i].recursive_shader_alloc.arena_va = shader->alloc->arena->bo->va;
+        data[i].recursive_shader_alloc.arena_va = radv_buffer_get_va(shader->alloc->arena->bo);
         data[i].recursive_shader_alloc.arena_size = shader->alloc->arena->size;
       }
    }
 
@@ -515,7 +515,7 @@ radv_rmv_log_bo_allocate(struct radv_device *device, struct radeon_winsys_bo *bo
       return;
 
    struct vk_rmv_virtual_allocate_token token = {0};
-   token.address = bo->va;
+   token.address = radv_buffer_get_va(bo);
    /* If all VRAM is visible, no bo will be in invisible memory. */
    token.is_in_invisible_vram = bo->vram_no_cpu_access && !pdev->info.all_vram_visible;
    token.preferred_domains = (enum vk_rmv_kernel_memory_domain)bo->initial_domain;
@@ -539,7 +539,7 @@ radv_rmv_log_bo_destroy(struct radv_device *device, struct radeon_winsys_bo *bo)
       return;
 
    struct vk_rmv_virtual_free_token token = {0};
-   token.address = bo->va;
+   token.address = radv_buffer_get_va(bo);
 
    simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
    vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_VIRTUAL_FREE, &token);
@@ -663,14 +663,14 @@ radv_rmv_log_command_buffer_bo_create(struct radv_device *device, struct radeon_
    vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &create_token);
    log_resource_bind_locked(device, upload_resource_identifier, bo->initial_domain, radv_buffer_get_va(bo), bo->size);
    simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
-   vk_rmv_log_cpu_map(&device->vk, bo->va, false);
+   vk_rmv_log_cpu_map(&device->vk, radv_buffer_get_va(bo), false);
 }
 
 void
 radv_rmv_log_command_buffer_bo_destroy(struct radv_device *device, struct radeon_winsys_bo *bo)
 {
    radv_rmv_log_resource_destroy(device, (uint64_t)(uintptr_t)bo);
-   vk_rmv_log_cpu_map(&device->vk, bo->va, true);
+   vk_rmv_log_cpu_map(&device->vk, radv_buffer_get_va(bo), true);
 }
 
 void
@@ -692,7 +692,7 @@ radv_rmv_log_border_color_palette_create(struct radv_device *device, struct rade
    create_token.border_color_palette.num_entries = 255; /* = RADV_BORDER_COLOR_COUNT; */
 
    struct vk_rmv_resource_bind_token bind_token;
-   bind_token.address = bo->va;
+   bind_token.address = radv_buffer_get_va(bo);
    bind_token.is_system_memory = false;
    bind_token.resource_id = resource_id;
    bind_token.size = RADV_BORDER_COLOR_BUFFER_SIZE;
@@ -700,14 +700,14 @@ radv_rmv_log_border_color_palette_create(struct radv_device *device, struct rade
    vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &create_token);
    vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_BIND, &bind_token);
    simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
-   vk_rmv_log_cpu_map(&device->vk, bo->va, false);
+   vk_rmv_log_cpu_map(&device->vk, radv_buffer_get_va(bo), false);
 }
 
 void
 radv_rmv_log_border_color_palette_destroy(struct radv_device *device, struct radeon_winsys_bo *bo)
 {
    radv_rmv_log_resource_destroy(device, (uint64_t)(uintptr_t)bo);
-   vk_rmv_log_cpu_map(&device->vk, bo->va, true);
+   vk_rmv_log_cpu_map(&device->vk, radv_buffer_get_va(bo), true);
 }
 
 void
@@ -717,7 +717,7 @@ radv_rmv_log_sparse_add_residency(struct radv_device *device, struct radeon_wins
       return;
 
    struct vk_rmv_resource_reference_token token = {0};
-   token.virtual_address = src_bo->va + offset;
+   token.virtual_address = radv_buffer_get_va(src_bo) + offset;
    token.residency_removed = false;
 
    simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
@@ -733,7 +733,7 @@ radv_rmv_log_sparse_remove_residency(struct radv_device *device, struct radeon_w
       return;
 
    struct vk_rmv_resource_reference_token token = {0};
-   token.virtual_address = src_bo->va + offset;
+   token.virtual_address = radv_buffer_get_va(src_bo) + offset;
    token.residency_removed = true;
 
    simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
@@ -752,7 +752,7 @@ radv_rmv_log_descriptor_pool_create(struct radv_device *device, const VkDescript
    VK_FROM_HANDLE(radv_descriptor_pool, pool, _pool);
 
    if (pool->bo)
-      vk_rmv_log_cpu_map(&device->vk, pool->bo->va, false);
+      vk_rmv_log_cpu_map(&device->vk, radv_buffer_get_va(pool->bo), false);
 
    simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
    struct vk_rmv_resource_create_token create_token = {0};
@@ -775,7 +775,7 @@ radv_rmv_log_descriptor_pool_create(struct radv_device *device, const VkDescript
    if (pool->bo) {
       simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
       struct vk_rmv_resource_bind_token bind_token;
-      bind_token.address = pool->bo->va;
+      bind_token.address = radv_buffer_get_va(pool->bo);
       bind_token.is_system_memory = false;
       bind_token.resource_id = vk_rmv_get_resource_id_locked(&device->vk, (uint64_t)_pool);
       bind_token.size = pool->size;
@@ -910,7 +910,7 @@ radv_rmv_log_event_create(struct radv_device *device, VkEvent _event, VkEventCre
    simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
 
    if (event->map)
-      vk_rmv_log_cpu_map(&device->vk, event->bo->va, false);
+      vk_rmv_log_cpu_map(&device->vk, radv_buffer_get_va(event->bo), false);
 }
 
 void
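The RMV hunks above all share one logging shape, visible in their context lines: fill a token, take the memory-trace token mutex, emit, unlock. A condensed sketch of that pattern with the accessor in place, modeled on the radv_rmv_log_bo_destroy hunk above (field set and early-out checks are trimmed):

   /* Sketch only: mirrors the bo-destroy hunk above, with the VA read
    * routed through the accessor instead of the raw field. */
   static void
   log_bo_free_sketch(struct radv_device *device, struct radeon_winsys_bo *bo)
   {
      struct vk_rmv_virtual_free_token token = {0};
      token.address = radv_buffer_get_va(bo); /* was: bo->va */

      simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
      vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_VIRTUAL_FREE, &token);
      simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
   }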
@@ -675,7 +675,7 @@ radv_sdma_copy_buffer_image_unaligned(const struct radv_device *device, struct r
    const struct radv_sdma_chunked_copy_info info = radv_sdma_get_chunked_copy_info(device, img_in, base_extent);
    struct radv_sdma_surf img = *img_in;
    struct radv_sdma_surf tmp = {
-      .va = temp_bo->va,
+      .va = radv_buffer_get_va(temp_bo),
       .bpp = img.bpp,
       .blk_w = img.blk_w,
       .blk_h = img.blk_h,
@@ -824,7 +824,7 @@ radv_sdma_copy_image_t2t_scanline(const struct radv_device *device, struct radv_
    const struct radv_sdma_chunked_copy_info info = radv_sdma_get_chunked_copy_info(device, src, extent);
    struct radv_sdma_surf t2l_src = *src;
    struct radv_sdma_surf t2l_dst = {
-      .va = temp_bo->va,
+      .va = radv_buffer_get_va(temp_bo),
       .bpp = src->bpp,
       .blk_w = src->blk_w,
       .blk_h = src->blk_h,
@@ -832,7 +832,7 @@ radv_sdma_copy_image_t2t_scanline(const struct radv_device *device, struct radv_
    };
    struct radv_sdma_surf l2t_dst = *dst;
    struct radv_sdma_surf l2t_src = {
-      .va = temp_bo->va,
+      .va = radv_buffer_get_va(temp_bo),
       .bpp = dst->bpp,
       .blk_w = dst->blk_w,
       .blk_h = dst->blk_h,