tu: Switch to device address from vk_buffer
Some checks are pending
macOS-CI / macOS-CI (dri) (push) Waiting to run
macOS-CI / macOS-CI (xlib) (push) Waiting to run

Signed-off-by: Valentine Burley <valentine.burley@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/33965>
This commit is contained in:
Valentine Burley 2025-03-10 08:01:53 +01:00 committed by Marge Bot
parent 01f92acf10
commit 68bbc87063
9 changed files with 35 additions and 43 deletions

View file

@ -713,10 +713,11 @@ dump_as(struct vk_acceleration_structure *as)
struct tu_accel_struct_header *hdr =
(struct tu_accel_struct_header *)((char *)buf->bo->map + as->offset);
fprintf(stderr, "dumping AS at %" PRIx64 "\n", buf->iova + as->offset);
fprintf(stderr, "dumping AS at %" PRIx64 "\n",
vk_buffer_address(&buf->vk, as->offset));
u_hexdump(stderr, (uint8_t *)hdr, sizeof(*hdr), false);
char *base = ((char *)buf->bo->map + (hdr->bvh_ptr - buf->iova));
char *base = ((char *)buf->bo->map + (hdr->bvh_ptr - buf->vk.device_address));
struct tu_node *node = (struct tu_node *)base;
fprintf(stderr, "dumping nodes at %" PRIx64 "\n", hdr->bvh_ptr);
@ -746,7 +747,7 @@ as_finished(struct tu_device *dev, struct vk_acceleration_structure *as)
struct tu_accel_struct_header *hdr =
(struct tu_accel_struct_header *)((char *)buf->bo->map + as->offset);
return hdr->self_ptr == buf->iova + as->offset;
return hdr->self_ptr == vk_buffer_address(&buf->vk, as->offset);
}
VKAPI_ATTR void VKAPI_CALL

View file

@ -57,9 +57,9 @@ tu_DestroyBuffer(VkDevice _device,
tu_perfetto_log_destroy_buffer(device, buffer);
#endif
if (buffer->iova)
if (buffer->vk.device_address)
vk_address_binding_report(&instance->vk, &buffer->vk.base,
buffer->iova, buffer->bo_size,
buffer->vk.device_address, buffer->bo_size,
VK_DEVICE_ADDRESS_BINDING_TYPE_UNBIND_EXT);
@ -148,7 +148,7 @@ tu_BindBufferMemory2(VkDevice device,
if (mem) {
buffer->bo = mem->bo;
buffer->iova = mem->bo->iova + pBindInfos[i].memoryOffset;
buffer->vk.device_address = mem->bo->iova + pBindInfos[i].memoryOffset;
if (buffer->vk.usage &
(VK_BUFFER_USAGE_2_SAMPLER_DESCRIPTOR_BUFFER_BIT_EXT |
VK_BUFFER_USAGE_2_RESOURCE_DESCRIPTOR_BUFFER_BIT_EXT))
@ -170,15 +170,6 @@ tu_BindBufferMemory2(VkDevice device,
return VK_SUCCESS;
}
VkDeviceAddress
tu_GetBufferDeviceAddress(VkDevice _device,
const VkBufferDeviceAddressInfo* pInfo)
{
VK_FROM_HANDLE(tu_buffer, buffer, pInfo->buffer);
return buffer->iova;
}
uint64_t tu_GetBufferOpaqueCaptureAddress(
VkDevice _device,
const VkBufferDeviceAddressInfo* pInfo)

View file

@ -19,7 +19,6 @@ struct tu_buffer
struct vk_buffer vk;
struct tu_bo *bo;
uint64_t iova;
uint64_t bo_size;
};

View file

@ -30,7 +30,7 @@ tu_CreateBufferView(VkDevice _device,
fdl6_buffer_view_init(
view->descriptor, vk_format_to_pipe_format(view->vk.format),
swiz, buffer->iova + view->vk.offset, view->vk.range);
swiz, vk_buffer_address(&buffer->vk, view->vk.offset), view->vk.range);
*pView = tu_buffer_view_to_handle(view);

View file

@ -2461,7 +2461,7 @@ tu_copy_buffer_to_image(struct tu_cmd_buffer *cmd,
for (uint32_t i = 0; i < layers; i++) {
ops->dst(cs, &dst, i, src_format);
uint64_t src_va = src_buffer->iova + info->bufferOffset + layer_size * i;
uint64_t src_va = vk_buffer_address(&src_buffer->vk, info->bufferOffset) + layer_size * i;
bool unaligned = (src_va & 63) || (pitch & 63);
if (!has_unaligned && unaligned) {
for (uint32_t y = 0; y < extent.height; y++) {
@ -2647,7 +2647,7 @@ tu_copy_image_to_buffer(struct tu_cmd_buffer *cmd,
uint32_t layer_size = pitch * dst_height;
handle_buffer_unaligned_store<CHIP>(cmd,
dst_buffer->iova + info->bufferOffset,
vk_buffer_address(&dst_buffer->vk, info->bufferOffset),
layer_size * layers, unaligned_store);
ops->setup(cmd, cs, src_format, dst_format, VK_IMAGE_ASPECT_COLOR_BIT, blit_param, false, false,
@ -2660,7 +2660,7 @@ tu_copy_image_to_buffer(struct tu_cmd_buffer *cmd,
for (uint32_t i = 0; i < layers; i++) {
ops->src(cmd, cs, &src, i, VK_FILTER_NEAREST, dst_format);
uint64_t dst_va = dst_buffer->iova + info->bufferOffset + layer_size * i;
uint64_t dst_va = vk_buffer_address(&dst_buffer->vk, info->bufferOffset) + layer_size * i;
if ((dst_va & 63) || (pitch & 63)) {
for (uint32_t y = 0; y < extent.height; y++) {
uint32_t x = (dst_va & 63) / util_format_get_blocksize(dst_format);
@ -3287,9 +3287,9 @@ tu_CmdCopyBuffer2(VkCommandBuffer commandBuffer,
for (unsigned i = 0; i < pCopyBufferInfo->regionCount; ++i) {
const VkBufferCopy2 *region = &pCopyBufferInfo->pRegions[i];
copy_buffer<CHIP>(cmd,
dst_buffer->iova + region->dstOffset,
src_buffer->iova + region->srcOffset,
region->size, 1, &unaligned_store);
vk_buffer_address(&dst_buffer->vk, region->dstOffset),
vk_buffer_address(&src_buffer->vk, region->srcOffset),
region->size, 1, &unaligned_store);
}
after_buffer_unaligned_buffer_store<CHIP>(cmd, unaligned_store);
@ -3316,7 +3316,8 @@ tu_CmdUpdateBuffer(VkCommandBuffer commandBuffer,
bool unaligned_store = false;
memcpy(tmp.map, pData, dataSize);
copy_buffer<CHIP>(cmd, buffer->iova + dstOffset, tmp.iova, dataSize, 4, &unaligned_store);
copy_buffer<CHIP>(cmd, vk_buffer_address(&buffer->vk, dstOffset),
tmp.iova, dataSize, 4, &unaligned_store);
after_buffer_unaligned_buffer_store<CHIP>(cmd, unaligned_store);
}
@ -3387,7 +3388,7 @@ tu_CmdFillBuffer(VkCommandBuffer commandBuffer,
fillSize = vk_buffer_range(&buffer->vk, dstOffset, fillSize);
VkDeviceAddress dst_va = buffer->iova + dstOffset;
VkDeviceAddress dst_va = vk_buffer_address(&buffer->vk, dstOffset);
tu_cmd_fill_buffer<CHIP>(commandBuffer, dst_va, fillSize, data);
}

View file

@ -3037,7 +3037,7 @@ tu_CmdBindVertexBuffers2(VkCommandBuffer commandBuffer,
cmd->state.vb[firstBinding + i].size = 0;
} else {
struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);
cmd->state.vb[firstBinding + i].base = buf->iova + pOffsets[i];
cmd->state.vb[firstBinding + i].base = vk_buffer_address(&buf->vk, pOffsets[i]);
cmd->state.vb[firstBinding + i].size =
vk_buffer_range(&buf->vk, pOffsets[i], pSizes ? pSizes[i] : VK_WHOLE_SIZE);
}
@ -3089,7 +3089,7 @@ tu_CmdBindIndexBuffer2KHR(VkCommandBuffer commandBuffer,
if (cmd->state.index_size != index_size)
tu_cs_emit_regs(&cmd->draw_cs, A6XX_PC_RESTART_INDEX(restart_index));
cmd->state.index_va = buf->iova + offset;
cmd->state.index_va = vk_buffer_address(&buf->vk, offset);
cmd->state.max_index_count = size >> index_shift;
cmd->state.index_size = index_size;
} else {
@ -3600,7 +3600,7 @@ tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
for (uint32_t i = 0; i < bindingCount; i++) {
VK_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
uint64_t iova = buf->iova + pOffsets[i];
uint64_t iova = vk_buffer_address(&buf->vk, pOffsets[i]);
uint32_t size = buf->bo->size - (iova - buf->bo->iova);
uint32_t idx = i + firstBinding;
@ -3655,7 +3655,7 @@ tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
CP_MEM_TO_REG_0_UNK31 |
CP_MEM_TO_REG_0_CNT(1));
tu_cs_emit_qw(cs, buf->iova + counter_buffer_offset);
tu_cs_emit_qw(cs, vk_buffer_address(&buf->vk, counter_buffer_offset));
if (offset) {
tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
@ -3724,7 +3724,7 @@ tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
CP_REG_TO_MEM_0_CNT(1));
tu_cs_emit_qw(cs, buf->iova + counter_buffer_offset);
tu_cs_emit_qw(cs, vk_buffer_address(&buf->vk, counter_buffer_offset));
}
tu_cond_exec_end(cs);
@ -6638,7 +6638,7 @@ tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_NORMAL) |
A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
tu_cs_emit(cs, drawCount);
tu_cs_emit_qw(cs, buf->iova + offset);
tu_cs_emit_qw(cs, vk_buffer_address(&buf->vk, offset));
tu_cs_emit(cs, stride);
}
TU_GENX(tu_CmdDrawIndirect);
@ -6669,7 +6669,7 @@ tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
tu_cs_emit(cs, drawCount);
tu_cs_emit_qw(cs, cmd->state.index_va);
tu_cs_emit(cs, cmd->state.max_index_count);
tu_cs_emit_qw(cs, buf->iova + offset);
tu_cs_emit_qw(cs, vk_buffer_address(&buf->vk, offset));
tu_cs_emit(cs, stride);
}
TU_GENX(tu_CmdDrawIndexedIndirect);
@ -6705,8 +6705,8 @@ tu_CmdDrawIndirectCount(VkCommandBuffer commandBuffer,
tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDIRECT_COUNT) |
A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
tu_cs_emit(cs, drawCount);
tu_cs_emit_qw(cs, buf->iova + offset);
tu_cs_emit_qw(cs, count_buf->iova + countBufferOffset);
tu_cs_emit_qw(cs, vk_buffer_address(&buf->vk, offset));
tu_cs_emit_qw(cs, vk_buffer_address(&count_buf->vk, countBufferOffset));
tu_cs_emit(cs, stride);
}
TU_GENX(tu_CmdDrawIndirectCount);
@ -6739,8 +6739,8 @@ tu_CmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer,
tu_cs_emit(cs, drawCount);
tu_cs_emit_qw(cs, cmd->state.index_va);
tu_cs_emit(cs, cmd->state.max_index_count);
tu_cs_emit_qw(cs, buf->iova + offset);
tu_cs_emit_qw(cs, count_buf->iova + countBufferOffset);
tu_cs_emit_qw(cs, vk_buffer_address(&buf->vk, offset));
tu_cs_emit_qw(cs, vk_buffer_address(&count_buf->vk, countBufferOffset));
tu_cs_emit(cs, stride);
}
TU_GENX(tu_CmdDrawIndexedIndirectCount);
@ -6781,7 +6781,7 @@ tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
vertexStride = vertexStride >> 2;
}
tu_cs_emit(cs, instanceCount);
tu_cs_emit_qw(cs, buf->iova + counterBufferOffset);
tu_cs_emit_qw(cs, vk_buffer_address(&buf->vk, counterBufferOffset));
tu_cs_emit(cs, counterOffset);
tu_cs_emit(cs, vertexStride);
}
@ -7336,7 +7336,7 @@ tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
VK_FROM_HANDLE(tu_buffer, buffer, _buffer);
struct tu_dispatch_info info = {};
info.indirect = buffer->iova + offset;
info.indirect = vk_buffer_address(&buffer->vk, offset);
tu_dispatch<CHIP>(cmd_buffer, &info);
}
@ -7663,7 +7663,7 @@ tu_CmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer,
tu_emit_cache_flush<CHIP>(cmd);
VK_FROM_HANDLE(tu_buffer, buf, pConditionalRenderingBegin->buffer);
uint64_t iova = buf->iova + pConditionalRenderingBegin->offset;
uint64_t iova = vk_buffer_address(&buf->vk, pConditionalRenderingBegin->offset);
/* qcom doesn't support 32-bit reference values, only 64-bit, but Vulkan
* mandates 32-bit comparisons. Our workaround is to copy the reference
@ -7711,7 +7711,7 @@ tu_CmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer,
VK_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
VK_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
uint64_t va = buffer->iova + dstOffset;
uint64_t va = vk_buffer_address(&buffer->vk, dstOffset);
struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
struct tu_cache_state *cache =

View file

@ -1007,7 +1007,7 @@ buffer_info_to_address(const VkDescriptorBufferInfo *buffer_info)
VK_FROM_HANDLE(tu_buffer, buffer, buffer_info->buffer);
uint32_t range = buffer ? vk_buffer_range(&buffer->vk, buffer_info->offset, buffer_info->range) : 0;
uint64_t va = buffer ? buffer->iova + buffer_info->offset : 0;
uint64_t va = buffer ? vk_buffer_address(&buffer->vk, buffer_info->offset) : 0;
return (VkDescriptorAddressInfoEXT) {
.address = va,

View file

@ -790,7 +790,7 @@ emit_copy_query_pool_results(struct tu_cmd_buffer *cmdbuf,
for (uint32_t i = 0; i < queryCount; i++) {
uint32_t query = firstQuery + i;
uint64_t available_iova = query_available_iova(pool, query);
uint64_t buffer_iova = buffer->iova + dstOffset + i * stride;
uint64_t buffer_iova = vk_buffer_address(&buffer->vk, dstOffset) + i * stride;
uint32_t result_count = get_result_count(pool);
uint32_t statistics = pool->vk.pipeline_statistics;

View file

@ -279,7 +279,7 @@ tu_rmv_log_buffer_bind(struct tu_device *device, struct tu_buffer *buffer)
tu_rmv_emit_resource_bind_locked(device,
tu_rmv_get_resource_id_locked(device, buffer),
buffer->bo ? buffer->iova : 0,
buffer->bo ? buffer->vk.device_address : 0,
buffer->vk.size);
simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);